Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-15 08:15:55 +08:00)

Compare commits: v0.7.0-rc1...v0.8.2, 156 commits.

Commit SHA1 list:

6224def4dd
5abee699a6
54e497dfba
dfbd226285
a78c2957a2
e80890ec89
10fe8f380c
5fac64c2c4
24ad37a5d2
106651877d
35bcd88f08
c8f7c1e93f
b78c680207
d7412c9420
a7fba7bf3a
19ff7cdadc
c255c04eed
9fcea76dea
1416bc1d83
215a128fc1
4e4eea7814
8079bd2841
2d5368cccc
a1256c6bb2
e7863eb664
171c4375a1
45844805ec
b77d7864fa
6efcee28d5
3ad24524c4
971b5d2b73
94c5dde85a
f62c02329e
d2e53f5e05
7af29802d4
6ac01ec9ac
20a55e9184
6c56109083
dab3fe71bd
9867ca279a
91e550b715
280c008f81
5939a23af6
7f1041164e
64ce211ba4
b5bf28d722
10debb577e
75cdea48e4
d96d7fb2dc
e3245a400a
e871c39f05
9c0a23996d
3b2aeb2d5b
e98a476dc8
7677052cb7
c273e0986c
9ee499ae27
230dfa96a3
f1a8f54c83
2bcf3524e5
26918513e3
893d505803
22aaa260e7
1bcc3556fc
eef6deb7c2
542759ea31
e5f590a7fa
ecf215b927
299fd19c49
4b633c3c7b
eb8057e8e0
32f6358d78
3b47722032
e60f0f2c4f
b39ebab666
f891187d8b
307c94e5c7
60a025b227
fec415a8e0
2d7540fb0a
595285736c
378f0b45c6
c3dab802d8
fa04611afc
ffa062dc95
0fc2b5ca85
9a1267cd02
c74b2fe7a4
6c69d970f7
d3e56ea9d9
11b771c789
278f94a8b6
14b38a9aa8
0044c28b1f
b568b219bc
aabbe5a56a
b038dd063e
f25d5ff02f
b67bdedb23
3ccb883d95
785c861233
1b69919313
24db7366ba
08547827db
f37c253ae4
d77e2453da
2b4d305c58
5d715ada96
3400fa5628
f04c8c8430
de6b04d726
fe9f9bba87
e482ba2c73
038727477c
ed4103ef52
54286a0117
9c3be32bc9
59533bbb5c
5f8600f098
d95ebef55c
33c121df01
1dde00c4bc
4466a24f9e
ec9daba87e
202e99695b
7371dda7a2
62bdf4d85e
7f8dbf890d
bede4ab552
5c5125f30e
e9cf2cbe32
4ee7f70400
0abda783bb
aadd118883
9aff9301ce
93d6b654ca
42287815b5
dcabc22072
ae53101e89
61627c2ece
ab73275f58
316ca972b6
5c2b9bbfc5
cc2a879660
89334a88a9
1927dba42f
72dab552b5
a0a7db127c
bcfd434829
d1aaed7a77
f0026081a7
a18829f837
da0eb138d0
a2c7d43e46
f7cba04f5e
12b5db70e2
.github/workflows/build.yml (vendored): 26 changes
```diff
@@ -15,7 +15,6 @@ on:

 env:
   REPO_SLUG: "docker/buildx-bin"
-  REPO_SLUG_ORIGIN: "moby/buildkit:master"
   RELEASE_OUT: "./release-out"

 jobs:
@@ -31,8 +30,6 @@ jobs:
       -
         name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
-        with:
-          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
      -
        name: Test
        run: |
@@ -90,3 +87,26 @@ jobs:
         with:
           draft: true
           files: ${{ env.RELEASE_OUT }}/*
+
+  buildkit-edge:
+    runs-on: ubuntu-latest
+    continue-on-error: true
+    steps:
+      -
+        name: Checkout
+        uses: actions/checkout@v2
+      -
+        name: Set up QEMU
+        uses: docker/setup-qemu-action@v1
+      -
+        name: Set up Docker Buildx
+        uses: docker/setup-buildx-action@v1
+        with:
+          driver-opts: image=moby/buildkit:master
+          buildkitd-flags: --debug
+      -
+        # Just run a bake target to check eveything runs fine
+        name: Build
+        uses: docker/bake-action@v1
+        with:
+          targets: binaries-cross
```
.github/workflows/e2e.yml (vendored): 1 change
```diff
@@ -31,6 +31,7 @@ jobs:
           - mnode-false
           - mnode-true
         platforms:
           - linux/amd64
+          - linux/amd64,linux/arm64
         include:
           - driver: kubernetes
```
Dockerfile:

```diff
@@ -6,7 +6,7 @@ ARG DOCKERD_VERSION=20.10.8
 FROM docker:$DOCKERD_VERSION AS dockerd-release

 # xx is a helper for cross-compilation
-FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04 AS xx
+FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.0.0 AS xx

 FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest

@@ -24,12 +24,13 @@ RUN --mount=target=. \

 FROM gobase AS buildx-build
 ENV CGO_ENABLED=0
+ARG LDFLAGS="-w -s"
 ARG TARGETPLATFORM
 RUN --mount=type=bind,target=. \
     --mount=type=cache,target=/root/.cache \
     --mount=type=cache,target=/go/pkg/mod \
     --mount=type=bind,source=/tmp/.ldflags,target=/tmp/.ldflags,from=buildx-version \
-  set -x; xx-go build -ldflags "$(cat /tmp/.ldflags)" -o /usr/bin/buildx ./cmd/buildx && \
+  set -x; xx-go build -ldflags "$(cat /tmp/.ldflags) ${LDFLAGS}" -o /usr/bin/buildx ./cmd/buildx && \
     xx-verify --static /usr/bin/buildx

 FROM buildx-build AS test
```
README.md: 104 changes
````diff
@@ -1,9 +1,10 @@
 # buildx

-[](https://pkg.go.dev/github.com/docker/buildx)
-[](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
-[](https://goreportcard.com/report/github.com/docker/buildx)
-[](https://codecov.io/gh/docker/buildx)
+[](https://github.com/docker/buildx/releases/latest)
+[](https://pkg.go.dev/github.com/docker/buildx)
+[](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
+[](https://goreportcard.com/report/github.com/docker/buildx)
+[](https://codecov.io/gh/docker/buildx)

 `buildx` is a Docker CLI plugin for extended build capabilities with
 [BuildKit](https://github.com/moby/buildkit).
@@ -21,9 +22,10 @@ Key features:
 # Table of Contents

 - [Installing](#installing)
-  - [Docker](#docker)
-  - [Binary release](#binary-release)
-  - [From `Dockerfile`](#from-dockerfile)
+  - [Windows and macOS](#windows-and-macos)
+  - [Linux packages](#linux-packages)
+  - [Manual download](#manual-download)
+  - [Dockerfile](#dockerfile)
 - [Set buildx as the default builder](#set-buildx-as-the-default-builder)
 - [Building](#building)
   - [Getting started](#getting-started)
@@ -31,18 +33,25 @@ Key features:
   - [Working with builder instances](#working-with-builder-instances)
   - [Building multi-platform images](#building-multi-platform-images)
   - [High-level build options](#high-level-build-options)
-- [Documentation](docs/reference/buildx.md)
-  - [`buildx bake`](docs/reference/buildx_bake.md)
+- [Guides](docs/guides)
+  - [CI/CD](docs/guides/cicd.md)
+  - [CNI networking](docs/guides/cni-networking.md)
+  - [Registry mirror](docs/guides/registry-mirror.md)
+  - [Resource limiting](docs/guides/resource-limiting.md)
+  - [Using a custom network](docs/guides/custom-network.md)
+  - [Using a custom registry configuration](docs/guides/custom-registry-config.md)
+- [Reference](docs/reference/buildx.md)
+  - [`buildx bake`](docs/reference/buildx_bake.md)
   - [`buildx build`](docs/reference/buildx_build.md)
   - [`buildx create`](docs/reference/buildx_create.md)
   - [`buildx du`](docs/reference/buildx_du.md)
   - [`buildx imagetools`](docs/reference/buildx_imagetools.md)
   - [`buildx imagetools create`](docs/reference/buildx_imagetools_create.md)
   - [`buildx imagetools inspect`](docs/reference/buildx_imagetools_inspect.md)
   - [`buildx inspect`](docs/reference/buildx_inspect.md)
   - [`buildx install`](docs/reference/buildx_install.md)
   - [`buildx ls`](docs/reference/buildx_ls.md)
   - [`buildx prune`](docs/reference/buildx_prune.md)
   - [`buildx rm`](docs/reference/buildx_rm.md)
   - [`buildx stop`](docs/reference/buildx_stop.md)
   - [`buildx uninstall`](docs/reference/buildx_uninstall.md)
@@ -56,27 +65,60 @@ Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer.
 A limited set of functionality works with older versions of Docker when
 invoking the binary directly.

-## Docker
+## Windows and macOS

-`buildx` comes bundled with Docker Desktop and in latest
-[Docker CE packages](https://docs.docker.com/engine/install/), but may not be
-included in third-party software components (in which case follow the
-[binary release](#binary-release) instructions).
+Docker Buildx is included in [Docker Desktop](https://docs.docker.com/desktop/)
+for Windows and macOS.

-## Binary release
+## Linux packages

-You can also download the latest `buildx` binary from the
-[GitHub releases](https://github.com/docker/buildx/releases/latest) page, copy
-it to `~/.docker/cli-plugins` folder with name `docker-buildx` and change the
-permission to execute:
+Docker Linux packages also include Docker Buildx when installed using the
+[DEB or RPM packages](https://docs.docker.com/engine/install/).

-```console
-$ chmod a+x ~/.docker/cli-plugins/docker-buildx
-```
+## Manual download

-## From `Dockerfile`
+> **Important**
+>
+> This section is for unattended installation of the buildx component. These
+> instructions are mostly suitable for testing purposes. We do not recommend
+> installing buildx using manual download in production environments as they
+> will not be updated automatically with security updates.
+>
+> On Windows and macOS, we recommend that you install [Docker Desktop](https://docs.docker.com/desktop/)
+> instead. For Linux, we recommend that you follow the [instructions specific for your distribution](#linux-packages).

-Here is how to use buildx inside a Dockerfile through the
+You can also download the latest binary from the [GitHub releases page](https://github.com/docker/buildx/releases/latest).
+
+Rename the relevant binary and copy it to the destination matching your OS:
+
+| OS      | Binary name         | Destination folder                  |
+| ------- | ------------------- | ----------------------------------- |
+| Linux   | `docker-buildx`     | `$HOME/.docker/cli-plugins`         |
+| macOS   | `docker-buildx`     | `$HOME/.docker/cli-plugins`         |
+| Windows | `docker-buildx.exe` | `%USERPROFILE%\.docker\cli-plugins` |
+
+Or copy it into one of these folders for installing it system-wide.
+
+On Unix environments:
+
+* `/usr/local/lib/docker/cli-plugins` OR `/usr/local/libexec/docker/cli-plugins`
+* `/usr/lib/docker/cli-plugins` OR `/usr/libexec/docker/cli-plugins`
+
+On Windows:
+
+* `C:\ProgramData\Docker\cli-plugins`
+* `C:\Program Files\Docker\cli-plugins`
+
+> **Note**
+>
+> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
+> ```shell
+> $ chmod +x ~/.docker/cli-plugins/docker-buildx
+> ```
+
+## Dockerfile
+
+Here is how to install and use Buildx inside a Dockerfile through the
 [`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:

 ```Dockerfile
@@ -97,17 +139,17 @@ To remove this alias, run [`docker buildx uninstall`](docs/reference/buildx_unin

 ```console
 # Buildx 0.6+
-$ docker buildx bake "git://github.com/docker/buildx"
+$ docker buildx bake "https://github.com/docker/buildx.git"
 $ mkdir -p ~/.docker/cli-plugins
 $ mv ./bin/buildx ~/.docker/cli-plugins/docker-buildx

 # Docker 19.03+
-$ DOCKER_BUILDKIT=1 docker build --platform=local -o . "git://github.com/docker/buildx"
+$ DOCKER_BUILDKIT=1 docker build --platform=local -o . "https://github.com/docker/buildx.git"
 $ mkdir -p ~/.docker/cli-plugins
 $ mv buildx ~/.docker/cli-plugins/docker-buildx

 # Local
-$ git clone git://github.com/docker/buildx && cd buildx
+$ git clone https://github.com/docker/buildx.git && cd buildx
 $ make install
 ```
@@ -162,7 +204,7 @@ with `--output`.
 ## Working with builder instances

 By default, buildx will initially use the `docker` driver if it is supported,
-providing a very similar user experience to the native `docker build`. Note tha
+providing a very similar user experience to the native `docker build`. Note that
 you must use a local shared daemon to build your applications.

 Buildx allows you to create new instances of isolated builders. This can be
````
bake/bake.go: 328 changes
```diff
@@ -7,7 +7,9 @@ import (
 	"io/ioutil"
 	"os"
 	"path"
+	"path/filepath"
 	"regexp"
+	"sort"
 	"strconv"
 	"strings"

@@ -22,8 +24,15 @@ import (
 	"github.com/pkg/errors"
 )

-var httpPrefix = regexp.MustCompile(`^https?://`)
-var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
+var (
+	httpPrefix                   = regexp.MustCompile(`^https?://`)
+	gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)
+
+	validTargetNameChars        = `[a-zA-Z0-9_-]+`
+	validTargetNameCharsCompose = `[a-zA-Z0-9._-]+`
+	targetNamePattern           = regexp.MustCompile(`^` + validTargetNameChars + `$`)
+	targetNamePatternCompose    = regexp.MustCompile(`^` + validTargetNameCharsCompose + `$`)
+)

 type File struct {
 	Name string
@@ -55,12 +64,21 @@ func ReadLocalFiles(names []string) ([]File, error) {
 	out := make([]File, 0, len(names))

 	for _, n := range names {
-		dt, err := ioutil.ReadFile(n)
-		if err != nil {
-			if isDefault && errors.Is(err, os.ErrNotExist) {
-				continue
-			}
-			return nil, err
-		}
+		var dt []byte
+		var err error
+		if n == "-" {
+			dt, err = ioutil.ReadAll(os.Stdin)
+			if err != nil {
+				return nil, err
+			}
+		} else {
+			dt, err = ioutil.ReadFile(n)
+			if err != nil {
+				if isDefault && errors.Is(err, os.ErrNotExist) {
+					continue
+				}
+				return nil, err
+			}
+		}
 		out = append(out, File{Name: n, Data: dt})
 	}
```
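With this change, `ReadLocalFiles` treats the file name `-` as stdin, so a bake definition can be piped in. A minimal standalone sketch of the same pattern (a hypothetical `readInput` helper, using the newer `os`/`io` APIs rather than the `ioutil` ones in the diff):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// readInput mirrors the pattern added above: "-" means read stdin,
// anything else is treated as a file path.
func readInput(name string) ([]byte, error) {
	if name == "-" {
		return io.ReadAll(os.Stdin)
	}
	return os.ReadFile(name)
}

func main() {
	dt, err := readInput("-") // e.g. pipe a bake file into the program
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(dt))
}
```

Usage would presumably be along the lines of `echo 'target "app" {}' | docker buildx bake -f - app`, assuming the CLI wires `-` through to this reader.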
```diff
@@ -89,7 +107,55 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
 			}
 		}
 	}
-	return m, c.Groups, nil
+
+	var g []*Group
+	if len(targets) == 0 || (len(targets) == 1 && targets[0] == "default") {
+		for _, group := range c.Groups {
+			if group.Name != "default" {
+				continue
+			}
+			g = []*Group{{Targets: group.Targets}}
+		}
+	} else {
+		var gt []string
+		for _, target := range targets {
+			isGroup := false
+			for _, group := range c.Groups {
+				if target == group.Name {
+					gt = append(gt, group.Targets...)
+					isGroup = true
+					break
+				}
+			}
+			if !isGroup {
+				gt = append(gt, target)
+			}
+		}
+		g = []*Group{{Targets: dedupString(gt)}}
+	}
+
+	for name, t := range m {
+		if err := c.loadLinks(name, t, m, o, nil); err != nil {
+			return nil, nil, err
+		}
+	}
+
+	return m, g, nil
+}
+
+func dedupString(s []string) []string {
+	if len(s) == 0 {
+		return s
+	}
+	var res []string
+	seen := make(map[string]struct{})
+	for _, val := range s {
+		if _, ok := seen[val]; !ok {
+			res = append(res, val)
+			seen[val] = struct{}{}
+		}
+	}
+	return res
 }

 func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
```
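`dedupString` keeps the first occurrence of each entry, so the flattened group target list above stays in the order targets were requested. A standalone copy to illustrate:

```go
package main

import "fmt"

// dedupString, as added in the hunk above, drops later duplicates
// while preserving first-occurrence order.
func dedupString(s []string) []string {
	if len(s) == 0 {
		return s
	}
	var res []string
	seen := make(map[string]struct{})
	for _, val := range s {
		if _, ok := seen[val]; !ok {
			res = append(res, val)
			seen[val] = struct{}{}
		}
	}
	return res
}

func main() {
	fmt.Println(dedupString([]string{"webapp", "db", "webapp"})) // [webapp db]
}
```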
```diff
@@ -125,8 +191,9 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)

 	if len(fs) > 0 {
 		if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
-			LookupVar: os.LookupEnv,
-			Vars:      defaults,
+			LookupVar:     os.LookupEnv,
+			Vars:          defaults,
+			ValidateLabel: validateTargetName,
 		}, &c); err.HasErrors() {
 			return nil, err
 		}
@@ -246,10 +313,45 @@ func (c Config) expandTargets(pattern string) ([]string, error) {
 	return names, nil
 }

+func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[string]map[string]Override, visited []string) error {
+	visited = append(visited, name)
+	for _, v := range t.Contexts {
+		if strings.HasPrefix(v, "target:") {
+			target := strings.TrimPrefix(v, "target:")
+			if target == t.Name {
+				return errors.Errorf("target %s cannot link to itself", target)
+			}
+			for _, v := range visited {
+				if v == target {
+					return errors.Errorf("infinite loop from %s to %s", name, target)
+				}
+			}
+			t2, ok := m[target]
+			if !ok {
+				var err error
+				t2, err = c.ResolveTarget(target, o)
+				if err != nil {
+					return err
+				}
+				t2.Outputs = nil
+				m[target] = t2
+			}
+			if err := c.loadLinks(target, t2, m, o, visited); err != nil {
+				return err
+			}
+			if len(t.Platforms) > 1 && len(t2.Platforms) > 1 {
+				if !sliceEqual(t.Platforms, t2.Platforms) {
+					return errors.Errorf("target %s can't be used by %s because it is defined for different platforms %v and %v", target, name, t2.Platforms, t.Platforms)
+				}
+			}
+		}
+	}
+	return nil
+}
+
 func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) {
 	m := map[string]map[string]Override{}
 	for _, v := range v {

 		parts := strings.SplitN(v, "=", 2)
 		keys := strings.SplitN(parts[0], ".", 3)
 		if len(keys) < 2 {
```
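`loadLinks` follows `target:`-prefixed contexts recursively and uses the `visited` path to reject self-links and cycles. A minimal sketch of the same walk over a hypothetical string-keyed link map:

```go
package main

import "fmt"

// follow walks links between nodes, failing on self-links and cycles,
// in the same style as loadLinks above. The link map is hypothetical.
func follow(name string, links map[string][]string, visited []string) error {
	visited = append(visited, name)
	for _, next := range links[name] {
		if next == name {
			return fmt.Errorf("target %s cannot link to itself", name)
		}
		for _, v := range visited {
			if v == next {
				return fmt.Errorf("infinite loop from %s to %s", name, next)
			}
		}
		if err := follow(next, links, visited); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	links := map[string][]string{"app": {"mid"}, "mid": {"app"}}
	fmt.Println(follow("app", links, nil)) // infinite loop from mid to app
}
```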
```diff
@@ -294,6 +396,11 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
 				o.Value = v
 			}
 			fallthrough
+		case "contexts":
+			if len(keys) != 3 {
+				return nil, errors.Errorf("invalid key %s, contexts requires name", parts[0])
+			}
+			fallthrough
 		default:
 			if len(parts) == 2 {
 				o.Value = parts[1]
@@ -307,12 +414,12 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
 }

 func (c Config) ResolveGroup(name string) []string {
-	return c.group(name, map[string]struct{}{})
+	return dedupString(c.group(name, map[string][]string{}))
 }

-func (c Config) group(name string, visited map[string]struct{}) []string {
+func (c Config) group(name string, visited map[string][]string) []string {
 	if _, ok := visited[name]; ok {
-		return nil
+		return visited[name]
 	}
 	var g *Group
 	for _, group := range c.Groups {
@@ -324,19 +431,26 @@ func (c Config) group(name string, visited map[string][]string) []string {
 	if g == nil {
 		return []string{name}
 	}
-	visited[name] = struct{}{}
+	visited[name] = []string{}
 	targets := make([]string, 0, len(g.Targets))
 	for _, t := range g.Targets {
-		targets = append(targets, c.group(t, visited)...)
+		tgroup := c.group(t, visited)
+		if len(tgroup) > 0 {
+			targets = append(targets, tgroup...)
+		} else {
+			targets = append(targets, t)
+		}
 	}
+	visited[name] = targets
 	return targets
 }

 func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) {
-	t, err := c.target(name, map[string]struct{}{}, overrides)
+	t, err := c.target(name, map[string]*Target{}, overrides)
 	if err != nil {
 		return nil, err
 	}
 	t.Inherits = nil
 	if t.Context == nil {
 		s := "."
 		t.Context = &s
```
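The reworked `group` walk caches each group's resolved target list in `visited` instead of a bare set, so a group reached twice returns its cached result rather than nothing, and `ResolveGroup` dedupes at the end. A simplified sketch of that shape (a hypothetical `resolve` helper, not the buildx API):

```go
package main

import "fmt"

// resolve mirrors the memoized group expansion above: groups expand to
// their members, anything that is not a group resolves to itself, and
// visited caches completed expansions (an empty slice marks in-progress).
func resolve(name string, groups map[string][]string, visited map[string][]string) []string {
	if r, ok := visited[name]; ok {
		return r
	}
	members, isGroup := groups[name]
	if !isGroup {
		return []string{name}
	}
	visited[name] = []string{}
	var out []string
	for _, m := range members {
		out = append(out, resolve(m, groups, visited)...)
	}
	visited[name] = out
	return out
}

func main() {
	groups := map[string][]string{"a": {"b", "c"}, "b": {"d"}, "c": {"d"}}
	fmt.Println(resolve("a", groups, map[string][]string{})) // [d d]
}
```

Here `a` expands to `[d d]`, and the `dedupString` wrapper in `ResolveGroup` collapses it to `[d]`.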
```diff
@@ -348,11 +462,11 @@ func (c Config) ResolveTarget(name string, overrides map[string]map[string]Overr
 	return t, nil
 }

-func (c Config) target(name string, visited map[string]struct{}, overrides map[string]map[string]Override) (*Target, error) {
-	if _, ok := visited[name]; ok {
-		return nil, nil
+func (c Config) target(name string, visited map[string]*Target, overrides map[string]map[string]Override) (*Target, error) {
+	if t, ok := visited[name]; ok {
+		return t, nil
 	}
-	visited[name] = struct{}{}
+	visited[name] = nil
 	var t *Target
 	for _, target := range c.Targets {
 		if target.Name == name {
@@ -373,7 +487,6 @@ func (c Config) target(name string, visited map[string]struct{}, overrides map[s
 			tt.Merge(t)
 		}
 	}
-	t.Inherits = nil
 	m := defaultTarget()
 	m.Merge(tt)
 	m.Merge(t)
@@ -381,8 +494,8 @@ func (c Config) target(name string, visited map[string]struct{}, overrides map[s
 	if err := tt.AddOverrides(overrides[name]); err != nil {
 		return nil, err
 	}
-
 	tt.normalize()
+	visited[name] = tt
 	return tt, nil
 }

@@ -399,6 +512,7 @@ type Target struct {
 	Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional"`

 	Context          *string           `json:"context,omitempty" hcl:"context,optional"`
+	Contexts         map[string]string `json:"contexts,omitempty" hcl:"contexts,optional"`
 	Dockerfile       *string           `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
 	DockerfileInline *string           `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
 	Args             map[string]string `json:"args,omitempty" hcl:"args,optional"`
@@ -413,7 +527,8 @@ type Target struct {
 	Outputs []string `json:"output,omitempty" hcl:"output,optional"`
 	Pull    *bool    `json:"pull,omitempty" hcl:"pull,optional"`
 	NoCache *bool    `json:"no-cache,omitempty" hcl:"no-cache,optional"`

 	NetworkMode   *string  `json:"-" hcl:"-"`
+	NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional"`
 	// IMPORTANT: if you add more fields here, do not forget to update newOverrides and README.
 }
@@ -425,6 +540,16 @@ func (t *Target) normalize() {
 	t.CacheFrom = removeDupes(t.CacheFrom)
 	t.CacheTo = removeDupes(t.CacheTo)
 	t.Outputs = removeDupes(t.Outputs)
+	t.NoCacheFilter = removeDupes(t.NoCacheFilter)
+
+	for k, v := range t.Contexts {
+		if v == "" {
+			delete(t.Contexts, k)
+		}
+	}
+	if len(t.Contexts) == 0 {
+		t.Contexts = nil
+	}
 }

 func (t *Target) Merge(t2 *Target) {
@@ -443,6 +568,12 @@ func (t *Target) Merge(t2 *Target) {
 		}
 		t.Args[k] = v
 	}
+	for k, v := range t2.Contexts {
+		if t.Contexts == nil {
+			t.Contexts = map[string]string{}
+		}
+		t.Contexts[k] = v
+	}
 	for k, v := range t2.Labels {
 		if t.Labels == nil {
 			t.Labels = map[string]string{}
@@ -479,6 +610,12 @@ func (t *Target) Merge(t2 *Target) {
 	if t2.NoCache != nil {
 		t.NoCache = t2.NoCache
 	}
+	if t2.NetworkMode != nil {
+		t.NetworkMode = t2.NetworkMode
+	}
+	if t2.NoCacheFilter != nil { // merge
+		t.NoCacheFilter = append(t.NoCacheFilter, t2.NoCacheFilter...)
+	}
 	t.Inherits = append(t.Inherits, t2.Inherits...)
 }
@@ -499,7 +636,14 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
 				t.Args = map[string]string{}
 			}
 			t.Args[keys[1]] = value
-
+		case "contexts":
+			if len(keys) != 2 {
+				return errors.Errorf("contexts require name")
+			}
+			if t.Contexts == nil {
+				t.Contexts = map[string]string{}
+			}
+			t.Contexts[keys[1]] = value
 		case "labels":
 			if len(keys) != 2 {
 				return errors.Errorf("labels require name")
@@ -530,6 +674,8 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
 				return errors.Errorf("invalid value %s for boolean key no-cache", value)
 			}
 			t.NoCache = &noCache
+		case "no-cache-filter":
+			t.NoCacheFilter = o.ArrValue
 		case "pull":
 			pull, err := strconv.ParseBool(value)
 			if err != nil {
@@ -573,6 +719,21 @@ func updateContext(t *build.Inputs, inp *Input) {
 	if inp == nil || inp.State == nil {
 		return
 	}
+
+	for k, v := range t.NamedContexts {
+		if v.Path == "." {
+			t.NamedContexts[k] = build.NamedContext{Path: inp.URL}
+		}
+		if strings.HasPrefix(v.Path, "cwd://") || strings.HasPrefix(v.Path, "target:") || strings.HasPrefix(v.Path, "docker-image:") {
+			continue
+		}
+		if IsRemoteURL(v.Path) {
+			continue
+		}
+		st := llb.Scratch().File(llb.Copy(*inp.State, v.Path, "/"), llb.WithCustomNamef("set context %s to %s", k, v.Path))
+		t.NamedContexts[k] = build.NamedContext{State: &st}
+	}
+
 	if t.ContextPath == "." {
 		t.ContextPath = inp.URL
 		return
@@ -587,6 +748,59 @@ func updateContext(t *build.Inputs, inp *Input) {
 	t.ContextState = &st
 }

+// validateContextsEntitlements is a basic check to ensure contexts do not
+// escape local directories when loaded from remote sources. This is to be
+// replaced with proper entitlements support in the future.
+func validateContextsEntitlements(t build.Inputs, inp *Input) error {
+	if inp == nil || inp.State == nil {
+		return nil
+	}
+	if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
+		if vv, _ := strconv.ParseBool(v); vv {
+			return nil
+		}
+	}
+	if t.ContextState == nil {
+		if err := checkPath(t.ContextPath); err != nil {
+			return err
+		}
+	}
+	for _, v := range t.NamedContexts {
+		if v.State != nil {
+			continue
+		}
+		if err := checkPath(v.Path); err != nil {
+			return err
+		}
+	}
+	return nil
+}
+
+func checkPath(p string) error {
+	if IsRemoteURL(p) || strings.HasPrefix(p, "target:") || strings.HasPrefix(p, "docker-image:") {
+		return nil
+	}
+	p, err := filepath.EvalSymlinks(p)
+	if err != nil {
+		if os.IsNotExist(err) {
+			return nil
+		}
+		return err
+	}
+	wd, err := os.Getwd()
+	if err != nil {
+		return err
+	}
+	rel, err := filepath.Rel(wd, p)
+	if err != nil {
+		return err
+	}
+	if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
+		return errors.Errorf("path %s is outside of the working directory, please set BAKE_ALLOW_REMOTE_FS_ACCESS=1", p)
+	}
+	return nil
+}
+
 func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
 	if v := t.Context; v != nil && *v == "-" {
 		return nil, errors.Errorf("context from stdin not allowed in bake")
```
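`checkPath` is the core of the new entitlement check: after resolving symlinks, any path whose relative form escapes the working directory is rejected unless `BAKE_ALLOW_REMOTE_FS_ACCESS=1` is set. A sketch of the containment test alone (a hypothetical `outsideWorkdir` helper, using `filepath.Abs` where the diff relies on the symlink resolution above):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// outsideWorkdir reports whether p escapes the current working directory,
// using the same filepath.Rel test as checkPath above.
func outsideWorkdir(p string) (bool, error) {
	wd, err := os.Getwd()
	if err != nil {
		return false, err
	}
	abs, err := filepath.Abs(p)
	if err != nil {
		return false, err
	}
	rel, err := filepath.Rel(wd, abs)
	if err != nil {
		return false, err
	}
	return rel == ".." || strings.HasPrefix(rel, ".."+string(os.PathSeparator)), nil
}

func main() {
	for _, p := range []string{"./subdir", "../elsewhere"} {
		out, _ := outsideWorkdir(p)
		fmt.Println(p, "escapes:", out) // ./subdir false, ../elsewhere true
	}
}
```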
```diff
@@ -619,10 +833,15 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
 	if t.Pull != nil {
 		pull = *t.Pull
 	}
+	networkMode := ""
+	if t.NetworkMode != nil {
+		networkMode = *t.NetworkMode
+	}

 	bi := build.Inputs{
 		ContextPath:    contextPath,
 		DockerfilePath: dockerfilePath,
+		NamedContexts:  toNamedContexts(t.Contexts),
 	}
 	if t.DockerfileInline != nil {
 		bi.DockerfileInline = *t.DockerfileInline
@@ -631,16 +850,27 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
 	if strings.HasPrefix(bi.ContextPath, "cwd://") {
 		bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
 	}
+	for k, v := range bi.NamedContexts {
+		if strings.HasPrefix(v.Path, "cwd://") {
+			bi.NamedContexts[k] = build.NamedContext{Path: path.Clean(strings.TrimPrefix(v.Path, "cwd://"))}
+		}
+	}
+
+	if err := validateContextsEntitlements(bi, inp); err != nil {
+		return nil, err
+	}
+
 	t.Context = &bi.ContextPath

 	bo := &build.Options{
-		Inputs:    bi,
-		Tags:      t.Tags,
-		BuildArgs: t.Args,
-		Labels:    t.Labels,
-		NoCache:   noCache,
-		Pull:      pull,
+		Inputs:        bi,
+		Tags:          t.Tags,
+		BuildArgs:     t.Args,
+		Labels:        t.Labels,
+		NoCache:       noCache,
+		NoCacheFilter: t.NoCacheFilter,
+		Pull:          pull,
+		NetworkMode:   networkMode,
 	}

 	platforms, err := platformutil.Parse(t.Platforms)
@@ -733,3 +963,39 @@ func parseOutputType(str string) string {
 	}
 	return ""
 }
+
+func validateTargetName(name string) error {
+	if !targetNamePattern.MatchString(name) {
+		return errors.Errorf("only %q are allowed", validTargetNameChars)
+	}
+	return nil
+}
+
+func validateTargetNameCompose(name string) error {
+	if !targetNamePatternCompose.MatchString(name) {
+		return errors.Errorf("only %q are allowed", validTargetNameCharsCompose)
+	}
+	return nil
+}
+
+func sliceEqual(s1, s2 []string) bool {
+	if len(s1) != len(s2) {
+		return false
+	}
+	sort.Strings(s1)
+	sort.Strings(s2)
+	for i := range s1 {
+		if s1[i] != s2[i] {
+			return false
+		}
+	}
+	return true
+}
+
+func toNamedContexts(m map[string]string) map[string]build.NamedContext {
+	m2 := make(map[string]build.NamedContext, len(m))
+	for k, v := range m {
+		m2[k] = build.NamedContext{Path: v}
+	}
+	return m2
+}
```
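Worth noting: `sliceEqual` sorts both arguments in place, which is fine for the freshly built platform lists it compares here, but would surprise callers that hold onto their slices. A non-mutating variant, as a sketch:

```go
package main

import (
	"fmt"
	"sort"
)

// equalUnordered is the same order-insensitive comparison as sliceEqual
// above, but sorts copies so the callers' slices are left untouched.
func equalUnordered(a, b []string) bool {
	if len(a) != len(b) {
		return false
	}
	a2, b2 := append([]string(nil), a...), append([]string(nil), b...)
	sort.Strings(a2)
	sort.Strings(b2)
	for i := range a2 {
		if a2[i] != b2[i] {
			return false
		}
	}
	return true
}

func main() {
	fmt.Println(equalUnordered(
		[]string{"linux/amd64", "linux/arm64"},
		[]string{"linux/arm64", "linux/amd64"},
	)) // true
}
```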
bake/bake_test.go:

```diff
@@ -3,6 +3,7 @@ package bake
 import (
 	"context"
 	"os"
+	"sort"
 	"testing"

 	"github.com/stretchr/testify/require"
@@ -34,7 +35,7 @@ target "webapp" {
 	ctx := context.TODO()

 	t.Run("NoOverrides", func(t *testing.T) {
-		m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, nil, nil)
+		m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, nil, nil)
 		require.NoError(t, err)
 		require.Equal(t, 1, len(m))

@@ -43,6 +44,9 @@ target "webapp" {
 		require.Equal(t, "webDEP", m["webapp"].Args["VAR_INHERITED"])
 		require.Equal(t, true, *m["webapp"].NoCache)
 		require.Nil(t, m["webapp"].Pull)
+
+		require.Equal(t, 1, len(g))
+		require.Equal(t, []string{"webapp"}, g[0].Targets)
 	})

 	t.Run("InvalidTargetOverrides", func(t *testing.T) {
@@ -56,7 +60,7 @@ target "webapp" {
 		os.Setenv("VAR_FROMENV"+t.Name(), "fromEnv")
 		defer os.Unsetenv("VAR_FROM_ENV" + t.Name())

-		m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
+		m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
 			"webapp.args.VAR_UNSET",
 			"webapp.args.VAR_EMPTY=",
 			"webapp.args.VAR_SET=bananas",
@@ -81,17 +85,23 @@ target "webapp" {

 		require.Equal(t, m["webapp"].Args["VAR_BOTH"], "webapp")
 		require.Equal(t, m["webapp"].Args["VAR_INHERITED"], "override")
+
+		require.Equal(t, 1, len(g))
+		require.Equal(t, []string{"webapp"}, g[0].Targets)
 	})

 	// building leaf but overriding parent fields
 	t.Run("parent", func(t *testing.T) {
-		m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
+		m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{
 			"webDEP.args.VAR_INHERITED=override",
 			"webDEP.args.VAR_BOTH=override",
 		}, nil)

 		require.NoError(t, err)
 		require.Equal(t, m["webapp"].Args["VAR_INHERITED"], "override")
 		require.Equal(t, m["webapp"].Args["VAR_BOTH"], "webapp")
+		require.Equal(t, 1, len(g))
+		require.Equal(t, []string{"webapp"}, g[0].Targets)
 	})
 })
@@ -99,40 +109,48 @@ target "webapp" {
 		_, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context"}, nil)
 		require.NotNil(t, err)

-		m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context=foo"}, nil)
+		m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.context=foo"}, nil)
 		require.NoError(t, err)

 		require.Equal(t, "foo", *m["webapp"].Context)
+		require.Equal(t, 1, len(g))
+		require.Equal(t, []string{"webapp"}, g[0].Targets)
 	})

 	t.Run("NoCacheOverride", func(t *testing.T) {
-		m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.no-cache=false"}, nil)
+		m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.no-cache=false"}, nil)
 		require.NoError(t, err)
 		require.Equal(t, false, *m["webapp"].NoCache)
+		require.Equal(t, 1, len(g))
+		require.Equal(t, []string{"webapp"}, g[0].Targets)
 	})

 	t.Run("PullOverride", func(t *testing.T) {
-		m, _, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.pull=false"}, nil)
+		m, g, err := ReadTargets(ctx, []File{fp}, []string{"webapp"}, []string{"webapp.pull=false"}, nil)
 		require.NoError(t, err)
 		require.Equal(t, false, *m["webapp"].Pull)
+		require.Equal(t, 1, len(g))
+		require.Equal(t, []string{"webapp"}, g[0].Targets)
 	})

 	t.Run("PatternOverride", func(t *testing.T) {
 		// same check for two cases
-		multiTargetCheck := func(t *testing.T, m map[string]*Target, err error) {
+		multiTargetCheck := func(t *testing.T, m map[string]*Target, g []*Group, err error) {
 			require.NoError(t, err)
 			require.Equal(t, 2, len(m))
 			require.Equal(t, "foo", *m["webapp"].Dockerfile)
 			require.Equal(t, "webDEP", m["webapp"].Args["VAR_INHERITED"])
 			require.Equal(t, "foo", *m["webDEP"].Dockerfile)
 			require.Equal(t, "webDEP", m["webDEP"].Args["VAR_INHERITED"])
+			require.Equal(t, 1, len(g))
+			sort.Strings(g[0].Targets)
+			require.Equal(t, []string{"webDEP", "webapp"}, g[0].Targets)
 		}

 		cases := []struct {
 			name      string
 			targets   []string
 			overrides []string
-			check     func(*testing.T, map[string]*Target, error)
+			check     func(*testing.T, map[string]*Target, []*Group, error)
 		}{
 			{
 				name: "multi target single pattern",
@@ -150,18 +168,20 @@ target "webapp" {
 				name:      "single target",
 				targets:   []string{"webapp"},
 				overrides: []string{"web*.dockerfile=foo"},
-				check: func(t *testing.T, m map[string]*Target, err error) {
+				check: func(t *testing.T, m map[string]*Target, g []*Group, err error) {
 					require.NoError(t, err)
 					require.Equal(t, 1, len(m))
 					require.Equal(t, "foo", *m["webapp"].Dockerfile)
 					require.Equal(t, "webDEP", m["webapp"].Args["VAR_INHERITED"])
+					require.Equal(t, 1, len(g))
+					require.Equal(t, []string{"webapp"}, g[0].Targets)
 				},
 			},
 			{
 				name:      "nomatch",
 				targets:   []string{"webapp"},
 				overrides: []string{"nomatch*.dockerfile=foo"},
-				check: func(t *testing.T, m map[string]*Target, err error) {
+				check: func(t *testing.T, m map[string]*Target, g []*Group, err error) {
 					// NOTE: I am unsure whether failing to match should always error out
 					// instead of simply skipping that override.
 					// Let's enforce the error and we can relax it later if users complain.
@@ -172,8 +192,8 @@ target "webapp" {
 	}
 	for _, test := range cases {
 		t.Run(test.name, func(t *testing.T) {
-			m, _, err := ReadTargets(ctx, []File{fp}, test.targets, test.overrides, nil)
-			test.check(t, m, err)
+			m, g, err := ReadTargets(ctx, []File{fp}, test.targets, test.overrides, nil)
+			test.check(t, m, g, err)
 		})
 	}
 })
@@ -183,7 +203,7 @@ func TestPushOverride(t *testing.T) {
 	t.Parallel()

 	fp := File{
-		Name: "docker-bake.hc",
+		Name: "docker-bake.hcl",
 		Data: []byte(
 			`target "app" {
 				output = ["type=image,compression=zstd"]
@@ -197,7 +217,7 @@ func TestPushOverride(t *testing.T) {
 	require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])

 	fp = File{
-		Name: "docker-bake.hc",
+		Name: "docker-bake.hcl",
 		Data: []byte(
 			`target "app" {
 				output = ["type=image,compression=zstd"]
@@ -211,7 +231,7 @@ func TestPushOverride(t *testing.T) {
 	require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])

 	fp = File{
-		Name: "docker-bake.hc",
+		Name: "docker-bake.hcl",
 		Data: []byte(
 			`target "app" {
 			}`),
@@ -260,16 +280,21 @@ services:

 	ctx := context.TODO()

-	m, _, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil, nil)
+	m, g, err := ReadTargets(ctx, []File{fp, fp2}, []string{"default"}, nil, nil)
 	require.NoError(t, err)

 	require.Equal(t, 3, len(m))
 	_, ok := m["newservice"]

 	require.True(t, ok)
 	require.Equal(t, "Dockerfile.webapp", *m["webapp"].Dockerfile)
 	require.Equal(t, ".", *m["webapp"].Context)
 	require.Equal(t, "1", m["webapp"].Args["buildno"])
 	require.Equal(t, "12", m["webapp"].Args["buildno2"])
+
+	require.Equal(t, 1, len(g))
+	sort.Strings(g[0].Targets)
+	require.Equal(t, []string{"db", "newservice", "webapp"}, g[0].Targets)
 }

 func TestHCLCwdPrefix(t *testing.T) {
@@ -282,7 +307,7 @@ func TestHCLCwdPrefix(t *testing.T) {
 		}`),
 	}
 	ctx := context.TODO()
-	m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
+	m, g, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil)
 	require.NoError(t, err)

 	require.Equal(t, 1, len(m))
@@ -294,6 +319,9 @@ func TestHCLCwdPrefix(t *testing.T) {

 	require.Equal(t, "test", *m["app"].Dockerfile)
 	require.Equal(t, "foo", *m["app"].Context)
+
+	require.Equal(t, 1, len(g))
+	require.Equal(t, []string{"app"}, g[0].Targets)
 }

 func TestOverrideMerge(t *testing.T) {
```
@@ -324,3 +352,772 @@ func TestOverrideMerge(t *testing.T) {
|
||||
require.Equal(t, 1, len(m["app"].Outputs))
|
||||
require.Equal(t, "type=registry", m["app"].Outputs[0])
|
||||
}
|
||||
|
||||
func TestReadContexts(t *testing.T) {
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
target "base" {
|
||||
contexts = {
|
||||
foo: "bar"
|
||||
abc: "def"
|
||||
}
|
||||
}
|
||||
target "app" {
|
||||
inherits = ["base"]
|
||||
contexts = {
|
||||
foo: "baz"
|
||||
}
|
||||
}
|
||||
`),
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(m))
|
||||
_, ok := m["app"]
|
||||
require.True(t, ok)
|
||||
|
||||
bo, err := TargetsToBuildOpt(m, &Input{})
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxs := bo["app"].Inputs.NamedContexts
|
||||
require.Equal(t, 2, len(ctxs))
|
||||
|
||||
require.Equal(t, "baz", ctxs["foo"].Path)
|
||||
require.Equal(t, "def", ctxs["abc"].Path)
|
||||
|
||||
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"app.contexts.foo=bay", "base.contexts.ghi=jkl"}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(m))
|
||||
_, ok = m["app"]
|
||||
require.True(t, ok)
|
||||
|
||||
bo, err = TargetsToBuildOpt(m, &Input{})
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxs = bo["app"].Inputs.NamedContexts
|
||||
require.Equal(t, 3, len(ctxs))
|
||||
|
||||
require.Equal(t, "bay", ctxs["foo"].Path)
|
||||
require.Equal(t, "def", ctxs["abc"].Path)
|
||||
require.Equal(t, "jkl", ctxs["ghi"].Path)
|
||||
|
||||
// test resetting base values
|
||||
m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"app.contexts.foo="}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 1, len(m))
|
||||
_, ok = m["app"]
|
||||
require.True(t, ok)
|
||||
|
||||
bo, err = TargetsToBuildOpt(m, &Input{})
|
||||
require.NoError(t, err)
|
||||
|
||||
ctxs = bo["app"].Inputs.NamedContexts
|
||||
require.Equal(t, 1, len(ctxs))
|
||||
require.Equal(t, "def", ctxs["abc"].Path)
|
||||
}
|
||||
|
||||
func TestReadContextFromTargetUnknown(t *testing.T) {
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
target "base" {
|
||||
contexts = {
|
||||
foo: "bar"
|
||||
abc: "def"
|
||||
}
|
||||
}
|
||||
target "app" {
|
||||
contexts = {
|
||||
foo: "baz"
|
||||
bar: "target:bar"
|
||||
}
|
||||
}
|
||||
`),
|
||||
}
|
||||
|
||||
ctx := context.TODO()
|
||||
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "failed to find target bar")
|
||||
}
|
||||
func TestReadContextFromTargetChain(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
target "base" {
|
||||
}
|
||||
target "mid" {
|
||||
output = ["foo"]
|
||||
contexts = {
|
||||
parent: "target:base"
|
||||
}
|
||||
}
|
||||
target "app" {
|
||||
contexts = {
|
||||
foo: "baz"
|
||||
bar: "target:mid"
|
||||
}
|
||||
}
|
||||
target "unused" {}
|
||||
`),
|
||||
}
|
||||
|
||||
m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
require.Equal(t, 3, len(m))
|
||||
app, ok := m["app"]
|
||||
require.True(t, ok)
|
||||
|
||||
require.Equal(t, 2, len(app.Contexts))
|
||||
|
||||
mid, ok := m["mid"]
|
||||
require.True(t, ok)
|
||||
require.Equal(t, 0, len(mid.Outputs))
|
||||
require.Equal(t, 1, len(mid.Contexts))
|
||||
|
||||
base, ok := m["base"]
|
||||
require.True(t, ok)
|
||||
require.Equal(t, 0, len(base.Contexts))
|
||||
}
|
||||
|
||||
func TestReadContextFromTargetInfiniteLoop(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
target "mid" {
|
||||
output = ["foo"]
|
||||
contexts = {
|
||||
parent: "target:app"
|
||||
}
|
||||
}
|
||||
target "app" {
|
||||
contexts = {
|
||||
foo: "baz"
|
||||
bar: "target:mid"
|
||||
}
|
||||
}
|
||||
`),
|
||||
}
|
||||
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app", "mid"}, []string{}, nil)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "infinite loop from")
|
||||
}
|
||||
|
||||
func TestReadContextFromTargetMultiPlatform(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
target "mid" {
|
||||
output = ["foo"]
|
||||
platforms = ["linux/amd64", "linux/arm64"]
|
||||
}
|
||||
target "app" {
|
||||
contexts = {
|
||||
bar: "target:mid"
|
||||
}
|
||||
platforms = ["linux/amd64", "linux/arm64"]
|
||||
}
|
||||
`),
|
||||
}
|
||||
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
||||
require.NoError(t, err)
|
||||
}
|
||||
|
||||
func TestReadContextFromTargetInvalidPlatforms(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
fp := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
target "mid" {
|
||||
output = ["foo"]
|
||||
platforms = ["linux/amd64", "linux/riscv64"]
|
||||
}
|
||||
target "app" {
|
||||
contexts = {
|
||||
bar: "target:mid"
|
||||
}
|
||||
platforms = ["linux/amd64", "linux/arm64"]
|
||||
}
|
||||
`),
|
||||
}
|
||||
_, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
|
||||
require.Error(t, err)
|
||||
require.Contains(t, err.Error(), "defined for different platforms")
|
||||
}
|
||||
|
||||
func TestReadTargetsDefault(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := context.TODO()
|
||||
|
||||
f := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
target "default" {
|
||||
dockerfile = "test"
|
||||
}`)}
|
||||
|
||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 0, len(g))
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, "test", *m["default"].Dockerfile)
|
||||
}
|
||||
|
||||
func TestReadTargetsSpecified(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := context.TODO()
|
||||
|
||||
f := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
target "image" {
|
||||
dockerfile = "test"
|
||||
}`)}
|
||||
|
||||
_, _, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
|
||||
require.Error(t, err)
|
||||
|
||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"image"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, "test", *m["image"].Dockerfile)
|
||||
}
|
||||
|
||||
func TestReadTargetsGroup(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := context.TODO()
|
||||
|
||||
f := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
group "foo" {
|
||||
targets = ["image"]
|
||||
}
|
||||
target "image" {
|
||||
dockerfile = "test"
|
||||
}`)}
|
||||
|
||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, "test", *m["image"].Dockerfile)
|
||||
}
|
||||
|
||||
func TestReadTargetsGroupAndTarget(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := context.TODO()
|
||||
|
||||
f := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
group "foo" {
|
||||
targets = ["image"]
|
||||
}
|
||||
target "foo" {
|
||||
dockerfile = "bar"
|
||||
}
|
||||
target "image" {
|
||||
dockerfile = "test"
|
||||
}`)}
|
||||
|
||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, "test", *m["image"].Dockerfile)
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "foo"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, "test", *m["image"].Dockerfile)
|
||||
}
|
||||
|
||||
func TestReadTargetsMixed(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := context.TODO()
|
||||
|
||||
fhcl := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
group "default" {
|
||||
targets = ["image"]
|
||||
}
|
||||
target "nocache" {
|
||||
no-cache = true
|
||||
}
|
||||
group "release" {
|
||||
targets = ["image-release"]
|
||||
}
|
||||
target "image" {
|
||||
inherits = ["nocache"]
|
||||
output = ["type=docker"]
|
||||
}
|
||||
target "image-release" {
|
||||
inherits = ["image"]
|
||||
output = ["type=image,push=true"]
|
||||
tags = ["user/app:latest"]
|
||||
}`)}
|
||||
|
||||
fyml := File{
|
||||
Name: "docker-compose.yml",
|
||||
Data: []byte(`
|
||||
services:
|
||||
addon:
|
||||
build:
|
||||
context: .
|
||||
dockerfile: ./Dockerfile
|
||||
args:
|
||||
CT_ECR: foo
|
||||
CT_TAG: bar
|
||||
image: ct-addon:bar
|
||||
environment:
|
||||
- NODE_ENV=test
|
||||
- AWS_ACCESS_KEY_ID=dummy
|
||||
- AWS_SECRET_ACCESS_KEY=dummy
|
||||
aws:
|
||||
build:
|
||||
dockerfile: ./aws.Dockerfile
|
||||
args:
|
||||
CT_ECR: foo
|
||||
CT_TAG: bar
|
||||
image: ct-fake-aws:bar`)}
|
||||
|
||||
fjson := File{
|
||||
Name: "docker-bake.json",
|
||||
Data: []byte(`{
|
||||
"group": {
|
||||
"default": {
|
||||
"targets": [
|
||||
"image"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"image": {
|
||||
"context": ".",
|
||||
"dockerfile": "Dockerfile",
|
||||
"output": [
|
||||
"type=docker"
|
||||
]
|
||||
}
|
||||
}
|
||||
}`)}
|
||||
|
||||
m, g, err := ReadTargets(ctx, []File{fhcl}, []string{"default"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, 1, len(m["image"].Outputs))
|
||||
require.Equal(t, "type=docker", m["image"].Outputs[0])
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image-release"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"image-release"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, 1, len(m["image-release"].Outputs))
|
||||
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image", "image-release"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"image", "image-release"}, g[0].Targets)
|
||||
require.Equal(t, 2, len(m))
|
||||
require.Equal(t, ".", *m["image"].Context)
|
||||
require.Equal(t, 1, len(m["image-release"].Outputs))
|
||||
require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"default"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, ".", *m["image"].Context)
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{fjson}, []string{"default"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"image"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, ".", *m["image"].Context)
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{fyml}, []string{"default"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
sort.Strings(g[0].Targets)
|
||||
require.Equal(t, []string{"addon", "aws"}, g[0].Targets)
|
||||
require.Equal(t, 2, len(m))
|
||||
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
|
||||
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"addon", "aws"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
sort.Strings(g[0].Targets)
|
||||
require.Equal(t, []string{"addon", "aws"}, g[0].Targets)
|
||||
require.Equal(t, 2, len(m))
|
||||
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
|
||||
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"addon", "aws", "image"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
sort.Strings(g[0].Targets)
|
||||
require.Equal(t, []string{"addon", "aws", "image"}, g[0].Targets)
|
||||
require.Equal(t, 3, len(m))
|
||||
require.Equal(t, ".", *m["image"].Context)
|
||||
require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
|
||||
require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
|
||||
}
|
||||
|
||||
func TestReadTargetsSameGroupTarget(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := context.TODO()
|
||||
|
||||
f := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
group "foo" {
|
||||
targets = ["foo"]
|
||||
}
|
||||
target "foo" {
|
||||
dockerfile = "bar"
|
||||
}
|
||||
target "image" {
|
||||
output = ["type=docker"]
|
||||
}`)}
|
||||
|
||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"foo"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "foo"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"foo"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
||||
}
|
||||
|
||||
func TestReadTargetsSameGroupTargetMulti(t *testing.T) {
|
||||
t.Parallel()
|
||||
ctx := context.TODO()
|
||||
|
||||
f := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
group "foo" {
|
||||
targets = ["foo", "image"]
|
||||
}
|
||||
target "foo" {
|
||||
dockerfile = "bar"
|
||||
}
|
||||
target "image" {
|
||||
output = ["type=docker"]
|
||||
}`)}
|
||||
|
||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"foo", "image"}, g[0].Targets)
|
||||
require.Equal(t, 2, len(m))
|
||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
||||
require.Equal(t, "type=docker", m["image"].Outputs[0])
|
||||
|
||||
m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "image"}, nil, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"foo", "image"}, g[0].Targets)
|
||||
require.Equal(t, 2, len(m))
|
||||
require.Equal(t, "bar", *m["foo"].Dockerfile)
|
||||
require.Equal(t, "type=docker", m["image"].Outputs[0])
|
||||
}
|
||||
|
||||
func TestNestedInherits(t *testing.T) {
|
||||
ctx := context.TODO()
|
||||
|
||||
f := File{
|
||||
Name: "docker-bake.hcl",
|
||||
Data: []byte(`
|
||||
target "a" {
|
||||
args = {
|
||||
foo = "123"
|
||||
bar = "234"
|
||||
}
|
||||
}
|
||||
target "b" {
|
||||
inherits = ["a"]
|
||||
args = {
|
||||
bar = "567"
|
||||
}
|
||||
}
|
||||
target "c" {
|
||||
inherits = ["a"]
|
||||
args = {
|
||||
baz = "890"
|
||||
}
|
||||
}
|
||||
target "d" {
|
||||
inherits = ["b", "c"]
|
||||
}`)}
|
||||
|
||||
cases := []struct {
|
||||
name string
|
||||
overrides []string
|
||||
want map[string]string
|
||||
}{
|
||||
{
|
||||
name: "nested simple",
|
||||
overrides: nil,
|
||||
want: map[string]string{"bar": "234", "baz": "890", "foo": "123"},
|
||||
},
|
||||
{
|
||||
name: "nested with overrides first",
|
||||
overrides: []string{"a.args.foo=321", "b.args.bar=432"},
|
||||
want: map[string]string{"bar": "234", "baz": "890", "foo": "321"},
|
||||
},
|
||||
{
|
||||
name: "nested with overrides last",
|
||||
overrides: []string{"a.args.foo=321", "c.args.bar=432"},
|
||||
want: map[string]string{"bar": "432", "baz": "890", "foo": "321"},
|
||||
},
|
||||
}
|
||||
for _, tt := range cases {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
m, g, err := ReadTargets(ctx, []File{f}, []string{"d"}, tt.overrides, nil)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 1, len(g))
|
||||
require.Equal(t, []string{"d"}, g[0].Targets)
|
||||
require.Equal(t, 1, len(m))
|
||||
require.Equal(t, tt.want, m["d"].Args)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNestedInheritsWithGroup(t *testing.T) {
	ctx := context.TODO()

	f := File{
		Name: "docker-bake.hcl",
		Data: []byte(`
target "grandparent" {
  output = ["type=docker"]
  args = {
    BAR = "fuu"
  }
}
target "parent" {
  inherits = ["grandparent"]
  args = {
    FOO = "bar"
  }
}
target "child1" {
  inherits = ["parent"]
}
target "child2" {
  inherits = ["parent"]
  args = {
    FOO2 = "bar2"
  }
}
group "default" {
  targets = [
    "child1",
    "child2"
  ]
}`)}

	cases := []struct {
		name      string
		overrides []string
		wantch1   map[string]string
		wantch2   map[string]string
	}{
		{
			name:      "nested simple",
			overrides: nil,
			wantch1:   map[string]string{"BAR": "fuu", "FOO": "bar"},
			wantch2:   map[string]string{"BAR": "fuu", "FOO": "bar", "FOO2": "bar2"},
		},
		{
			name:      "nested with overrides first",
			overrides: []string{"grandparent.args.BAR=fii", "child1.args.FOO=baaar"},
			wantch1:   map[string]string{"BAR": "fii", "FOO": "baaar"},
			wantch2:   map[string]string{"BAR": "fii", "FOO": "bar", "FOO2": "bar2"},
		},
		{
			name:      "nested with overrides last",
			overrides: []string{"grandparent.args.BAR=fii", "child2.args.FOO=baaar"},
			wantch1:   map[string]string{"BAR": "fii", "FOO": "bar"},
			wantch2:   map[string]string{"BAR": "fii", "FOO": "baaar", "FOO2": "bar2"},
		},
	}
	for _, tt := range cases {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, tt.overrides, nil)
			require.NoError(t, err)
			require.Equal(t, 1, len(g))
			require.Equal(t, []string{"child1", "child2"}, g[0].Targets)
			require.Equal(t, 2, len(m))
			require.Equal(t, tt.wantch1, m["child1"].Args)
			require.Equal(t, []string{"type=docker"}, m["child1"].Outputs)
			require.Equal(t, tt.wantch2, m["child2"].Args)
			require.Equal(t, []string{"type=docker"}, m["child2"].Outputs)
		})
	}
}

func TestTargetName(t *testing.T) {
	ctx := context.TODO()
	cases := []struct {
		target  string
		wantErr bool
	}{
		{
			target:  "a",
			wantErr: false,
		},
		{
			target:  "abc",
			wantErr: false,
		},
		{
			target:  "a/b",
			wantErr: true,
		},
		{
			target:  "a.b",
			wantErr: true,
		},
		{
			target:  "_a",
			wantErr: false,
		},
		{
			target:  "a_b",
			wantErr: false,
		},
		{
			target:  "AbC",
			wantErr: false,
		},
		{
			target:  "AbC-0123",
			wantErr: false,
		},
	}
	for _, tt := range cases {
		tt := tt
		t.Run(tt.target, func(t *testing.T) {
			_, _, err := ReadTargets(ctx, []File{{
				Name: "docker-bake.hcl",
				Data: []byte(`target "` + tt.target + `" {}`),
			}}, []string{tt.target}, nil, nil)
			if tt.wantErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

func TestNestedGroupsWithSameTarget(t *testing.T) {
	ctx := context.TODO()

	f := File{
		Name: "docker-bake.hcl",
		Data: []byte(`
group "a" {
  targets = ["b", "c"]
}

group "b" {
  targets = ["d"]
}

group "c" {
  targets = ["b"]
}

target "d" {
  context = "."
  dockerfile = "./testdockerfile"
}

group "e" {
  targets = ["a", "f"]
}

target "f" {
  context = "./foo"
}`)}

	cases := []struct {
		name     string
		targets  []string
		ntargets int
	}{
		{
			name:     "a",
			targets:  []string{"b", "c"},
			ntargets: 1,
		},
		{
			name:     "b",
			targets:  []string{"d"},
			ntargets: 1,
		},
		{
			name:     "c",
			targets:  []string{"b"},
			ntargets: 1,
		},
		{
			name:     "d",
			targets:  []string{"d"},
			ntargets: 1,
		},
		{
			name:     "e",
			targets:  []string{"a", "f"},
			ntargets: 2,
		},
	}
	for _, tt := range cases {
		tt := tt
		t.Run(tt.name, func(t *testing.T) {
			m, g, err := ReadTargets(ctx, []File{f}, []string{tt.name}, nil, nil)
			require.NoError(t, err)
			require.Equal(t, 1, len(g))
			require.Equal(t, tt.targets, g[0].Targets)
			require.Equal(t, tt.ntargets, len(m))
			require.Equal(t, ".", *m["d"].Context)
			require.Equal(t, "./testdockerfile", *m["d"].Dockerfile)
		})
	}
}

@@ -8,6 +8,7 @@ import (

	"github.com/compose-spec/compose-go/loader"
	compose "github.com/compose-spec/compose-go/types"
	"github.com/pkg/errors"
)

func parseCompose(dt []byte) (*compose.Project, error) {
@@ -59,6 +60,10 @@ func ParseCompose(dt []byte) (*Config, error) {
			continue
		}

		if err = validateTargetNameCompose(s.Name); err != nil {
			return nil, errors.Wrapf(err, "invalid service name %q", s.Name)
		}

		var contextPathP *string
		if s.Build.Context != "" {
			contextPath := s.Build.Context
@@ -76,10 +81,14 @@ func ParseCompose(dt []byte) (*Config, error) {
			Dockerfile: dockerfilePathP,
			Labels:     s.Build.Labels,
			Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
				if val, ok := s.Environment[val]; ok && val != nil {
					return *val, true
				}
				val, ok := cfg.Environment[val]
				return val, ok
			})),
			CacheFrom:   s.Build.CacheFrom,
			NetworkMode: &s.Build.Network,
		}
		if err = t.composeExtTarget(s.Build.Extensions); err != nil {
			return nil, err
		}
@@ -185,6 +194,14 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
		if res, ok := val.(bool); ok {
			t.NoCache = &res
		}
	case "no-cache-filter":
		if res, ok := val.(string); ok {
			t.NoCacheFilter = append(t.NoCacheFilter, res)
		} else {
			for _, res := range val.([]interface{}) {
				t.NoCacheFilter = append(t.NoCacheFilter, res.(string))
			}
		}
	default:
		return fmt.Errorf("compose file invalid: unknown %s field for x-bake", key)
	}

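// Editor's note (not part of the diff): the "no-cache-filter" case above
// accepts either a single string or a list of strings. A minimal sketch of a
// compose input exercising it, together with the new "network" build key; the
// service and stage names are illustrative.
func TestXBakeNoCacheFilterSketch(t *testing.T) {
	var dt = []byte(`
services:
  app:
    build:
      context: .
      network: host
      x-bake:
        no-cache-filter:
          - assets
          - release
`)
	c, err := ParseCompose(dt)
	require.NoError(t, err)
	require.Equal(t, []string{"assets", "release"}, c.Targets[0].NoCacheFilter)
	require.Equal(t, "host", *c.Targets[0].NetworkMode)
}
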
@@ -19,6 +19,8 @@ services:
    build:
      context: ./dir
      dockerfile: Dockerfile-alternate
      network:
        none
      args:
        buildno: 123
`)
@@ -43,6 +45,7 @@ services:
	require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
	require.Equal(t, 1, len(c.Targets[1].Args))
	require.Equal(t, "123", c.Targets[1].Args["buildno"])
	require.Equal(t, "none", *c.Targets[1].NetworkMode)
}

func TestNoBuildOutOfTreeService(t *testing.T) {
@@ -277,7 +280,114 @@ services:
	require.Equal(t, c.Targets[1].NoCache, newBool(true))
}

func TestEnv(t *testing.T) {
	envf, err := os.CreateTemp("", "env")
	require.NoError(t, err)
	defer os.Remove(envf.Name())

	_, err = envf.WriteString("FOO=bsdf -csdf\n")
	require.NoError(t, err)

	var dt = []byte(`
services:
  scratch:
    build:
      context: .
      args:
        CT_ECR: foo
        FOO:
        NODE_ENV:
    environment:
      - NODE_ENV=test
      - AWS_ACCESS_KEY_ID=dummy
      - AWS_SECRET_ACCESS_KEY=dummy
    env_file:
      - ` + envf.Name() + `
`)

	c, err := ParseCompose(dt)
	require.NoError(t, err)
	require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"})
}

func TestPorts(t *testing.T) {
	var dt = []byte(`
services:
  foo:
    build:
      context: .
    ports:
      - 3306:3306
  bar:
    build:
      context: .
    ports:
      - mode: ingress
        target: 3306
        published: "3306"
        protocol: tcp
`)
	_, err := ParseCompose(dt)
	require.NoError(t, err)
}

func newBool(val bool) *bool {
	b := val
	return &b
}

func TestServiceName(t *testing.T) {
	cases := []struct {
		svc     string
		wantErr bool
	}{
		{
			svc:     "a",
			wantErr: false,
		},
		{
			svc:     "abc",
			wantErr: false,
		},
		{
			svc:     "a.b",
			wantErr: false,
		},
		{
			svc:     "a?b",
			wantErr: true,
		},
		{
			svc:     "_a",
			wantErr: false,
		},
		{
			svc:     "a_b",
			wantErr: false,
		},
		{
			svc:     "AbC",
			wantErr: false,
		},
		{
			svc:     "AbC-0123",
			wantErr: false,
		},
	}
	for _, tt := range cases {
		tt := tt
		t.Run(tt.svc, func(t *testing.T) {
			_, err := ParseCompose([]byte(`
services:
  ` + tt.svc + `:
    build:
      context: .
`))
			if tt.wantErr {
				require.Error(t, err)
			} else {
				require.NoError(t, err)
			}
		})
	}
}

@@ -3,7 +3,7 @@ package bake
import (
	"strings"

	"github.com/hashicorp/hcl/v2"
	"github.com/hashicorp/hcl/v2/hclparse"
	"github.com/moby/buildkit/solver/errdefs"
	"github.com/moby/buildkit/solver/pb"
@@ -16,8 +16,9 @@ import (
)

type Opt struct {
	LookupVar     func(string) (string, bool)
	Vars          map[string]string
	ValidateLabel func(string) error
}

type variable struct {
@@ -262,6 +263,12 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
		}
	}

	if opt.ValidateLabel == nil {
		opt.ValidateLabel = func(string) error {
			return nil
		}
	}

	p := &parser{
		opt: opt,
@@ -446,6 +453,17 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
			continue
		}

		if err := opt.ValidateLabel(b.Labels[0]); err != nil {
			return hcl.Diagnostics{
				&hcl.Diagnostic{
					Severity: hcl.DiagError,
					Summary:  "Invalid name",
					Detail:   err.Error(),
					Subject:  &b.LabelRanges[0],
				},
			}
		}

		lblIndex := setLabel(vv, b.Labels[0])

		oldValue, exists := t.values[b.Labels[0]]
build/build.go (319 lines changed)
@@ -14,6 +14,7 @@ import (
	"strconv"
	"strings"
	"sync"
	"syscall"
	"time"

	"github.com/containerd/containerd/images"
@@ -22,6 +23,7 @@ import (
	"github.com/docker/buildx/util/imagetools"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/buildx/util/resolver"
	"github.com/docker/buildx/util/waitmap"
	"github.com/docker/cli/opts"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/api/types"
@@ -30,9 +32,11 @@ import (
	"github.com/docker/docker/pkg/urlutil"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/exporter/containerimage/exptypes"
	gateway "github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/upload/uploadprovider"
	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/util/apicaps"
	"github.com/moby/buildkit/util/entitlements"
	"github.com/moby/buildkit/util/progress/progresswriter"
@@ -53,24 +57,25 @@ var (
type Options struct {
	Inputs Inputs

	Allow         []entitlements.Entitlement
	BuildArgs     map[string]string
	CacheFrom     []client.CacheOptionsEntry
	CacheTo       []client.CacheOptionsEntry
	CgroupParent  string
	Exports       []client.ExportEntry
	ExtraHosts    []string
	ImageIDFile   string
	Labels        map[string]string
	NetworkMode   string
	NoCache       bool
	NoCacheFilter []string
	Platforms     []specs.Platform
	Pull          bool
	Session       []session.Attachable
	ShmSize       opts.MemBytes
	Tags          []string
	Target        string
	Ulimits       *opts.UlimitOpt
}

type Inputs struct {
@@ -79,14 +84,21 @@ type Inputs struct {
	InStream         io.Reader
	ContextState     *llb.State
	DockerfileInline string
	NamedContexts    map[string]NamedContext
}

type NamedContext struct {
	Path  string
	State *llb.State
}

type DriverInfo struct {
	Driver      driver.Driver
	Name        string
	Platform    []specs.Platform
	Err         error
	ImageOpt    imagetools.Opt
	ProxyConfig map[string]string
}

type DockerAPI interface {
@@ -177,6 +189,10 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
			pp = append(pp, p)
			mm[idx] = pp
		}
		// if no platform is specified, use first driver
		if len(mm) == 0 {
			mm[0] = nil
		}
		dps := make([]driverPair, 0, 2)
		for idx, pp := range mm {
			dps = append(dps, driverPair{driverIndex: idx, platforms: pp})
@@ -333,7 +349,8 @@ func toRepoOnly(in string) (string, error) {
	return strings.Join(out, ","), nil
}

func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
	d := di.Driver
	defers := make([]func(), 0, 2)
	releaseF := func() {
		for _, f := range defers {
@@ -499,6 +516,12 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
				}
			}
		}
		if e.Type == "docker" || e.Type == "image" || e.Type == "oci" {
			// inline buildinfo attrs from build arg
			if v, ok := opt.BuildArgs["BUILDKIT_INLINE_BUILDINFO_ATTRS"]; ok {
				e.Attrs["buildinfo-attrs"] = v
			}
		}
	}

	so.Exports = opt.Exports
@@ -523,6 +546,9 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
	if opt.Target != "" {
		so.FrontendAttrs["target"] = opt.Target
	}
	if len(opt.NoCacheFilter) > 0 {
		so.FrontendAttrs["no-cache"] = strings.Join(opt.NoCacheFilter, ",")
	}
	if opt.NoCache {
		so.FrontendAttrs["no-cache"] = ""
	}
@@ -533,6 +559,12 @@ func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Opti
		so.FrontendAttrs["label:"+k] = v
	}

	for k, v := range di.ProxyConfig {
		if _, ok := opt.BuildArgs[k]; !ok {
			so.FrontendAttrs["build-arg:"+k] = v
		}
	}

	// set platforms
	if len(opt.Platforms) != 0 {
		pp := make([]string, len(opt.Platforms))
@@ -627,12 +659,12 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
		multiDriver := len(m[k]) > 1
		hasMobyDriver := false
		for i, dp := range m[k] {
			di := drivers[dp.driverIndex]
			if di.Driver.IsMobyDriver() {
				hasMobyDriver = true
			}
			opt.Platforms = dp.platforms
			so, release, err := toSolveOpt(ctx, di, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
				return newDockerLoader(ctx, docker, name, w)
			})
			if err != nil {
@@ -665,8 +697,35 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
		}
	}

	// validate that all links between targets use same drivers
	for name := range opt {
		dps := m[name]
		for _, dp := range dps {
			for k, v := range dp.so.FrontendAttrs {
				if strings.HasPrefix(k, "context:") && strings.HasPrefix(v, "target:") {
					k2 := strings.TrimPrefix(v, "target:")
					dps2, ok := m[k2]
					if !ok {
						return nil, errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
					}
					var found bool
					for _, dp2 := range dps2 {
						if dp2.driverIndex == dp.driverIndex {
							found = true
							break
						}
					}
					if !found {
						return nil, errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
					}
				}
			}
		}
	}

	resp = map[string]*client.SolveResponse{}
	var respMu sync.Mutex
	results := waitmap.New()

	multiTarget := len(opt) > 1

@@ -707,9 +766,12 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
				resp[k] = res[0]
				respMu.Unlock()
				if len(res) == 1 {
					dgst := res[0].ExporterResponse[exptypes.ExporterImageDigestKey]
					if v, ok := res[0].ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok {
						dgst = v
					}
					if opt.ImageIDFile != "" {
						return ioutil.WriteFile(opt.ImageIDFile, []byte(dgst), 0644)
					}
					return nil
				}
@@ -719,7 +781,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
				descs := make([]specs.Descriptor, 0, len(res))

				for _, r := range res {
					s, ok := r.ExporterResponse[exptypes.ExporterImageDigestKey]
					if ok {
						descs = append(descs, specs.Descriptor{
							Digest: digest.Digest(s),
@@ -791,7 +853,6 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do

		for i, dp := range dps {
			so := *dp.so
			if multiDriver {
				for i, e := range so.Exports {
					switch e.Type {
@@ -824,14 +885,43 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
			pw := progress.WithPrefix(w, k, multiTarget)

			c := clients[dp.driverIndex]

			eg.Go(func() error {
				pw = progress.ResetTime(pw)
				defer wg.Done()

				if err := waitContextDeps(ctx, dp.driverIndex, results, &so); err != nil {
					return err
				}

				frontendInputs := make(map[string]*pb.Definition)
				for key, st := range so.FrontendInputs {
					def, err := st.Marshal(ctx)
					if err != nil {
						return err
					}
					frontendInputs[key] = def.ToPB()
				}

				req := gateway.SolveRequest{
					Frontend:       so.Frontend,
					FrontendOpt:    so.FrontendAttrs,
					FrontendInputs: frontendInputs,
				}
				so.Frontend = ""
				so.FrontendAttrs = nil
				so.FrontendInputs = nil

				ch, done := progress.NewChannel(pw)
				defer func() { <-done }()

				rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
					res, err := c.Solve(ctx, req)
					if err != nil {
						return nil, err
					}
					results.Set(resultKey(dp.driverIndex, k), res)
					return res, nil
				}, ch)
				if err != nil {
					return err
				}
@@ -847,13 +937,27 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
							return errors.Errorf("tag is needed when pushing to registry")
						}
						pw := progress.ResetTime(pw)
						pushList := strings.Split(pushNames, ",")
						for _, name := range pushList {
							if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
								return pushWithMoby(ctx, d, name, l)
							}); err != nil {
								return err
							}
						}
						remoteDigest, err := remoteDigestWithMoby(ctx, d, pushList[0])
						if err == nil && remoteDigest != "" {
							// old daemons might not have containerimage.config.digest set
							// in response so use containerimage.digest value for it if available
							if _, ok := rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; !ok {
								if v, ok := rr.ExporterResponse[exptypes.ExporterImageDigestKey]; ok {
									rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey] = v
								}
							}
							rr.ExporterResponse[exptypes.ExporterImageDigestKey] = remoteDigest
						} else if err != nil {
							return err
						}
					}
				}
			}
@@ -958,6 +1062,29 @@ func pushWithMoby(ctx context.Context, d driver.Driver, name string, l progress.
	return nil
}

func remoteDigestWithMoby(ctx context.Context, d driver.Driver, name string) (string, error) {
	api := d.Config().DockerAPI
	if api == nil {
		return "", errors.Errorf("invalid empty Docker API reference") // should never happen
	}
	creds, err := imagetools.RegistryAuthForRef(name, d.Config().Auth)
	if err != nil {
		return "", err
	}
	image, _, err := api.ImageInspectWithRaw(ctx, name)
	if err != nil {
		return "", err
	}
	if len(image.RepoDigests) == 0 {
		return "", nil
	}
	remoteImage, err := api.DistributionInspect(ctx, name, creds)
	if err != nil {
		return "", err
	}
	return remoteImage.Descriptor.Digest.String(), nil
}

func createTempDockerfile(r io.Reader) (string, error) {
	dir, err := ioutil.TempDir("", "dockerfile")
	if err != nil {
@@ -1080,6 +1207,36 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr

	target.FrontendAttrs["filename"] = dockerfileName

	for k, v := range inp.NamedContexts {
		target.FrontendAttrs["frontend.caps"] = "moby.buildkit.frontend.contexts+forward"
		if v.State != nil {
			target.FrontendAttrs["context:"+k] = "input:" + k
			if target.FrontendInputs == nil {
				target.FrontendInputs = make(map[string]llb.State)
			}
			target.FrontendInputs[k] = *v.State
			continue
		}

		if urlutil.IsGitURL(v.Path) || urlutil.IsURL(v.Path) || strings.HasPrefix(v.Path, "docker-image://") || strings.HasPrefix(v.Path, "target:") {
			target.FrontendAttrs["context:"+k] = v.Path
			continue
		}
		st, err := os.Stat(v.Path)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to get build context %v", k)
		}
		if !st.IsDir() {
			return nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
		}
		localName := k
		if k == "context" || k == "dockerfile" {
			localName = "_" + k // underscore to avoid collisions
		}
		target.LocalDirs[localName] = v.Path
		target.FrontendAttrs["context:"+k] = "local:" + localName
	}

	release := func() {
		for _, dir := range toRemove {
			os.RemoveAll(dir)
@@ -1088,6 +1245,96 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr
	return release, nil
}

func resultKey(index int, name string) string {
	return fmt.Sprintf("%d-%s", index, name)
}

func waitContextDeps(ctx context.Context, index int, results *waitmap.Map, so *client.SolveOpt) error {
	m := map[string]string{}
	for k, v := range so.FrontendAttrs {
		if strings.HasPrefix(k, "context:") && strings.HasPrefix(v, "target:") {
			target := resultKey(index, strings.TrimPrefix(v, "target:"))
			m[target] = k
		}
	}
	if len(m) == 0 {
		return nil
	}
	keys := make([]string, 0, len(m))
	for k := range m {
		keys = append(keys, k)
	}
	res, err := results.Get(ctx, keys...)
	if err != nil {
		return err
	}

	for k, v := range m {
		r, ok := res[k]
		if !ok {
			continue
		}
		rr, ok := r.(*gateway.Result)
		if !ok {
			return errors.Errorf("invalid result type %T", r)
		}
		if so.FrontendAttrs == nil {
			so.FrontendAttrs = map[string]string{}
		}
		if so.FrontendInputs == nil {
			so.FrontendInputs = map[string]llb.State{}
		}
		if len(rr.Refs) > 0 {
			for platform, r := range rr.Refs {
				st, err := r.ToState()
				if err != nil {
					return err
				}
				so.FrontendInputs[k+"::"+platform] = st
				so.FrontendAttrs[v+"::"+platform] = "input:" + k + "::" + platform
				metadata := make(map[string][]byte)
				if dt, ok := rr.Metadata[exptypes.ExporterImageConfigKey+"/"+platform]; ok {
					metadata[exptypes.ExporterImageConfigKey] = dt
				}
				if dt, ok := rr.Metadata[exptypes.ExporterBuildInfo+"/"+platform]; ok {
					metadata[exptypes.ExporterBuildInfo] = dt
				}
				if len(metadata) > 0 {
					dt, err := json.Marshal(metadata)
					if err != nil {
						return err
					}
					so.FrontendAttrs["input-metadata:"+k+"::"+platform] = string(dt)
				}
			}
			delete(so.FrontendAttrs, v)
		}
		if rr.Ref != nil {
			st, err := rr.Ref.ToState()
			if err != nil {
				return err
			}
			so.FrontendInputs[k] = st
			so.FrontendAttrs[v] = "input:" + k
			metadata := make(map[string][]byte)
			if dt, ok := rr.Metadata[exptypes.ExporterImageConfigKey]; ok {
				metadata[exptypes.ExporterImageConfigKey] = dt
			}
			if dt, ok := rr.Metadata[exptypes.ExporterBuildInfo]; ok {
				metadata[exptypes.ExporterBuildInfo] = dt
			}
			if len(metadata) > 0 {
				dt, err := json.Marshal(metadata)
				if err != nil {
					return err
				}
				so.FrontendAttrs["input-metadata:"+k] = string(dt)
			}
		}
	}
	return nil
}

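// Editor's note (not part of the diff): the "context:<name>" -> "target:<name>"
// attribute pairs consumed by waitContextDeps originate from a named context
// that points at another target. A sketch of a bake definition that would
// exercise this path, assuming the `contexts` map syntax that accompanies this
// feature (the file and target names are illustrative):
//
//	target "base" {
//	  dockerfile = "Dockerfile.base"
//	}
//	target "app" {
//	  contexts = {
//	    base = "target:base"
//	  }
//	}
//
// LoadInputs turns the "app" entry into
// FrontendAttrs["context:base"] = "target:base"; waitContextDeps then waits on
// the solved result of "base" and rewrites the attribute into an "input:base"
// reference backed by that result.
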
func notSupported(d driver.Driver, f driver.Feature) error {
	return errors.Errorf("%s feature is currently not supported for %s driver. Please switch to a different driver (eg. \"docker buildx create --use\")", f, d.Factory().Name())
}

@@ -43,21 +43,19 @@ func init() {
}

func main() {
	if plugin.RunningStandalone() {
		dockerCli, err := command.NewDockerCli()
		if err != nil {
			fmt.Fprintln(os.Stderr, err)
			os.Exit(1)
		}
		opts := cliflags.NewClientOptions()
		dockerCli.Initialize(opts)
		rootCmd := commands.NewRootCmd(os.Args[0], false, dockerCli)
		if err := rootCmd.Execute(); err != nil {
			os.Exit(1)
		}
		os.Exit(0)
	}

	dockerCli, err := command.NewDockerCli()

@@ -13,7 +13,6 @@ import (
	"github.com/docker/buildx/util/progress"
	"github.com/docker/buildx/util/tracing"
	"github.com/docker/cli/cli/command"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
@@ -38,7 +37,6 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
	}()

	var url string
	cmdContext := "cwd://"

	if len(targets) > 0 {
@@ -49,7 +47,6 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
				if bake.IsRemoteURL(targets[0]) {
					cmdContext = targets[0]
					targets = targets[1:]
				}
			}
		}
@@ -57,7 +54,6 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error

	if len(targets) == 0 {
		targets = []string{"default"}
	}

	overrides := in.overrides
@@ -79,7 +75,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error

	ctx2, cancel := context.WithCancel(context.TODO())
	defer cancel()
	printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)

	defer func() {
		if printer != nil {
@@ -107,7 +103,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
		return err
	}

	tgts, grps, err := bake.ReadTargets(ctx, files, targets, overrides, map[string]string{
		// Don't forget to update documentation if you add a new
		// built-in variable: docs/reference/buildx_bake.md#built-in-variables
		"BAKE_CMD_CONTEXT": cmdContext,
@@ -118,31 +114,24 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
	}

	// this function can update target context string from the input so call before printOnly check
	bo, err := bake.TargetsToBuildOpt(tgts, inp)
	if err != nil {
		return err
	}

	if in.printOnly {
		var defg map[string]*bake.Group
		if len(grps) == 1 {
			defg = map[string]*bake.Group{
				"default": grps[0],
			}
		}
		dt, err := json.MarshalIndent(struct {
			Group  map[string]*bake.Group  `json:"group,omitempty"`
			Target map[string]*bake.Target `json:"target"`
		}{
			defg,
			tgts,
		}, "", "  ")
		if err != nil {
			return err
@@ -158,19 +147,15 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error

	resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
	if err != nil {
		return wrapBuildError(err, true)
	}

	if len(in.metadataFile) > 0 {
		dt := make(map[string]interface{})
		for t, r := range resp {
			dt[t] = decodeExporterResponse(r.ExporterResponse)
		}
		if err := writeMetadataFile(in.metadataFile, dt); err != nil {
			return err
		}
	}
@@ -201,10 +186,10 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	flags := cmd.Flags()

	flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
	flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
	flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
	flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
	flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)

	commonBuildFlags(&options.commonOptions, flags)

@@ -1,9 +1,12 @@
package commands

import (
	"bytes"
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"
@@ -14,18 +17,24 @@ import (
	"github.com/docker/buildx/util/platformutil"
	"github.com/docker/buildx/util/progress"
	"github.com/docker/buildx/util/tracing"
	"github.com/docker/cli-docs-tool/annotation"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	dockeropts "github.com/docker/cli/opts"
	"github.com/docker/distribution/reference"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/go-units"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/session/auth/authprovider"
	"github.com/moby/buildkit/solver/errdefs"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/moby/buildkit/util/grpcerrors"
	"github.com/morikuni/aec"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/spf13/cobra"
	"github.com/spf13/pflag"
	"google.golang.org/grpc/codes"
)

const defaultTargetName = "default"
@@ -34,24 +43,26 @@ type buildOptions struct {
	contextPath    string
	dockerfileName string

	allow         []string
	buildArgs     []string
	cacheFrom     []string
	cacheTo       []string
	cgroupParent  string
	contexts      []string
	extraHosts    []string
	imageIDFile   string
	labels        []string
	networkMode   string
	noCacheFilter []string
	outputs       []string
	platforms     []string
	quiet         bool
	secrets       []string
	shmSize       dockeropts.MemBytes
	ssh           []string
	tags          []string
	target        string
	ulimits       *dockeropts.UlimitOpt
	commonOptions
}

@@ -89,29 +100,40 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
		pull = *in.pull
	}

	if noCache && len(in.noCacheFilter) > 0 {
		return errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
	}

	if in.quiet && in.progress != "auto" && in.progress != "quiet" {
		return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
	} else if in.quiet {
		in.progress = "quiet"
	}

	contexts, err := parseContextNames(in.contexts)
	if err != nil {
		return err
	}

	opts := build.Options{
		Inputs: build.Inputs{
			ContextPath:    in.contextPath,
			DockerfilePath: in.dockerfileName,
			InStream:       os.Stdin,
			NamedContexts:  contexts,
		},
		BuildArgs:     listToMap(in.buildArgs, true),
		ExtraHosts:    in.extraHosts,
		ImageIDFile:   in.imageIDFile,
		Labels:        listToMap(in.labels, false),
		NetworkMode:   in.networkMode,
		NoCache:       noCache,
		NoCacheFilter: in.noCacheFilter,
		Pull:          pull,
		ShmSize:       in.shmSize,
		Tags:          in.tags,
		Target:        in.target,
		Ulimits:       in.ulimits,
	}

	platforms, err := platformutil.Parse(in.platforms)
@@ -204,6 +226,7 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
	}

	imageID, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
	err = wrapBuildError(err, false)
	if err != nil {
		return err
	}
@@ -223,7 +246,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]bu
	ctx2, cancel := context.WithCancel(context.TODO())
	defer cancel()

	printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode)

	resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
	err1 := printer.Wait()
@@ -235,18 +258,56 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]bu
	}

	if len(metadataFile) > 0 && resp != nil {
		if err := writeMetadataFile(metadataFile, decodeExporterResponse(resp[defaultTargetName].ExporterResponse)); err != nil {
			return "", err
		}
	}

	printWarnings(os.Stderr, printer.Warnings(), progressMode)

	return resp[defaultTargetName].ExporterResponse["containerimage.digest"], err
}

func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
	if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
		return
	}
	fmt.Fprintf(w, "\n ")
	sb := &bytes.Buffer{}
	if len(warnings) == 1 {
		fmt.Fprintf(sb, "1 warning found")
	} else {
		fmt.Fprintf(sb, "%d warnings found", len(warnings))
	}
	if logrus.GetLevel() < logrus.DebugLevel {
		fmt.Fprintf(sb, " (use --debug to expand)")
	}
	fmt.Fprintf(sb, ":\n")
	fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))

	for _, warn := range warnings {
		fmt.Fprintf(w, " - %s\n", warn.Short)
		if logrus.GetLevel() < logrus.DebugLevel {
			continue
		}
		for _, d := range warn.Detail {
			fmt.Fprintf(w, "%s\n", d)
		}
		if warn.URL != "" {
			fmt.Fprintf(w, "More info: %s\n", warn.URL)
		}
		if warn.SourceInfo != nil && warn.Range != nil {
			src := errdefs.Source{
				Info:   warn.SourceInfo,
				Ranges: warn.Range,
			}
			src.Print(w)
		}
		fmt.Fprintf(w, "\n")
	}
}

func newBuildOptions() buildOptions {
	ulimits := make(map[string]*units.Ulimit)
	return buildOptions{
@@ -277,51 +338,54 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {

	flags := cmd.Flags()

	flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
	flags.SetAnnotation("add-host", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})

	flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)

	flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
	flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"})

	flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, `External cache sources (e.g., "user/app:cache", "type=local,src=path/to/dir")`)

	flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, `Cache export destinations (e.g., "user/app:cache", "type=local,dest=path/to/dir")`)

	flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
	flags.SetAnnotation("cgroup-parent", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})

	flags.StringArrayVar(&options.contexts, "build-context", []string{}, "Additional build contexts (e.g., name=path)")

	flags.StringVarP(&options.dockerfileName, "file", "f", "", `Name of the Dockerfile (default: "PATH/Dockerfile")`)
	flags.SetAnnotation("file", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})

	flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")

	flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")

	flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--output=type=docker"`)

	flags.StringVar(&options.networkMode, "network", "default", `Set the networking mode for the "RUN" instructions during build`)

	flags.StringArrayVar(&options.noCacheFilter, "no-cache-filter", []string{}, "Do not cache specified stages")

	flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, `Output destination (format: "type=local,dest=path")`)

	flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")

	flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--output=type=registry"`)

	flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")

	flags.StringArrayVar(&options.secrets, "secret", []string{}, `Secret to expose to the build (format: "id=mysecret[,src=/local/secret]")`)

	flags.Var(&options.shmSize, "shm-size", `Size of "/dev/shm"`)

	flags.StringArrayVar(&options.ssh, "ssh", []string{}, `SSH agent socket or keys to expose to the build (format: "default|<id>[=<socket>|<key>[,<key>]]")`)

	flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, `Name and optionally a tag (format: "name:tag")`)
	flags.SetAnnotation("tag", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})

	flags.StringVar(&options.target, "target", "", "Set the target build stage to build")
	flags.SetAnnotation("target", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})

	flags.Var(options.ulimits, "ulimit", "Ulimit options")

@@ -349,7 +413,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
	flags.MarkHidden("memory")

	flags.StringVar(&ignore, "memory-swap", "", `Swap limit equal to memory plus swap: "-1" to enable unlimited swap`)
	flags.MarkHidden("memory-swap")

	flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
@@ -361,10 +425,10 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
	flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
	flags.MarkHidden("cpu-quota")

	flags.StringVar(&ignore, "cpuset-cpus", "", `CPUs in which to allow execution ("0-3", "0,1")`)
	flags.MarkHidden("cpuset-cpus")

	flags.StringVar(&ignore, "cpuset-mems", "", `MEMs in which to allow execution ("0-3", "0,1")`)
	flags.MarkHidden("cpuset-mems")

	flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
@@ -379,8 +443,8 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {

func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
	options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
	options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
	flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
}

@@ -392,7 +456,6 @@ func checkWarnedFlags(f *pflag.Flag) {
	switch t {
	case "flag-warn":
		logrus.Warn(m[0])
	}
}
@@ -416,3 +479,80 @@ func listToMap(values []string, defaultEnv bool) map[string]string {
	}
	return result
}

func parseContextNames(values []string) (map[string]build.NamedContext, error) {
	if len(values) == 0 {
		return nil, nil
	}
	result := make(map[string]build.NamedContext, len(values))
	for _, value := range values {
		kv := strings.SplitN(value, "=", 2)
		if len(kv) != 2 {
			return nil, errors.Errorf("invalid context value: %s, expected key=value", value)
		}
		named, err := reference.ParseNormalizedNamed(kv[0])
		if err != nil {
			return nil, errors.Wrapf(err, "invalid context name %s", kv[0])
		}
		name := strings.TrimSuffix(reference.FamiliarString(named), ":latest")
		result[name] = build.NamedContext{Path: kv[1]}
	}
	return result, nil
}

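// Editor's note (not part of the diff): the left-hand side of --build-context
// is parsed as an image reference, so keys are normalized to their familiar
// form and a trailing ":latest" is dropped. A minimal sketch with hypothetical
// values:
func exampleParseContextNames() (map[string]build.NamedContext, error) {
	return parseContextNames([]string{
		// key normalizes to "alpine"
		"docker.io/library/alpine=./vendor/alpine",
		// explicit ":latest" is trimmed; key becomes "myorg/base"
		"myorg/base:latest=docker-image://myorg/base:1.2",
	})
}
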
func writeMetadataFile(filename string, dt interface{}) error {
	b, err := json.MarshalIndent(dt, "", "  ")
	if err != nil {
		return err
	}
	return ioutils.AtomicWriteFile(filename, b, 0644)
}

func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
	out := make(map[string]interface{})
	for k, v := range exporterResponse {
		dt, err := base64.StdEncoding.DecodeString(v)
		if err != nil {
			out[k] = v
			continue
		}
		var raw map[string]interface{}
		if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
			out[k] = v
			continue
		}
		out[k] = json.RawMessage(dt)
	}
	return out
}

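// Editor's note (not part of the diff): decodeExporterResponse passes plain
// values through as strings and only inlines a value as raw JSON when it is
// valid base64 that decodes to a non-empty JSON object. A minimal sketch with
// hypothetical keys and values:
func exampleDecodeExporterResponse() map[string]interface{} {
	resp := map[string]string{
		// not valid base64, so it is kept as a plain string
		"containerimage.digest": "sha256:abc123",
		// base64-encoded JSON object, so it is inlined as json.RawMessage
		"containerimage.buildinfo": base64.StdEncoding.EncodeToString(
			[]byte(`{"frontend":"dockerfile.v0"}`)),
	}
	return decodeExporterResponse(resp)
}
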
func wrapBuildError(err error, bake bool) error {
	if err == nil {
		return nil
	}
	st, ok := grpcerrors.AsGRPCStatus(err)
	if ok {
		if st.Code() == codes.Unimplemented && strings.Contains(st.Message(), "unsupported frontend capability moby.buildkit.frontend.contexts") {
			msg := "current frontend does not support --build-context."
			if bake {
				msg = "current frontend does not support defining additional contexts for targets."
			}
			msg += " Named contexts are supported since Dockerfile v1.4. Use #syntax directive in Dockerfile or update to latest BuildKit."
			return &wrapped{err, msg}
		}
	}
	return err
}

type wrapped struct {
	err error
	msg string
}

func (w *wrapped) Error() string {
	return w.msg
}

func (w *wrapped) Unwrap() error {
	return w.err
}

@@ -212,7 +212,7 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
		if len(drivers.String()) > 0 {
			drivers.WriteString(", ")
		}
		drivers.WriteString(fmt.Sprintf(`"%s"`, d.Name()))
	}

	cmd := &cobra.Command{
@@ -4,16 +4,18 @@ import (
	"fmt"
	"io"
	"os"
	"strings"
	"text/tabwriter"
	"time"

	"github.com/docker/buildx/build"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/opts"
	"github.com/docker/go-units"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/spf13/cobra"
	"golang.org/x/sync/errgroup"
)

@@ -125,20 +127,20 @@ func printKV(w io.Writer, k string, v interface{}) {
func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
	for _, di := range du {
		printKV(tw, "ID", di.ID)
		if len(di.Parents) != 0 {
			printKV(tw, "Parent", strings.Join(di.Parents, ","))
		}
		printKV(tw, "Created at", di.CreatedAt)
		printKV(tw, "Mutable", di.Mutable)
		printKV(tw, "Reclaimable", !di.InUse)
		printKV(tw, "Shared", di.Shared)
		printKV(tw, "Size", units.HumanSize(float64(di.Size)))
		if di.Description != "" {
			printKV(tw, "Description", di.Description)
		}
		printKV(tw, "Usage count", di.UsageCount)
		if di.LastUsedAt != nil {
			printKV(tw, "Last used", units.HumanDuration(time.Since(*di.LastUsedAt))+" ago")
		}
		if di.RecordType != "" {
			printKV(tw, "Type", di.RecordType)
@@ -159,11 +161,15 @@ func printTableRow(tw *tabwriter.Writer, di *client.UsageInfo) {
	if di.Mutable {
		id += "*"
	}
	size := units.HumanSize(float64(di.Size))
	if di.Shared {
		size += "*"
	}
	lastAccessed := ""
	if di.LastUsedAt != nil {
		lastAccessed = units.HumanDuration(time.Since(*di.LastUsedAt)) + " ago"
	}
	fmt.Fprintf(tw, "%-40s\t%-5v\t%-10s\t%s\n", id, !di.InUse, size, lastAccessed)
}

func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
@@ -186,11 +192,11 @@ func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
	}

	if shared > 0 {
		fmt.Fprintf(tw, "Shared:\t%s\n", units.HumanSize(float64(shared)))
		fmt.Fprintf(tw, "Private:\t%s\n", units.HumanSize(float64(total-shared)))
	}

	fmt.Fprintf(tw, "Reclaimable:\t%s\n", units.HumanSize(float64(reclaimable)))
	fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
	tw.Flush()
}

@@ -1,28 +1,30 @@
package commands

import (
	"github.com/docker/buildx/store"
	"github.com/docker/buildx/store/storeutil"
	"github.com/docker/buildx/util/imagetools"
	"github.com/docker/cli-docs-tool/annotation"
	"github.com/docker/cli/cli"
	"github.com/docker/cli/cli/command"
	"github.com/moby/buildkit/util/appcontext"
	"github.com/pkg/errors"
	"github.com/spf13/cobra"
)

type inspectOptions struct {
	builder string
	format  string
	raw     bool
}

func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
	ctx := appcontext.Context()

	if in.format != "" && in.raw {
		return errors.Errorf("format and raw cannot be used together")
	}

	txn, release, err := storeutil.GetStore(dockerCli)
	if err != nil {
		return err
@@ -47,28 +49,13 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
	if err != nil {
		return err
	}

	p, err := imagetools.NewPrinter(ctx, imageopt, name, in.format)
	if err != nil {
		return err
	}

	return p.Print(in.raw, dockerCli.Out())
}

func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
@@ -76,7 +63,7 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {

	cmd := &cobra.Command{
		Use:   "inspect [OPTIONS] NAME",
		Short: "Show details of an image in the registry",
		Args:  cli.ExactArgs(1),
		RunE: func(cmd *cobra.Command, args []string) error {
			options.builder = rootOpts.Builder
@@ -85,7 +72,11 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
	}

	flags := cmd.Flags()

	flags.StringVar(&options.format, "format", "", "Format the output using the given Go template")
	flags.SetAnnotation("format", annotation.DefaultValue, []string{`"{{.Manifest}}"`})

	flags.BoolVar(&options.raw, "raw", false, "Show original, unformatted JSON manifest")

	return cmd
}

@@ -16,8 +16,8 @@ func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
||||
}
|
||||
|
||||
cmd.AddCommand(
|
||||
inspectCmd(dockerCli, opts),
|
||||
createCmd(dockerCli, opts),
|
||||
inspectCmd(dockerCli, opts),
|
||||
)
|
||||
|
||||
return cmd
|
||||
|
@@ -12,11 +12,11 @@ import (
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/opts"
|
||||
"github.com/docker/docker/api/types/filters"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/util/appcontext"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/tonistiigi/units"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
@@ -119,7 +119,7 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
|
||||
<-printed
|
||||
|
||||
tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
|
||||
fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
|
||||
fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
|
||||
tw.Flush()
|
||||
return nil
|
||||
}
|
||||
@@ -139,7 +139,7 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
|
||||
flags.Var(&options.filter, "filter", "Provide filter values (e.g., `until=24h`)")
|
||||
flags.Var(&options.filter, "filter", `Provide filter values (e.g., "until=24h")`)
|
||||
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
|
||||
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
|
||||
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
||||
|
@@ -2,35 +2,53 @@ package commands
|
||||
|
||||
import (
|
||||
"context"
|
||||
"time"
|
||||
|
||||
"github.com/docker/buildx/store"
|
||||
"github.com/docker/buildx/store/storeutil"
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/moby/buildkit/util/appcontext"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type rmOptions struct {
|
||||
builder string
|
||||
keepState bool
|
||||
builder string
|
||||
keepState bool
|
||||
keepDaemon bool
|
||||
allInactive bool
|
||||
force bool
|
||||
}
|
||||
|
||||
const (
|
||||
rmInactiveWarning = `WARNING! This will remove all builders that are not in running state. Are you sure you want to continue?`
|
||||
)
|
||||
|
||||
func runRm(dockerCli command.Cli, in rmOptions) error {
|
||||
ctx := appcontext.Context()
|
||||
|
||||
if in.allInactive && !in.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), rmInactiveWarning) {
|
||||
return nil
|
||||
}
|
||||
|
||||
txn, release, err := storeutil.GetStore(dockerCli)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer release()
|
||||
|
||||
if in.allInactive {
|
||||
return rmAllInactive(ctx, txn, dockerCli, in)
|
||||
}
|
||||
|
||||
if in.builder != "" {
|
||||
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err1 := rm(ctx, dockerCli, ng, in.keepState)
|
||||
err1 := rm(ctx, dockerCli, in, ng)
|
||||
if err := txn.Remove(ng.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -42,7 +60,7 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
|
||||
return err
|
||||
}
|
||||
if ng != nil {
|
||||
err1 := rm(ctx, dockerCli, ng, in.keepState)
|
||||
err1 := rm(ctx, dockerCli, in, ng)
|
||||
if err := txn.Remove(ng.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -62,6 +80,9 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
options.builder = rootOpts.builder
|
||||
if len(args) > 0 {
|
||||
if options.allInactive {
|
||||
return errors.New("cannot specify builder name when --all-inactive is set")
|
||||
}
|
||||
options.builder = args[0]
|
||||
}
|
||||
return runRm(dockerCli, options)
|
||||
@@ -70,23 +91,30 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
|
||||
flags.BoolVar(&options.keepDaemon, "keep-daemon", false, "Keep the buildkitd daemon running")
|
||||
flags.BoolVar(&options.allInactive, "all-inactive", false, "Remove all inactive builders")
|
||||
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func rm(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, keepState bool) error {
|
||||
func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.NodeGroup) error {
|
||||
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, di := range dis {
|
||||
if di.Driver != nil {
|
||||
if di.Driver == nil {
|
||||
continue
|
||||
}
|
||||
// Do not stop the buildkitd daemon when --keep-daemon is provided
|
||||
if !in.keepDaemon {
|
||||
if err := di.Driver.Stop(ctx, true); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := di.Driver.Rm(ctx, true, !keepState); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
if err := di.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
|
||||
return err
|
||||
}
|
||||
if di.Err != nil {
|
||||
err = di.Err
|
||||
@@ -94,3 +122,42 @@ func rm(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, keepSta
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, in rmOptions) error {
|
||||
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
|
||||
defer cancel()
|
||||
|
||||
ll, err := txn.List()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
builders := make([]*nginfo, len(ll))
|
||||
for i, ng := range ll {
|
||||
builders[i] = &nginfo{ng: ng}
|
||||
}
|
||||
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
for _, b := range builders {
|
||||
func(b *nginfo) {
|
||||
eg.Go(func() error {
|
||||
if err := loadNodeGroupData(ctx, dockerCli, b); err != nil {
|
||||
return errors.Wrapf(err, "cannot load %s", b.ng.Name)
|
||||
}
|
||||
if b.ng.Dynamic {
|
||||
return nil
|
||||
}
|
||||
if b.inactive() {
|
||||
rmerr := rm(ctx, dockerCli, in, b.ng)
|
||||
if err := txn.Remove(b.ng.Name); err != nil {
|
||||
return err
|
||||
}
|
||||
return rmerr
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}(b)
|
||||
}
|
||||
|
||||
return eg.Wait()
|
||||
}
|
||||
|
@@ -4,8 +4,11 @@ import (
|
||||
"os"
|
||||
|
||||
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
||||
"github.com/docker/buildx/util/logutil"
|
||||
"github.com/docker/cli-docs-tool/annotation"
|
||||
"github.com/docker/cli/cli-plugins/plugin"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/sirupsen/logrus"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/spf13/pflag"
|
||||
)
|
||||
@@ -15,6 +18,9 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
||||
Short: "Docker Buildx",
|
||||
Long: `Extended build capabilities with BuildKit`,
|
||||
Use: name,
|
||||
Annotations: map[string]string{
|
||||
annotation.CodeDelimiter: `"`,
|
||||
},
|
||||
}
|
||||
if isPlugin {
|
||||
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
|
||||
@@ -22,6 +28,26 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
|
||||
}
|
||||
}
|
||||
|
||||
logrus.SetFormatter(&logutil.Formatter{})
|
||||
|
||||
logrus.AddHook(logutil.NewFilter([]logrus.Level{
|
||||
logrus.DebugLevel,
|
||||
},
|
||||
"serving grpc connection",
|
||||
"stopping session",
|
||||
"using default config store",
|
||||
))
|
||||
|
||||
// filter out useless commandConn.CloseWrite warning message that can occur
|
||||
// when listing builder instances with "buildx ls" for those that are
|
||||
// unreachable: "commandConn.CloseWrite: commandconn: failed to wait: signal: killed"
|
||||
// https://github.com/docker/cli/blob/3fb4fb83dfb5db0c0753a8316f21aea54dab32c5/cli/connhelper/commandconn/commandconn.go#L203-L214
|
||||
logrus.AddHook(logutil.NewFilter([]logrus.Level{
|
||||
logrus.WarnLevel,
|
||||
},
|
||||
"commandConn.CloseWrite:",
|
||||
))
|
||||
|
||||
addCommands(cmd, dockerCli)
|
||||
return cmd
|
||||
}
|
||||
|
@@ -8,13 +8,13 @@ import (
|
||||
|
||||
"github.com/docker/buildx/build"
|
||||
"github.com/docker/buildx/driver"
|
||||
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
|
||||
"github.com/docker/buildx/store"
|
||||
"github.com/docker/buildx/store/storeutil"
|
||||
"github.com/docker/buildx/util/platformutil"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/context/docker"
|
||||
"github.com/docker/cli/cli/context/kubernetes"
|
||||
ctxstore "github.com/docker/cli/cli/context/store"
|
||||
dopts "github.com/docker/cli/opts"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
@@ -73,8 +73,9 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
|
||||
func(i int, n store.Node) {
|
||||
eg.Go(func() error {
|
||||
di := build.DriverInfo{
|
||||
Name: n.Name,
|
||||
Platform: n.Platforms,
|
||||
Name: n.Name,
|
||||
Platform: n.Platforms,
|
||||
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
|
||||
}
|
||||
defer func() {
|
||||
dis[i] = di
|
||||
@@ -149,7 +150,7 @@ func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.Client
|
||||
}
|
||||
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
|
||||
}
|
||||
return kubernetes.ConfigFromContext(endpointName, s)
|
||||
return ctxkube.ConfigFromContext(endpointName, s)
|
||||
}
|
||||
|
||||
// clientForEndpoint returns a docker client for an endpoint
|
||||
@@ -264,9 +265,10 @@ func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly b
|
||||
}
|
||||
return []build.DriverInfo{
|
||||
{
|
||||
Name: "default",
|
||||
Driver: d,
|
||||
ImageOpt: imageopt,
|
||||
Name: "default",
|
||||
Driver: d,
|
||||
ImageOpt: imageopt,
|
||||
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
|
||||
},
|
||||
}, nil
|
||||
}
|
||||
@@ -290,9 +292,7 @@ func loadInfoData(ctx context.Context, d *dinfo) error {
|
||||
return errors.Wrap(err, "listing workers")
|
||||
}
|
||||
for _, w := range workers {
|
||||
for _, p := range w.Platforms {
|
||||
d.platforms = append(d.platforms, p)
|
||||
}
|
||||
d.platforms = append(d.platforms, w.Platforms...)
|
||||
}
|
||||
d.platforms = platformutil.Dedupe(d.platforms)
|
||||
}
|
||||
@@ -391,6 +391,17 @@ type nginfo struct {
|
||||
err error
|
||||
}
|
||||
|
||||
// inactive checks if all nodes are inactive for this builder
|
||||
func (n *nginfo) inactive() bool {
|
||||
for idx := range n.ng.Nodes {
|
||||
d := n.drivers[idx]
|
||||
if d.info != nil && d.info.Status == driver.Running {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func boot(ctx context.Context, ngi *nginfo) (bool, error) {
|
||||
toBoot := make([]int, 0, len(ngi.drivers))
|
||||
for i, d := range ngi.drivers {
|
||||
@@ -405,7 +416,7 @@ func boot(ctx context.Context, ngi *nginfo) (bool, error) {
|
||||
return false, nil
|
||||
}
|
||||
|
||||
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")
|
||||
printer := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, "auto")
|
||||
|
||||
baseCtx := ctx
|
||||
eg, _ := errgroup.WithContext(ctx)
|
||||
|
@@ -89,6 +89,11 @@ target "mod-outdated" {
|
||||
inherits = ["_common"]
|
||||
dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
|
||||
target = "outdated"
|
||||
args = {
|
||||
// used to invalidate cache for outdated run stage
|
||||
// can be dropped when https://github.com/moby/buildkit/issues/1213 is fixed
|
||||
_RANDOM = uuidv4()
|
||||
}
|
||||
output = ["type=cacheonly"]
|
||||
}
|
||||
|
||||
|
docs/guides/cicd.md (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
# CI/CD
|
||||
|
||||
## GitHub Actions
|
||||
|
||||
Docker provides a [GitHub Action that will build and push your image](https://github.com/docker/build-push-action/#about)
|
||||
using Buildx. Here is a simple workflow:
|
||||
|
||||
```yaml
|
||||
name: ci
|
||||
|
||||
on:
|
||||
push:
|
||||
branches:
|
||||
- 'main'
|
||||
|
||||
jobs:
|
||||
docker:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
-
|
||||
name: Set up QEMU
|
||||
uses: docker/setup-qemu-action@v1
|
||||
-
|
||||
name: Set up Docker Buildx
|
||||
uses: docker/setup-buildx-action@v1
|
||||
-
|
||||
name: Login to DockerHub
|
||||
uses: docker/login-action@v1
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
-
|
||||
name: Build and push
|
||||
uses: docker/build-push-action@v2
|
||||
with:
|
||||
push: true
|
||||
tags: user/app:latest
|
||||
```
|
||||
|
||||
In this example we are also using 3 other actions:

* [`setup-buildx`](https://github.com/docker/setup-buildx-action) action creates and boots a builder, by default
using the `docker-container` [builder driver](../reference/buildx_create.md#driver).
This is **not required, but recommended** to be able to build multi-platform images, export cache, etc.
* [`setup-qemu`](https://github.com/docker/setup-qemu-action) action can be useful if you want
to add emulation support with QEMU to be able to build against more platforms.
* [`login`](https://github.com/docker/login-action) action takes care of logging
in to a Docker registry.
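
For reference, the builder bootstrap that `setup-buildx` performs is roughly equivalent to running the following locally (a sketch; the action may pass extra flags depending on its inputs):

```console
$ docker buildx create --name builder --driver docker-container --use
$ docker buildx inspect --bootstrap
```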
|
docs/guides/cni-networking.md (new file, 23 lines)
@@ -0,0 +1,23 @@
|
||||
# CNI networking
|
||||
|
||||
It can be useful to use a bridge network for your builder if, for example, you
encounter network port contention during multiple builds. If you're using
the BuildKit image, CNI is not yet available in it, but you can create
[a custom BuildKit image with CNI support](https://github.com/moby/buildkit/blob/master/docs/cni-networking.md).

Once you have a Dockerfile for such an image, build it:
|
||||
|
||||
```console
|
||||
$ docker buildx build --tag buildkit-cni:local --load .
|
||||
```
|
||||
|
||||
Then [create a `docker-container` builder](../reference/buildx_create.md) that
|
||||
will use this image:
|
||||
|
||||
```console
|
||||
$ docker buildx create --use \
|
||||
--name mybuilder \
|
||||
--driver docker-container \
|
||||
--driver-opt "image=buildkit-cni:local" \
|
||||
--buildkitd-flags "--oci-worker-net=cni"
|
||||
```
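
You can then boot the builder and verify that it comes up with the custom image (a quick check):

```console
$ docker buildx inspect mybuilder --bootstrap
```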
|
docs/guides/custom-network.md (new file, 48 lines)
@@ -0,0 +1,48 @@
|
||||
# Using a custom network
|
||||
|
||||
[Create a network](https://docs.docker.com/engine/reference/commandline/network_create/)
|
||||
named `foonet`:
|
||||
|
||||
```console
|
||||
$ docker network create foonet
|
||||
```
|
||||
|
||||
[Create a `docker-container` builder](../reference/buildx_create.md) named
|
||||
`mybuilder` that will use this network:
|
||||
|
||||
```console
|
||||
$ docker buildx create --use \
|
||||
--name mybuilder \
|
||||
--driver docker-container \
|
||||
--driver-opt "network=foonet"
|
||||
```
|
||||
|
||||
Boot and [inspect `mybuilder`](../reference/buildx_inspect.md):
|
||||
|
||||
```console
|
||||
$ docker buildx inspect --bootstrap
|
||||
```
|
||||
|
||||
[Inspect the builder container](https://docs.docker.com/engine/reference/commandline/inspect/)
|
||||
and see what network is being used:
|
||||
|
||||
```console
|
||||
$ docker inspect buildx_buildkit_mybuilder0 --format={{.NetworkSettings.Networks}}
|
||||
map[foonet:0xc00018c0c0]
|
||||
```
|
||||
|
||||
## What's `buildx_buildkit_mybuilder0`?
|
||||
|
||||
`buildx_buildkit_mybuilder0` is the container name. It can be broken down like this:
|
||||
|
||||
* `buildx_buildkit_` is a hardcoded prefix
|
||||
* `mybuilder0` is the name of the node (defaults to builder name + position in the list of nodes)
|
||||
|
||||
```console
|
||||
$ docker buildx ls
|
||||
NAME/NODE DRIVER/ENDPOINT STATUS PLATFORMS
|
||||
mybuilder * docker-container
|
||||
mybuilder0 unix:///var/run/docker.sock running linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/mips64le, linux/mips64, linux/arm/v7, linux/arm/v6
|
||||
default docker
|
||||
default default running linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6
|
||||
```
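
You can also confirm from the network side that the builder container is attached; this is a sketch using a Go template, and the exact output will vary:

```console
$ docker network inspect foonet --format '{{range .Containers}}{{.Name}} {{end}}'
buildx_buildkit_mybuilder0
```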
|
docs/guides/custom-registry-config.md (new file, 63 lines)
@@ -0,0 +1,63 @@
|
||||
# Using a custom registry configuration
|
||||
|
||||
If you [create a `docker-container` or `kubernetes` builder](../reference/buildx_create.md) and
|
||||
have specified certificates for registries in the [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md),
|
||||
the files will be copied into the container under `/etc/buildkit/certs` and
|
||||
configuration will be updated to reflect that.
|
||||
|
||||
Take the following `buildkitd.toml` configuration, which will be used for
pushing an image to a registry using self-signed certificates:
|
||||
|
||||
```toml
|
||||
debug = true
|
||||
[registry."myregistry.com"]
|
||||
ca=["/etc/certs/myregistry.pem"]
|
||||
[[registry."myregistry.com".keypair]]
|
||||
key="/etc/certs/myregistry_key.pem"
|
||||
cert="/etc/certs/myregistry_cert.pem"
|
||||
```
|
||||
> `/etc/buildkitd.toml`
|
||||
|
||||
Here we have configured a self-signed certificate for the `myregistry.com` registry.
|
||||
|
||||
Now [create a `docker-container` builder](../reference/buildx_create.md)
|
||||
that will use this BuildKit configuration:
|
||||
|
||||
```console
|
||||
$ docker buildx create --use \
|
||||
--name mybuilder \
|
||||
--driver docker-container \
|
||||
--config /etc/buildkitd.toml
|
||||
```
|
||||
|
||||
Inspecting the builder container, you can see that the buildkitd configuration
has changed:
|
||||
|
||||
```console
|
||||
$ docker exec -it buildx_buildkit_mybuilder0 cat /etc/buildkit/buildkitd.toml
|
||||
```
|
||||
```toml
|
||||
debug = true
|
||||
|
||||
[registry]
|
||||
|
||||
[registry."myregistry.com"]
|
||||
ca = ["/etc/buildkit/certs/myregistry.com/myregistry.pem"]
|
||||
|
||||
[[registry."myregistry.com".keypair]]
|
||||
cert = "/etc/buildkit/certs/myregistry.com/myregistry_cert.pem"
|
||||
key = "/etc/buildkit/certs/myregistry.com/myregistry_key.pem"
|
||||
```
|
||||
|
||||
And the certificates have been copied inside the container:
|
||||
|
||||
```console
|
||||
$ docker exec -it buildx_buildkit_mybuilder0 ls /etc/buildkit/certs/myregistry.com/
|
||||
myregistry.pem myregistry_cert.pem myregistry_key.pem
|
||||
```
|
||||
|
||||
Now you should be able to push to the registry with this builder:
|
||||
|
||||
```console
|
||||
$ docker buildx build --push --tag myregistry.com/myimage:latest .
|
||||
```
|
docs/guides/opentelemetry.md (new file, 31 lines)
@@ -0,0 +1,31 @@
|
||||
# OpenTelemetry support
|
||||
|
||||
To capture traces in [Jaeger](https://github.com/jaegertracing/jaeger), set the
`JAEGER_TRACE` environment variable to the collection address using a `driver-opt`.
|
||||
|
||||
First create a Jaeger container:
|
||||
|
||||
```console
|
||||
$ docker run -d --name jaeger -p "6831:6831/udp" -p "16686:16686" jaegertracing/all-in-one
|
||||
```
|
||||
|
||||
Then [create a `docker-container` builder](../reference/buildx_create.md)
|
||||
that will use the Jaeger instance via the `JAEGER_TRACE` env var:
|
||||
|
||||
```console
|
||||
$ docker buildx create --use \
|
||||
--name mybuilder \
|
||||
--driver docker-container \
|
||||
--driver-opt "network=host" \
|
||||
--driver-opt "env.JAEGER_TRACE=localhost:6831"
|
||||
```
|
||||
|
||||
Boot and [inspect `mybuilder`](../reference/buildx_inspect.md):
|
||||
|
||||
```console
|
||||
$ docker buildx inspect --bootstrap
|
||||
```
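
To generate a trace to look at, run any build against this builder, for example (mirroring the inline-Dockerfile pattern used in these guides):

```console
$ docker buildx build . -f-<<EOF
FROM alpine
RUN echo "hello world"
EOF
```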
|
||||
|
||||
Traces of your buildx commands should now show up at `http://127.0.0.1:16686/`:
|
||||
|
||||

|
docs/guides/registry-mirror.md (new file, 60 lines)
@@ -0,0 +1,60 @@
|
||||
# Registry mirror
|
||||
|
||||
You can define a registry mirror to use for your builds by providing a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
|
||||
while creating a builder with the [`--config` flags](../reference/buildx_create.md#config).
|
||||
|
||||
```toml
|
||||
debug = true
|
||||
[registry."docker.io"]
|
||||
mirrors = ["mirror.gcr.io"]
|
||||
```
|
||||
> `/etc/buildkitd.toml`
|
||||
|
||||
> :information_source: `debug = true` has been added so that you can debug requests
in the BuildKit daemon and verify that the mirror is effectively used.
|
||||
|
||||
Then [create a `docker-container` builder](../reference/buildx_create.md)
|
||||
that will use this BuildKit configuration:
|
||||
|
||||
```console
|
||||
$ docker buildx create --use \
|
||||
--name mybuilder \
|
||||
--driver docker-container \
|
||||
--config /etc/buildkitd.toml
|
||||
```
|
||||
|
||||
Boot and [inspect `mybuilder`](../reference/buildx_inspect.md):
|
||||
|
||||
```console
|
||||
$ docker buildx inspect --bootstrap
|
||||
```
|
||||
|
||||
Build an image:
|
||||
|
||||
```console
|
||||
$ docker buildx build --load . -f-<<EOF
|
||||
FROM alpine
|
||||
RUN echo "hello world"
|
||||
EOF
|
||||
```
|
||||
|
||||
Now let's check the BuildKit logs in the builder container:
|
||||
|
||||
```console
|
||||
$ docker logs buildx_buildkit_mybuilder0
|
||||
```
|
||||
```text
|
||||
...
|
||||
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.container.image.v1+json, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
||||
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1469 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"774380abda8f4eae9a149e5d5d3efc83\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:57 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788077652182 response.header.x-goog-hash="crc32c=V3DSrg==" response.header.x-goog-hash.1="md5=d0OAq9qPTq6aFJ5dXT78gw==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1469 response.header.x-guploader-uploadid=ADPycduqQipVAXc3tzXmTzKQ2gTT6CV736B2J628smtD1iDytEyiYCgvvdD8zz9BT1J1sASUq9pW_ctUyC4B-v2jvhIxnZTlKg response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
||||
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=760 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1471 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:35:13 GMT" response.header.etag="\"35d688bd15327daafcdb4d4395e616a8\"" response.header.expires="Sun, 06 Feb 2022 18:35:13 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:12 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788032100793 response.header.x-goog-hash="crc32c=aWgRjA==" response.header.x-goog-hash.1="md5=NdaIvRUyfar8201DleYWqA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1471 response.header.x-guploader-uploadid=ADPycdtR-gJYwC7yHquIkJWFFG8FovDySvtmRnZBqlO3yVDanBXh_VqKYt400yhuf0XbQ3ZMB9IZV2vlcyHezn_Pu3a1SMMtiw response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
||||
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
||||
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.image.rootfs.diff.tar.gzip, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
||||
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=2818413 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"1d55e7be5a77c4a908ad11bc33ebea1c\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:06 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788026431708 response.header.x-goog-hash="crc32c=ZojF+g==" response.header.x-goog-hash.1="md5=HVXnvlp3xKkIrRG8M+vqHA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=2818413 response.header.x-guploader-uploadid=ADPycdsebqxiTBJqZ0bv9zBigjFxgQydD2ESZSkKchpE0ILlN9Ibko3C5r4fJTJ4UR9ddp-UBd-2v_4eRpZ8Yo2llW_j4k8WhQ response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
|
||||
...
|
||||
```
|
||||
|
||||
As you can see, requests come from the GCR registry mirror (`response.header.x-goog*`).
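
To narrow the logs down to mirror traffic only, you can grep for those Google storage headers (a sketch; `docker logs` may write buildkitd output to stderr, hence the redirect):

```console
$ docker logs buildx_buildkit_mybuilder0 2>&1 | grep "x-goog"
```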
|
docs/guides/resource-limiting.md (new file, 33 lines)
@@ -0,0 +1,33 @@
|
||||
# Resource limiting
|
||||
|
||||
## Max parallelism
|
||||
|
||||
You can limit the parallelism of the BuildKit solver, which is particularly useful
|
||||
for low-powered machines, using a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
|
||||
while creating a builder with the [`--config` flags](../reference/buildx_create.md#config).
|
||||
|
||||
```toml
|
||||
[worker.oci]
|
||||
max-parallelism = 4
|
||||
```
|
||||
> `/etc/buildkitd.toml`
|
||||
|
||||
Now you can [create a `docker-container` builder](../reference/buildx_create.md)
|
||||
that will use this BuildKit configuration to limit parallelism.
|
||||
|
||||
```console
|
||||
$ docker buildx create --use \
|
||||
--name mybuilder \
|
||||
--driver docker-container \
|
||||
--config /etc/buildkitd.toml
|
||||
```
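
As in the custom registry configuration guide, you can verify that the configuration was applied by reading it back from the builder container (assuming the default node name `mybuilder0`):

```console
$ docker exec -it buildx_buildkit_mybuilder0 cat /etc/buildkit/buildkitd.toml
```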
|
||||
|
||||
## Limit on TCP connections
|
||||
|
||||
We are also now limiting TCP connections to **4 per registry**, with an additional
connection reserved for metadata requests rather than layer pulls and pushes. This
per-host limit keeps your build from getting stuck while pulling images. The
additional metadata connection (used, for example, for image config retrieval)
improves the overall build time.
|
||||
|
||||
More info: [moby/buildkit#2259](https://github.com/moby/buildkit/pull/2259)
|
@@ -29,9 +29,9 @@ Extended build capabilities with BuildKit
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Description |
|
||||
| --- | --- |
|
||||
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||
| Name | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -13,18 +13,18 @@ Build from a file
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Description |
|
||||
| --- | --- |
|
||||
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||
| [`-f`](#file), [`--file stringArray`](#file) | Build definition file |
|
||||
| `--load` | Shorthand for `--set=*.output=type=docker` |
|
||||
| `--metadata-file string` | Write build result metadata to the file |
|
||||
| [`--no-cache`](#no-cache) | Do not use cache when building the image |
|
||||
| [`--print`](#print) | Print the options without building |
|
||||
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||
| [`--pull`](#pull) | Always attempt to pull a newer version of the image |
|
||||
| `--push` | Shorthand for `--set=*.output=type=registry` |
|
||||
| [`--set stringArray`](#set) | Override target value (e.g., `targetpattern.key=value`) |
|
||||
| Name | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
|
||||
| `--load` | | | Shorthand for `--set=*.output=type=docker` |
|
||||
| `--metadata-file` | `string` | | Write build result metadata to the file |
|
||||
| [`--no-cache`](#no-cache) | | | Do not use cache when building the image |
|
||||
| [`--print`](#print) | | | Print the options without building |
|
||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||
| [`--pull`](#pull) | | | Always attempt to pull all referenced images |
|
||||
| `--push` | | | Shorthand for `--set=*.output=type=registry` |
|
||||
| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
@@ -99,19 +99,23 @@ $ docker buildx bake -f docker-compose.dev.yaml backend database
|
||||
You can also use a remote `git` bake definition:
|
||||
|
||||
```console
|
||||
$ docker buildx bake "git://github.com/docker/cli#master" --print
|
||||
#1 [internal] load git source git://github.com/docker/cli#master
|
||||
#1 0.686 2776a6d694f988c0c1df61cad4bfac0f54e481c8 refs/heads/master
|
||||
#1 CACHED
|
||||
$ docker buildx bake "https://github.com/docker/cli.git#v20.10.11" --print
|
||||
#1 [internal] load git source https://github.com/docker/cli.git#v20.10.11
|
||||
#1 0.745 e8f1871b077b64bcb4a13334b7146492773769f7 refs/tags/v20.10.11
|
||||
#1 2.022 From https://github.com/docker/cli
|
||||
#1 2.022 * [new tag] v20.10.11 -> v20.10.11
|
||||
#1 DONE 2.9s
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"binary"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"binary"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"binary": {
|
||||
"context": "git://github.com/docker/cli#master",
|
||||
"context": "https://github.com/docker/cli.git#v20.10.11",
|
||||
"dockerfile": "Dockerfile",
|
||||
"args": {
|
||||
"BASE_VARIANT": "alpine",
|
||||
@@ -130,7 +134,7 @@ $ docker buildx bake "git://github.com/docker/cli#master" --print
|
||||
}
|
||||
```
|
||||
|
||||
As you can see the context is fixed to `git://github.com/docker/cli` even if
|
||||
As you can see the context is fixed to `https://github.com/docker/cli.git` even if
|
||||
[no context is actually defined](https://github.com/docker/cli/blob/2776a6d694f988c0c1df61cad4bfac0f54e481c8/docker-bake.hcl#L17-L26)
|
||||
in the definition.
|
||||
|
||||
@@ -151,13 +155,8 @@ EOT
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" --print
|
||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" --print
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"default"
|
||||
]
|
||||
},
|
||||
"target": {
|
||||
"default": {
|
||||
"context": ".",
|
||||
@@ -170,7 +169,7 @@ $ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" --print
|
||||
|
||||
```console
|
||||
$ touch foo bar
|
||||
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test"
|
||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test"
|
||||
...
|
||||
> [4/4] RUN ls -l && stop:
|
||||
#8 0.101 total 0
|
||||
@@ -180,19 +179,14 @@ $ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test"
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#master" --print
|
||||
#1 [internal] load git source git://github.com/tonistiigi/buildx#remote-test
|
||||
#1 0.401 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
|
||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11" --print
|
||||
#1 [internal] load git source https://github.com/tonistiigi/buildx.git#remote-test
|
||||
#1 0.429 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
|
||||
#1 CACHED
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"default"
|
||||
]
|
||||
},
|
||||
"target": {
|
||||
"default": {
|
||||
"context": "git://github.com/docker/cli#master",
|
||||
"context": "https://github.com/docker/cli.git#v20.10.11",
|
||||
"dockerfile": "Dockerfile",
|
||||
"dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
|
||||
}
|
||||
@@ -201,7 +195,7 @@ $ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://git
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#master"
|
||||
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11"
|
||||
...
|
||||
> [4/4] RUN ls -l && stop:
|
||||
#8 0.136 drwxrwxrwx 5 root root 4096 Jul 27 18:31 kubernetes
|
||||
@@ -229,9 +223,11 @@ format, without starting a build.
|
||||
$ docker buildx bake -f docker-bake.hcl --print db
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"db"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"db"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"db": {
|
||||
@@ -268,7 +264,6 @@ $ docker buildx bake --progress=plain
|
||||
...
|
||||
```
|
||||
|
||||
|
||||
### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull)
|
||||
|
||||
Same as `build --pull`.
|
||||
@@ -282,9 +277,6 @@ Same as `build --pull`.
|
||||
Override target configurations from command line. The pattern matching syntax
|
||||
is defined in https://golang.org/pkg/path/#Match.
|
||||
|
||||
|
||||
**Examples**
|
||||
|
||||
```console
|
||||
$ docker buildx bake --set target.args.mybuildarg=value
|
||||
$ docker buildx bake --set target.platform=linux/arm64
|
||||
@@ -317,8 +309,7 @@ groups to inherit from.
|
||||
Note: Design of bake command is work in progress, the user experience may change
|
||||
based on feedback.
|
||||
|
||||
|
||||
**Example HCL definition**
|
||||
HCL definition example:
|
||||
|
||||
```hcl
|
||||
group "default" {
|
||||
@@ -343,8 +334,8 @@ target "db" {
|
||||
|
||||
Complete list of valid target fields:
|
||||
|
||||
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `inherits`, `labels`,
|
||||
`no-cache`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
|
||||
`args`, `cache-from`, `cache-to`, `context`, `contexts`, `dockerfile`, `inherits`, `labels`,
|
||||
`no-cache`, `no-cache-filter`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
|
||||
|
||||
### Global scope attributes
|
||||
|
||||
@@ -372,9 +363,11 @@ You can use this file directly:
|
||||
$ docker buildx bake --print app
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"app"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"app"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"app": {
|
||||
@@ -402,9 +395,11 @@ And invoke bake together with both of the files:
|
||||
$ docker buildx bake -f docker-bake.hcl -f env.hcl --print app
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"app"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"app"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"app": {
|
||||
@@ -450,13 +445,37 @@ target "webapp" {
|
||||
}
|
||||
```
|
||||
|
||||
Alternatively, in JSON format:
|
||||
|
||||
```json
|
||||
{
|
||||
"variable": {
|
||||
"TAG": {
|
||||
"default": "latest"
|
||||
}
|
||||
},
|
||||
"group": {
|
||||
"default": {
|
||||
"targets": ["webapp"]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"webapp": {
|
||||
"tags": ["docker.io/username/webapp:${TAG}"]
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker buildx bake --print webapp
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"webapp"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"webapp"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"webapp": {
|
||||
@@ -474,9 +493,11 @@ $ docker buildx bake --print webapp
|
||||
$ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"webapp"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"webapp"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"webapp": {
|
||||
@@ -492,7 +513,7 @@ $ TAG=$(git rev-parse --short HEAD) docker buildx bake --print webapp
|
||||
|
||||
#### Using the `add` function
|
||||
|
||||
You can use [`go-cty` stdlib functions]([go-cty](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib)).
|
||||
You can use [`go-cty` stdlib functions](https://github.com/zclconf/go-cty/tree/main/cty/function/stdlib).
|
||||
Here we are using the `add` function.
|
||||
|
||||
```hcl
|
||||
@@ -516,9 +537,11 @@ target "webapp" {
|
||||
$ docker buildx bake --print webapp
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"webapp"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"webapp"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"webapp": {
|
||||
@@ -559,9 +582,11 @@ target "webapp" {
|
||||
$ docker buildx bake --print webapp
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"webapp"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"webapp"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"webapp": {
|
||||
@@ -604,9 +629,11 @@ target "webapp" {
|
||||
$ docker buildx bake --print webapp
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"webapp"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"webapp"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"webapp": {
|
||||
@@ -645,9 +672,11 @@ target "webapp" {
|
||||
$ docker buildx bake --print webapp
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"webapp"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"webapp"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"webapp": {
|
||||
@@ -700,9 +729,11 @@ target "app" {
|
||||
$ docker buildx bake -f docker-bake1.hcl -f docker-bake2.hcl --print app
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"app"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"app"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"app": {
|
||||
@@ -744,9 +775,11 @@ target "app" {
|
||||
$ docker buildx bake --print app
|
||||
{
|
||||
"group": {
|
||||
"default": [
|
||||
"app"
|
||||
]
|
||||
"default": {
|
||||
"targets": [
|
||||
"app"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"app": {
|
||||
@@ -761,6 +794,77 @@ $ docker buildx bake --print app
|
||||
}
|
||||
```
|
||||
|
||||
### Defining additional build contexts and linking targets
|
||||
|
||||
In addition to the main `context` key that defines the build context, each target can also define additional named contexts with a map defined with the key `contexts`. These values map to the `--build-context` flag in the [build command](buildx_build.md#build-context).

Inside the Dockerfile these contexts can be used with the `FROM` instruction or the `--from` flag.

The value can be a local source directory, a container image (with the `docker-image://` prefix), a Git URL, an HTTP URL, or the name of another target in the Bake file (with the `target:` prefix), as the following examples show.
|
||||
|
||||
#### Pinning alpine image
|
||||
|
||||
```Dockerfile
|
||||
# Dockerfile
|
||||
FROM alpine
|
||||
RUN echo "Hello world"
|
||||
```
|
||||
|
||||
```hcl
|
||||
# docker-bake.hcl
|
||||
target "app" {
|
||||
contexts = {
|
||||
alpine = "docker-image://alpine:3.13"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Using a secondary source directory
|
||||
|
||||
```Dockerfile
|
||||
# Dockerfile
|
||||
|
||||
FROM scratch AS src
|
||||
|
||||
FROM golang
|
||||
COPY --from=src . .
|
||||
```
|
||||
|
||||
```hcl
|
||||
# docker-bake.hcl
|
||||
target "app" {
|
||||
contexts = {
|
||||
src = "../path/to/source"
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
#### Using a result of one target as a base image in another target
|
||||
|
||||
To use the result of one target as a build context of another, specify the target name with the `target:` prefix.
|
||||
|
||||
```Dockerfile
|
||||
# Dockerfile
|
||||
FROM baseapp
|
||||
RUN echo "Hello world"
|
||||
```
|
||||
|
||||
```hcl
|
||||
# docker-bake.hcl
|
||||
|
||||
target "base" {
|
||||
dockerfile = "baseapp.Dockerfile"
|
||||
}
|
||||
|
||||
target "app" {
|
||||
contexts = {
|
||||
baseapp = "target:base"
|
||||
}
|
||||
}
|
||||
```
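
With this definition, building `app` will build `base` first and use its result as the `baseapp` context (a sketch):

```console
$ docker buildx bake app
```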
|
||||
|
||||
Please note that in most cases you should just use a single multi-stage Dockerfile with multiple targets for similar behavior. This approach is only recommended when you have multiple Dockerfiles that can't easily be merged into one.
|
||||
|
||||
### Extension field with Compose
|
||||
|
||||
[Special extension](https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension)
|
||||
@@ -810,6 +914,14 @@ services:
|
||||
```console
|
||||
$ docker buildx bake --print
|
||||
{
|
||||
"group": {
|
||||
"default": {
|
||||
"targets": [
|
||||
"aws",
|
||||
"addon"
|
||||
]
|
||||
}
|
||||
},
|
||||
"target": {
|
||||
"addon": {
|
||||
"context": ".",
|
||||
@@ -864,7 +976,7 @@ $ docker buildx bake --print
|
||||
Complete list of valid fields for `x-bake`:
|
||||
|
||||
`tags`, `cache-from`, `cache-to`, `secret`, `ssh`, `platforms`, `output`,
|
||||
`pull`, `no-cache`
|
||||
`pull`, `no-cache`, `no-cache-filter`
|
||||
|
||||
### Built-in variables
|
||||
|
||||
|
@@ -13,34 +13,36 @@ Start a build
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Description |
|
||||
| --- | --- |
|
||||
| [`--add-host stringSlice`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||
| [`--allow stringSlice`](#allow) | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| [`--build-arg stringArray`](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg) | Set build-time variables |
|
||||
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||
| [`--cache-from stringArray`](#cache-from) | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||
| [`--cache-to stringArray`](#cache-to) | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||
| [`--cgroup-parent string`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | Optional parent cgroup for the container |
|
||||
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file string`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||
| `--iidfile string` | Write the image ID to the file |
|
||||
| `--label stringArray` | Set metadata for an image |
|
||||
| [`--load`](#load) | Shorthand for `--output=type=docker` |
|
||||
| `--metadata-file string` | Write build result metadata to the file |
|
||||
| `--network string` | Set the networking mode for the RUN instructions during build |
|
||||
| `--no-cache` | Do not use cache when building the image |
|
||||
| [`-o`](#output), [`--output stringArray`](#output) | Output destination (format: `type=local,dest=path`) |
|
||||
| [`--platform stringArray`](#platform) | Set target platform for build |
|
||||
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||
| `--pull` | Always attempt to pull a newer version of the image |
|
||||
| [`--push`](#push) | Shorthand for `--output=type=registry` |
|
||||
| `-q`, `--quiet` | Suppress the build output and print image ID on success |
|
||||
| `--secret stringArray` | Secret file to expose to the build (format: `id=mysecret,src=/local/secret`) |
|
||||
| [`--shm-size bytes`](#shm-size) | Size of `/dev/shm` |
|
||||
| `--ssh stringArray` | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag stringArray`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | Name and optionally a tag (format: `name:tag`) |
|
||||
| [`--target string`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | Set the target build stage to build. |
|
||||
| [`--ulimit ulimit`](#ulimit) | Ulimit options |
|
||||
| Name | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| [`--add-host`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
||||
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`--cache-from`](#cache-from) | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
|
||||
| [`--cache-to`](#cache-to) | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
|
||||
| [`--cgroup-parent`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | `string` | | Optional parent cgroup for the container |
|
||||
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
|
||||
| `--iidfile` | `string` | | Write the image ID to the file |
|
||||
| `--label` | `stringArray` | | Set metadata for an image |
|
||||
| [`--load`](#load) | | | Shorthand for `--output=type=docker` |
|
||||
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to the file |
|
||||
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
|
||||
| `--no-cache` | | | Do not use cache when building the image |
|
||||
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
|
||||
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
|
||||
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
|
||||
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
|
||||
| `--pull` | | | Always attempt to pull all referenced images |
|
||||
| [`--push`](#push) | | | Shorthand for `--output=type=registry` |
|
||||
| `-q`, `--quiet` | | | Suppress the build output and print image ID on success |
|
||||
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
|
||||
| [`--shm-size`](#shm-size) | `bytes` | `0` | Size of `/dev/shm` |
|
||||
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
|
||||
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
|
||||
| [`--target`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | `string` | | Set the target build stage to build |
|
||||
| [`--ulimit`](#ulimit) | `ulimit` | | Ulimit options |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
@@ -52,80 +54,200 @@ to the UI of `docker build` command and takes the same flags and arguments.
|
||||
|
||||
For documentation on most of these flags, refer to the [`docker build`
|
||||
documentation](https://docs.docker.com/engine/reference/commandline/build/). In
|
||||
here we’ll document a subset of the new flags.
|
||||
here we'll document a subset of the new flags.
|
||||
|
||||
## Examples
|
||||
|
||||
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
|
||||
|
||||
```
|
||||
--allow=ENTITLEMENT
|
||||
```
|
||||
|
||||
Allow extra privileged entitlement. List of entitlements:
|
||||
|
||||
- `network.host` - Allows executions with host networking.
|
||||
- `security.insecure` - Allows executions without sandbox. See
|
||||
[related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).
|
||||
|
||||
For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
|
||||
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags))
|
||||
|
||||
**Examples**
|
||||
|
||||
```console
|
||||
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
|
||||
$ docker buildx build --allow security.insecure .
|
||||
```
|
||||
|
||||
### <a name="build-arg"></a> Set build-time variables (--build-arg)
|
||||
|
||||
Same as [`docker build` command](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg).
|
||||
|
||||
There are also useful built-in build args like:
|
||||
|
||||
* `BUILDKIT_CONTEXT_KEEP_GIT_DIR=<bool>` triggers the git context to keep the `.git` directory
* `BUILDKIT_INLINE_BUILDINFO_ATTRS=<bool>` inlines build info attributes into the image config or not
* `BUILDKIT_INLINE_CACHE=<bool>` inlines cache metadata into the image config or not
* `BUILDKIT_MULTI_PLATFORM=<bool>` opts into deterministic output regardless of multi-platform output or not
|
||||
|
||||
```console
|
||||
$ docker buildx build --build-arg BUILDKIT_MULTI_PLATFORM=1 .
|
||||
```
|
||||
|
||||
More built-in build args can be found in [dockerfile frontend docs](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#built-in-build-args).
|
||||
|
||||
### <a name="build-context"></a> Additional build contexts (--build-context)
|
||||
|
||||
```
|
||||
--build-context=name=VALUE
|
||||
```
|
||||
|
||||
Define an additional build context with the specified contents. In the Dockerfile the context can be accessed when `FROM name` or `--from=name` is used.
When the Dockerfile defines a stage with the same name, the stage is overwritten.
|
||||
|
||||
The value can be a local source directory, a container image (with the `docker-image://` prefix), or a Git or HTTP URL.
|
||||
|
||||
Replace `alpine:latest` with a pinned one:
|
||||
|
||||
```console
|
||||
$ docker buildx build --build-context alpine=docker-image://alpine@sha256:0123456789 .
|
||||
```
|
||||
|
||||
Expose a secondary local source directory:
|
||||
|
||||
```console
|
||||
$ docker buildx build --build-context project=path/to/project/source .
|
||||
# docker buildx build --build-context project=https://github.com/myuser/project.git .
|
||||
```
|
||||
|
||||
```Dockerfile
|
||||
FROM alpine
|
||||
COPY --from=project myfile /
|
||||
```
|
||||
|
||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||
|
||||
Same as [`buildx --builder`](buildx.md#builder).
|
||||

### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)

```
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
```

Use an external cache source for a build. Supported types are `registry`,
`local` and `gha`.

- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
  can import cache from a cache manifest or (special) image configuration on the
  registry.
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
  import cache from local files previously exported with `--cache-to`.
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
  can import cache from a previously exported cache with `--cache-to` in your
  GitHub repository.

If no type is specified, `registry` exporter is used with a specified reference.

`docker` driver currently only supports importing build cache from the registry.

```console
$ docker buildx build --cache-from=user/app:cache .
$ docker buildx build --cache-from=user/app .
$ docker buildx build --cache-from=type=registry,ref=user/app .
$ docker buildx build --cache-from=type=local,src=path/to/cache .
$ docker buildx build --cache-from=type=gha .
```

More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)

```
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
```

Export build cache to an external cache destination. Supported types are
`registry`, `local`, `inline` and `gha`.

- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
- [`local` type](https://github.com/moby/buildkit#local-directory-1) exports
  cache to a local directory on the client.
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
  writes the cache metadata into the image configuration.
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
  exports cache through the [GitHub Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).

`docker` driver currently only supports exporting inline cache metadata to image
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
to trigger inline cache exporter.

Attribute key:

- `mode` - Specifies how many layers are exported with the cache. `min` only
  exports layers already in the final build stage, `max` exports layers for
  all stages. Metadata is always exported for the whole build.

```console
$ docker buildx build --cache-to=user/app:cache .
$ docker buildx build --cache-to=type=inline .
$ docker buildx build --cache-to=type=registry,ref=user/app .
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
$ docker buildx build --cache-to=type=gha .
```

More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
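
The `mode` attribute combines with any cache exporter; a small sketch (the `user/app` ref is illustrative):

```console
$ docker buildx build --cache-to=type=registry,ref=user/app,mode=max .
```
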
### <a name="load"></a> Load the single-platform build result to `docker images` (--load)

Shorthand for [`--output=type=docker`](#docker). Will automatically load the
single-platform build result to `docker images`.
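
A typical single-platform invocation (the `myimage` tag is illustrative):

```console
$ docker buildx build --load -t myimage:latest .
$ docker images myimage
```
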
### <a name="metadata-file"></a> Write build result metadata to the file (--metadata-file)

To output build metadata such as the image digest, pass the `--metadata-file` flag.
The metadata will be written as a JSON object to the specified file. The
directory of the specified file must already exist and be writable.

```console
$ docker buildx build --load --metadata-file metadata.json .
$ cat metadata.json
```
```json
{
  "containerimage.buildinfo": {
    "frontend": "dockerfile.v0",
    "attrs": {
      "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
      "filename": "Dockerfile",
      "source": "docker/dockerfile:master"
    },
    "sources": [
      {
        "type": "docker-image",
        "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
        "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
      },
      {
        "type": "docker-image",
        "ref": "docker.io/library/alpine:3.13",
        "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
      }
    ]
  },
  "containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
  "containerimage.descriptor": {
    "annotations": {
      "config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
      "org.opencontainers.image.created": "2022-02-08T21:28:03Z"
    },
    "digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
    "mediaType": "application/vnd.oci.image.manifest.v1+json",
    "size": 506
  },
  "containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3"
}
```
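
To consume a single field from that file in a script, a hypothetical sketch using `jq`:

```console
$ jq -r '."containerimage.digest"' metadata.json
sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3
```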

### <a name="output"></a> Set the export action for the build result (-o, --output)

@@ -146,8 +268,6 @@

If just the path is specified as a value, `buildx` will use the local exporter
with this path as the destination. If the value is "-", `buildx` will use `tar`
exporter and write to `stdout`.

```console
$ docker buildx build -o . .
$ docker buildx build -o outdir .
```
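
Since `-` selects the `tar` exporter writing to `stdout`, the archive can be redirected; a minimal sketch:

```console
$ docker buildx build -o - . > out.tar
```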

@@ -201,7 +321,7 @@

The most common use case for multi-platform images is to directly push to a registry.

Attribute keys:

- `dest` - destination path where tarball will be written. If not specified the
  tar will be loaded automatically to the current docker instance.
- `context` - name for the docker context where to import the result

#### `image`
@@ -219,118 +339,163 @@

The `registry` exporter is a shortcut for `type=image,push=true`.
### <a name="platform"></a> Set the target platforms for the build (--platform)
|
||||
|
||||
```
|
||||
--platform=value[,value]
|
||||
```
|
||||
|
||||
Set the target platform for the build. All `FROM` commands inside the Dockerfile
|
||||
without their own `--platform` flag will pull base images for this platform and
|
||||
this value will also be the platform of the resulting image. The default value
|
||||
will be the current platform of the buildkit daemon.
|
||||
|
||||
When using `docker-container` driver with `buildx`, this flag can accept multiple
|
||||
values as an input separated by a comma. With multiple values the result will be
|
||||
built for all of the specified platforms and joined together into a single manifest
|
||||
list.
|
||||
|
||||
If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
|
||||
support for the specified platform. In a clean setup, you can only execute `RUN`
|
||||
commands for your system architecture.
|
||||
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
|
||||
launchers for secondary architectures, buildx will pick them up automatically.
|
||||
Docker desktop releases come with `binfmt_misc` automatically configured for `arm64`
|
||||
and `arm` architectures. You can see what runtime platforms your current builder
|
||||
instance supports by running `docker buildx inspect --bootstrap`.
|
||||
|
||||
Inside a `Dockerfile`, you can access the current platform value through
|
||||
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
|
||||
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
|
||||
for the full description of automatic platform argument variants .
|
||||
|
||||
The formatting for the platform specifier is defined in the [containerd source
|
||||
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).
|
||||
|
||||
```console
|
||||
$ docker buildx build --platform=linux/arm64 .
|
||||
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
|
||||
$ docker buildx build --platform=darwin .
|
||||
```
|
||||
|
||||
### <a name="progress"></a> Set type of progress output (--progress)
|
||||
|
||||
```
|
||||
--progress=VALUE
|
||||
```
|
||||
|
||||
Set type of progress output (auto, plain, tty). Use plain to show container
|
||||
output (default "auto").
|
||||
|
||||
> You can also use the `BUILDKIT_PROGRESS` environment variable to set
|
||||
> its value.
|
||||
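
For instance, a sketch of setting it via the environment instead of the flag:

```console
$ BUILDKIT_PROGRESS=plain docker buildx build .
```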

The following example uses `plain` output during the build:

```console
$ docker buildx build --load --progress=plain .

#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 227B 0.0s done
#1 DONE 0.1s

#2 [internal] load .dockerignore
#2 transferring context: 129B 0.0s done
#2 DONE 0.0s
...
```
### <a name="push"></a> Push the build result to a registry (--push)

Shorthand for [`--output=type=registry`](#registry). Will automatically push the
build result to registry.
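
A typical push (the tag is illustrative):

```console
$ docker buildx build --push -t user/app:latest .
```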
### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)
|
||||
### <a name="secret"></a> Secret to expose to the build (--secret)
|
||||
|
||||
```
|
||||
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
|
||||
--secret=[type=TYPE[,KEY=VALUE]
|
||||
```
|
||||
|
||||
Use an external cache source for a build. Supported types are `registry`,
|
||||
`local` and `gha`.
|
||||
Exposes secret to the build. The secret can be used by the build using
|
||||
[`RUN --mount=type=secret` mount](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#run---mounttypesecret).
|
||||
|
||||
- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
|
||||
can import cache from a cache manifest or (special) image configuration on the
|
||||
registry.
|
||||
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
|
||||
import cache from local files previously exported with `--cache-to`.
|
||||
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
||||
can import cache from a previously exported cache with `--cache-to` in your
|
||||
GitHub repository
|
||||
If `type` is unset it will be detected. Supported types are:
|
||||
|
||||
If no type is specified, `registry` exporter is used with a specified reference.
|
||||
#### `file`
|
||||
|
||||
`docker` driver currently only supports importing build cache from the registry.
|
||||
Attribute keys:
|
||||
|
||||
**Examples**
|
||||
- `id` - ID of the secret. Defaults to basename of the `src` path.
|
||||
- `src`, `source` - Secret filename. `id` used if unset.
|
||||
|
||||
```dockerfile
|
||||
# syntax=docker/dockerfile:1.3
|
||||
FROM python:3
|
||||
RUN pip install awscli
|
||||
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
|
||||
aws s3 cp s3://... ...
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker buildx build --cache-from=user/app:cache .
|
||||
$ docker buildx build --cache-from=user/app .
|
||||
$ docker buildx build --cache-from=type=registry,ref=user/app .
|
||||
$ docker buildx build --cache-from=type=local,src=path/to/cache .
|
||||
$ docker buildx build --cache-from=type=gha .
|
||||
$ docker buildx build --secret id=aws,src=$HOME/.aws/credentials .
|
||||
```
|
||||
|
||||
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
||||
#### `env`
|
||||
|
||||
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
|
||||
Attribute keys:
|
||||
|
||||
- `id` - ID of the secret. Defaults to `env` name.
|
||||
- `env` - Secret environment variable. `id` used if unset, otherwise will look for `src`, `source` if `id` unset.
|
||||
|
||||
```dockerfile
|
||||
# syntax=docker/dockerfile:1.3
|
||||
FROM node:alpine
|
||||
RUN --mount=type=bind,target=. \
|
||||
--mount=type=secret,id=SECRET_TOKEN \
|
||||
SECRET_TOKEN=$(cat /run/secrets/SECRET_TOKEN) yarn run test
|
||||
```
|
||||
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
|
||||
```
|
||||
|
||||
Export build cache to an external cache destination. Supported types are
|
||||
`registry`, `local`, `inline` and `gha`.
|
||||
|
||||
- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
|
||||
- [`local` type](https://github.com/moby/buildkit#local-directory-1) type
|
||||
exports cache to a local directory on the client.
|
||||
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
|
||||
type writes the cache metadata into the image configuration.
|
||||
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
|
||||
type exports cache through the [Github Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
|
||||
|
||||
`docker` driver currently only supports exporting inline cache metadata to image
|
||||
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
|
||||
to trigger inline cache exporter.
|
||||
|
||||
Attribute key:
|
||||
|
||||
- `mode` - Specifies how many layers are exported with the cache. `min` on only
|
||||
exports layers already in the final build stage, `max` exports layers for
|
||||
all stages. Metadata is always exported for the whole build.
|
||||
|
||||
**Examples**
|
||||
|
||||
```console
|
||||
$ docker buildx build --cache-to=user/app:cache .
|
||||
$ docker buildx build --cache-to=type=inline .
|
||||
$ docker buildx build --cache-to=type=registry,ref=user/app .
|
||||
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
|
||||
$ docker buildx build --cache-to=type=gha .
|
||||
$ SECRET_TOKEN=token docker buildx build --secret id=SECRET_TOKEN .
|
||||
```
|
||||
|
||||
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
|
||||
|

### <a name="shm-size"></a> Size of `/dev/shm` (--shm-size)

The format is `<number><unit>`. `number` must be greater than `0`. Unit is
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
(gigabytes). If you omit the unit, the system uses bytes.
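
A minimal sketch passing a 2-gigabyte value:

```console
$ docker buildx build --shm-size 2g .
```
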
### <a name="ssh"></a> SSH agent socket or keys to expose to the build (--ssh)

```
--ssh=default|<id>[=<socket>|<key>[,<key>]]
```

This can be useful when some commands in your Dockerfile need specific SSH
authentication (e.g., cloning a private repository).

`--ssh` exposes SSH agent socket or keys to the build and can be used with the
[`RUN --mount=type=ssh` mount](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#run---mounttypessh).

Example to access GitLab using an SSH agent socket:

```dockerfile
# syntax=docker/dockerfile:1.3
FROM alpine
RUN apk add --no-cache openssh-client
RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
RUN --mount=type=ssh ssh -q -T git@gitlab.com 2>&1 | tee /hello
# "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here
# when the build progress output type is set to `plain`.
```

```console
$ eval $(ssh-agent)
$ ssh-add ~/.ssh/id_rsa
(Input your passphrase here)
$ docker buildx build --ssh default=$SSH_AUTH_SOCK .
```
### <a name="ulimit"></a> Set ulimits (--ulimit)

`--ulimit` is specified with a soft and hard limit as such:
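
Assuming the standard Docker ulimit syntax of `<type>=<soft limit>[:<hard limit>]`, a minimal sketch:

```console
$ docker buildx build --ulimit nofile=1024:1024 .
```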

@@ -9,19 +9,19 @@

Create a new builder instance

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--append`](#append) | | | Append a node to builder instead of changing it |
| `--bootstrap` | | | Boot builder after creation |
| [`--buildkitd-flags`](#buildkitd-flags) | `string` | | Flags for buildkitd daemon |
| [`--config`](#config) | `string` | | BuildKit config file |
| [`--driver`](#driver) | `string` | | Driver to use (available: `docker`, `docker-container`, `kubernetes`) |
| [`--driver-opt`](#driver-opt) | `stringArray` | | Options for the driver |
| [`--leave`](#leave) | | | Remove a node from builder instead of changing it |
| [`--name`](#name) | `string` | | Builder instance name |
| [`--node`](#node) | `string` | | Create/modify node with given name |
| [`--platform`](#platform) | `stringArray` | | Fixed platforms for current node |
| [`--use`](#use) | | | Set the current builder instance |

<!---MARKER_GEN_END-->
@@ -47,8 +47,6 @@

The `--append` flag changes the action of the command to append a new node to an
existing builder specified by `--name`. Buildx will choose an appropriate node
for a build based on the platforms it supports.

```console
$ docker buildx create mycontext1
eager_beaver
```
@@ -67,8 +65,6 @@

Adds flags when starting the buildkitd daemon. They take precedence over the
configuration file specified by [`--config`](#config). See `buildkitd --help`
for the available flags.

```
--buildkitd-flags '--debug --debugaddr 0.0.0.0:6666'
```
@@ -132,46 +128,22 @@

Passes additional driver-specific options. Details for each driver:

- `docker` - No driver options
- `docker-container`
  - `image=IMAGE` - Sets the container image to be used for running buildkit.
  - `network=NETMODE` - Sets the network mode for running the buildkit container.
  - `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`.
- `kubernetes`
  - `image=IMAGE` - Sets the container image to be used for running buildkit.
  - `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
  - `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
  - `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
  - `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
  - `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
  - `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
  - `nodeselector="label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No Defaults. Example `nodeselector=kubernetes.io/arch=arm64`
  - `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
  - `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
  - `qemu.install=(true|false)` - Install QEMU emulation for multi platforms support.
  - `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`

**Examples**

#### Use a custom network

```console
$ docker network create foonet
$ docker buildx create --name builder --driver docker-container --driver-opt network=foonet --use
$ docker buildx inspect --bootstrap
$ docker inspect buildx_buildkit_builder0 --format={{.NetworkSettings.Networks}}
map[foonet:0xc00018c0c0]
```

#### OpenTelemetry support

To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
`JAEGER_TRACE` environment variable to the collection address using the `driver-opt`:

```console
$ docker run -d --name jaeger -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one
$ docker buildx create --name builder --driver docker-container --driver-opt network=host --driver-opt env.JAEGER_TRACE=localhost:6831 --use
$ docker buildx inspect --bootstrap
# buildx command should be traced at http://127.0.0.1:16686/
```
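
#### Set Kubernetes driver options

A hypothetical sketch combining a few of the `kubernetes` driver options listed above (the builder name and values are illustrative):

```console
$ docker buildx create --name k8s-builder --driver kubernetes --driver-opt namespace=buildkit --driver-opt replicas=2 --use
```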

### <a name="leave"></a> Remove a node from a builder (--leave)

@@ -179,8 +151,6 @@

The `--leave` flag changes the action of the command to remove a node from a
builder. The builder needs to be specified with `--name` and the node that is
removed is set with `--node`.

```console
$ docker buildx create --name mybuilder --node mybuilder0 --leave
```
@@ -204,7 +174,7 @@

The `--node` flag specifies the name of the node to be created or modified. If
none is specified, it is the name of the builder it belongs to, with an index
number suffix.

### <a name="platform"></a> Set the platforms supported by the node (--platform)

```
--platform PLATFORMS
```

@@ -216,14 +186,12 @@

will also automatically detect the platforms it supports, but manual values take
priority over the detected ones and can be used when multiple nodes support
building for the same platform.

```console
$ docker buildx create --platform linux/amd64
$ docker buildx create --platform linux/arm64,linux/arm/v8
```

### <a name="use"></a> Automatically switch to the newly created builder (--use)

The `--use` flag automatically switches the current builder to the newly created
one. Equivalent to running `docker buildx use $(docker buildx create ...)`.
@@ -9,11 +9,11 @@

Disk usage

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| `--filter` | `filter` | | Provide filter values |
| `--verbose` | | | Provide a more verbose output |

<!---MARKER_GEN_END-->
@@ -12,14 +12,14 @@

Commands to work on images in registry

| Name | Description |
| --- | --- |
| [`create`](buildx_imagetools_create.md) | Create a new image based on source images |
| [`inspect`](buildx_imagetools_inspect.md) | Show details of an image in the registry |

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |

<!---MARKER_GEN_END-->
@@ -9,22 +9,19 @@

Create a new image based on source images

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--append`](#append) | | | Append to existing manifest |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`--dry-run`](#dry-run) | | | Show final image instead of pushing |
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Read source descriptor from file |
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Set reference for new image |

<!---MARKER_GEN_END-->

## Description

Imagetools contains commands for working with manifest lists in the registry.
These commands are useful for inspecting multi-platform build results.

Create a new manifest list based on source manifests. The source manifests can
be manifest lists or single platform distribution manifests and must already
exist in the registry where the new manifest is created. If only one source is
@@ -57,16 +54,15 @@

or a JSON of OCI descriptor object.

In order to define annotations or additional platform properties like `os.version` and
`os.features` you need to add them in the OCI descriptor object encoded in JSON.

```console
$ docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
$ docker buildx imagetools create -f descr.json myuser/image
```

The descriptor in the file is merged with existing descriptor in the registry if it exists.

The supported fields for the descriptor are defined in [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties).
### <a name="tag"></a> Set reference for new image (-t, --tag)
|
||||
|
||||
```
|
||||
@@ -75,10 +71,7 @@ The supported fields for the descriptor are defined in [OCI spec](https://github
|
||||
|
||||
Use the `-t` or `--tag` flag to set the name of the image to be created.
|
||||
|
||||
**Examples**
|
||||
|
||||
```console
|
||||
$ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
|
||||
|
||||
$ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
|
||||
```
|
||||
|
@@ -5,40 +5,57 @@

```
docker buildx imagetools inspect [OPTIONS] NAME
```

<!---MARKER_GEN_START-->
Show details of an image in the registry

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`--format`](#format) | `string` | `{{.Manifest}}` | Format the output using the given Go template |
| [`--raw`](#raw) | | | Show original, unformatted JSON manifest |

<!---MARKER_GEN_END-->

## Description

Show details of an image in the registry.

```console
$ docker buildx imagetools inspect alpine
Name:      docker.io/library/alpine:latest
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
Digest:    sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300

Manifests:
  Name:      docker.io/library/alpine:latest@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/amd64

  Name:      docker.io/library/alpine:latest@sha256:e047bc2af17934d38c5a7fa9f46d443f1de3a7675546402592ef805cfa929f9d
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm/v6
  ...
```

## Examples

@@ -47,7 +64,569 @@

### <a name="builder"></a> Override the configured builder instance (--builder)

Same as [`buildx --builder`](buildx.md#builder).

### <a name="format"></a> Format the output (--format)

Format the output using the given Go template. Defaults to `{{.Manifest}}` if
unset. Following fields are available:

* `.Name`: provides the reference of the image
* `.Manifest`: provides the manifest or manifest list
* `.Image`: provides the image config
* `.BuildInfo`: provides [build info from image config](https://github.com/moby/buildkit/blob/master/docs/build-repro.md#image-config)

#### `.Name`

```console
$ docker buildx imagetools inspect alpine --format "{{.Name}}"
Name: docker.io/library/alpine:latest
```

#### `.Manifest`

```console
$ docker buildx imagetools inspect crazymax/loop --format "{{.Manifest}}"
Name:      docker.io/crazymax/loop:latest
MediaType: application/vnd.docker.distribution.manifest.v2+json
Digest:    sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1
```

```console
$ docker buildx imagetools inspect moby/buildkit:master --format "{{.Manifest}}"
Name:      docker.io/moby/buildkit:master
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
Digest:    sha256:3183f7ce54d1efb44c34b84f428ae10aaf141e553c6b52a7ff44cc7083a05a66

Manifests:
  Name:      docker.io/moby/buildkit:master@sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/amd64

  Name:      docker.io/moby/buildkit:master@sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm/v7

  Name:      docker.io/moby/buildkit:master@sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm64

  Name:      docker.io/moby/buildkit:master@sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/s390x

  Name:      docker.io/moby/buildkit:master@sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/ppc64le

  Name:      docker.io/moby/buildkit:master@sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/riscv64
```

#### `.BuildInfo`

```console
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{.BuildInfo}}"
Name: docker.io/crazymax/buildx:buildinfo
Frontend: dockerfile.v0
Attrs:
  filename: Dockerfile
  source: docker/dockerfile-upstream:master-labs
  build-arg:bar: foo
  build-arg:foo: bar
Sources:
  Type: docker-image
  Ref: docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0
  Pin: sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0

  Type: docker-image
  Ref: docker.io/library/alpine:3.13
  Pin: sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c

  Type: docker-image
  Ref: docker.io/moby/buildkit:v0.9.0
  Pin: sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab

  Type: docker-image
  Ref: docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04
  Pin: sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04

  Type: http
  Ref: https://raw.githubusercontent.com/moby/moby/master/README.md
  Pin: sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c
```

#### JSON output

A `json` go template func is also available if you want to render fields as
JSON bytes:

```console
$ docker buildx imagetools inspect crazymax/loop --format "{{json .Manifest}}"
```
```json
{
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "digest": "sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1",
  "size": 949
}
```

```console
$ docker buildx imagetools inspect moby/buildkit:master --format "{{json .Manifest}}"
```
```json
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
  "digest": "sha256:79d97f205e2799d99a3a8ae2a1ef17acb331e11784262c3faada847dc6972c52",
  "size": 2010,
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:bd1e78f06de26610fadf4eb9d04b1a45a545799d6342701726e952cc0c11c912",
      "size": 1158,
      "platform": {
        "architecture": "amd64",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:d37dcced63ec0965824fca644f0ac9efad8569434ec15b4c83adfcb3dcfc743b",
      "size": 1158,
      "platform": {
        "architecture": "arm",
        "os": "linux",
        "variant": "v7"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:ce142eb2255e6af46f2809e159fd03081697c7605a3de03b9cbe9a52ddb244bf",
      "size": 1158,
      "platform": {
        "architecture": "arm64",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:f59bfb5062fff76ce464bfa4e25ebaaaac887d6818238e119d68613c456d360c",
      "size": 1158,
      "platform": {
        "architecture": "s390x",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:cc96426e0c50a78105d5637d31356db5dd6ec594f21b24276e534a32da09645c",
      "size": 1159,
      "platform": {
        "architecture": "ppc64le",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:39f9c1e2878e6c333acb23187d6b205ce82ed934c60da326cb2c698192631478",
      "size": 1158,
      "platform": {
        "architecture": "riscv64",
        "os": "linux"
      }
    }
  ]
}
```

```console
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .BuildInfo}}"
```
```json
{
  "frontend": "dockerfile.v0",
  "attrs": {
    "build-arg:bar": "foo",
    "build-arg:foo": "bar",
    "filename": "Dockerfile",
    "source": "crazymax/dockerfile:buildattrs"
  },
  "sources": [
    {
      "type": "docker-image",
      "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
      "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
    },
    {
      "type": "docker-image",
      "ref": "docker.io/library/alpine:3.13@sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c",
      "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
    },
    {
      "type": "docker-image",
      "ref": "docker.io/moby/buildkit:v0.9.0@sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab",
      "pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
    },
    {
      "type": "docker-image",
      "ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
      "pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
    },
    {
      "type": "http",
      "ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
      "pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
    }
  ]
}
```

```console
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .}}"
```
```json
{
  "name": "crazymax/buildx:buildinfo",
  "manifest": {
    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
    "digest": "sha256:899d2c7acbc124d406820857bb51d9089717bbe4e22b97eb4bc5789e99f09f83",
    "size": 2628
  },
  "image": {
    "created": "2022-02-24T12:27:43.627154558Z",
    "architecture": "amd64",
    "os": "linux",
    "config": {
      "Env": [
        "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
        "DOCKER_TLS_CERTDIR=/certs",
        "DOCKER_CLI_EXPERIMENTAL=enabled"
      ],
      "Entrypoint": [
        "docker-entrypoint.sh"
      ],
      "Cmd": [
        "sh"
      ]
    },
    "rootfs": {
      "type": "layers",
      "diff_ids": [
        "sha256:7fcb75871b2101082203959c83514ac8a9f4ecfee77a0fe9aa73bbe56afdf1b4",
        "sha256:d3c0b963ff5684160641f936d6a4aa14efc8ff27b6edac255c07f2d03ff92e82",
        "sha256:3f8d78f13fa9b1f35d3bc3f1351d03a027c38018c37baca73f93eecdea17f244",
        "sha256:8e6eb1137b182ae0c3f5d40ca46341fda2eaeeeb5fa516a9a2bf96171238e2e0",
        "sha256:fde4c869a56b54dd76d7352ddaa813fd96202bda30b9dceb2c2f2ad22fa2e6ce",
        "sha256:52025823edb284321af7846419899234b3c66219bf06061692b709875ed0760f",
        "sha256:50adb5982dbf6126c7cf279ac3181d1e39fc9116b610b947a3dadae6f7e7c5bc",
        "sha256:9801c319e1c66c5d295e78b2d3e80547e73c7e3c63a4b71e97c8ca357224af24",
        "sha256:dfbfac44d5d228c49b42194c8a2f470abd6916d072f612a6fb14318e94fde8ae",
        "sha256:3dfb74e19dedf61568b917c19b0fd3ee4580870027ca0b6054baf239855d1322",
        "sha256:b182e707c23e4f19be73f9022a99d2d1ca7bf1ca8f280d40e4d1c10a6f51550e"
      ]
    },
    "history": [
      {
        "created": "2021-11-12T17:19:58.698676655Z",
        "created_by": "/bin/sh -c #(nop) ADD file:5a707b9d6cb5fff532e4c2141bc35707593f21da5528c9e71ae2ddb6ba4a4eb6 in / "
      },
      {
        "created": "2021-11-12T17:19:58.948920855Z",
        "created_by": "/bin/sh -c #(nop)  CMD [\"/bin/sh\"]",
        "empty_layer": true
      },
      {
        "created": "2022-02-24T12:27:38.285594601Z",
        "created_by": "RUN /bin/sh -c apk --update --no-cache add bash ca-certificates openssh-client \u0026\u0026 rm -rf /tmp/* /var/cache/apk/* # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:41.061874167Z",
        "created_by": "COPY /opt/docker/ /usr/local/bin/ # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:41.174098947Z",
        "created_by": "COPY /usr/bin/buildctl /usr/local/bin/buildctl # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:41.320343683Z",
        "created_by": "COPY /usr/bin/buildkit* /usr/local/bin/ # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:41.447149933Z",
        "created_by": "COPY /buildx /usr/libexec/docker/cli-plugins/docker-buildx # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.057722191Z",
        "created_by": "COPY /opt/docker-compose /usr/libexec/docker/cli-plugins/docker-compose # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.145224134Z",
        "created_by": "ADD https://raw.githubusercontent.com/moby/moby/master/README.md / # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.422212427Z",
        "created_by": "ENV DOCKER_TLS_CERTDIR=/certs",
        "comment": "buildkit.dockerfile.v0",
        "empty_layer": true
      },
      {
        "created": "2022-02-24T12:27:43.422212427Z",
        "created_by": "ENV DOCKER_CLI_EXPERIMENTAL=enabled",
        "comment": "buildkit.dockerfile.v0",
        "empty_layer": true
      },
      {
        "created": "2022-02-24T12:27:43.422212427Z",
        "created_by": "RUN /bin/sh -c docker --version \u0026\u0026 buildkitd --version \u0026\u0026 buildctl --version \u0026\u0026 docker buildx version \u0026\u0026 docker compose version \u0026\u0026 mkdir /certs /certs/client \u0026\u0026 chmod 1777 /certs /certs/client # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.514320155Z",
        "created_by": "COPY rootfs/modprobe.sh /usr/local/bin/modprobe # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.627154558Z",
        "created_by": "COPY rootfs/docker-entrypoint.sh /usr/local/bin/ # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.627154558Z",
        "created_by": "ENTRYPOINT [\"docker-entrypoint.sh\"]",
        "comment": "buildkit.dockerfile.v0",
        "empty_layer": true
      },
      {
        "created": "2022-02-24T12:27:43.627154558Z",
        "created_by": "CMD [\"sh\"]",
        "comment": "buildkit.dockerfile.v0",
        "empty_layer": true
      }
    ]
  },
  "buildinfo": {
    "frontend": "dockerfile.v0",
    "attrs": {
      "build-arg:bar": "foo",
      "build-arg:foo": "bar",
      "filename": "Dockerfile",
      "source": "docker/dockerfile-upstream:master-labs"
    },
    "sources": [
      {
        "type": "docker-image",
        "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
        "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
      },
      {
        "type": "docker-image",
        "ref": "docker.io/library/alpine:3.13",
        "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
      },
      {
        "type": "docker-image",
        "ref": "docker.io/moby/buildkit:v0.9.0",
        "pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
      },
      {
        "type": "docker-image",
        "ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
        "pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
      },
      {
        "type": "http",
        "ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
        "pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
      }
    ]
  }
}
```

#### Multi-platform

Multi-platform images are supported for `.Image` and `.BuildInfo` fields. If
you want to pick up a specific platform, you can specify it using the `index`
go template function:

```console
$ docker buildx imagetools inspect --format '{{json (index .Image "linux/s390x")}}' moby/buildkit:master
```
```json
{
  "created": "2022-02-25T17:13:27.89891722Z",
  "architecture": "s390x",
  "os": "linux",
  "config": {
    "Env": [
      "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    ],
    "Entrypoint": [
      "buildkitd"
    ],
    "Volumes": {
      "/var/lib/buildkit": {}
    }
  },
  "rootfs": {
    "type": "layers",
    "diff_ids": [
      "sha256:41048e32d0684349141cf05f629c5fc3c5915d1f3426b66dbb8953a540e01e1e",
      "sha256:2651209b9208fff6c053bc3c17353cb07874e50f1a9bc96d6afd03aef63de76a",
      "sha256:6741ed7e73039d853fa8902246a4c7e8bf9dd09652fd1b08251bc5f9e8876a7f",
      "sha256:92ac046adeeb65c86ae3f0b458dee04ad4a462e417661c04d77642c66494f69b"
    ]
  },
  "history": [
    {
      "created": "2021-11-24T20:41:23.709681315Z",
      "created_by": "/bin/sh -c #(nop) ADD file:cd24c711a2ef431b3ff94f9a02bfc42f159bc60de1d0eceecafea4e8af02441d in / "
    },
    {
      "created": "2021-11-24T20:41:23.94211262Z",
      "created_by": "/bin/sh -c #(nop)  CMD [\"/bin/sh\"]",
      "empty_layer": true
    },
    {
      "created": "2022-01-26T18:15:21.449825391Z",
      "created_by": "RUN /bin/sh -c apk add --no-cache fuse3 git openssh pigz xz \u0026\u0026 ln -s fusermount3 /usr/bin/fusermount # buildkit",
      "comment": "buildkit.dockerfile.v0"
    },
    {
      "created": "2022-02-24T00:34:00.924540012Z",
      "created_by": "COPY examples/buildctl-daemonless/buildctl-daemonless.sh /usr/bin/ # buildkit",
      "comment": "buildkit.dockerfile.v0"
    },
    {
      "created": "2022-02-25T17:13:27.89891722Z",
      "created_by": "VOLUME [/var/lib/buildkit]",
      "comment": "buildkit.dockerfile.v0",
      "empty_layer": true
    },
    {
      "created": "2022-02-25T17:13:27.89891722Z",
      "created_by": "COPY / /usr/bin/ # buildkit",
      "comment": "buildkit.dockerfile.v0"
    },
    {
      "created": "2022-02-25T17:13:27.89891722Z",
      "created_by": "ENTRYPOINT [\"buildkitd\"]",
      "comment": "buildkit.dockerfile.v0",
      "empty_layer": true
    }
  ]
}
```
### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)
|
||||
|
||||
Use the `--raw` option to print the original JSON bytes instead of the formatted
|
||||
output.
|
||||
Use the `--raw` option to print the unformatted JSON manifest bytes.
|
||||
|
||||
> `jq` is used here to get a better rendering of the output result.
|
||||
|
||||
```console
|
||||
$ docker buildx imagetools inspect --raw crazymax/loop | jq
|
||||
```
|
||||
```json
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"schemaVersion": 2,
|
||||
"config": {
|
||||
"mediaType": "application/vnd.docker.container.image.v1+json",
|
||||
"digest": "sha256:7ace7d324e79b360b2db8b820d83081863d96d22e734cdf297a8e7fd83f6ceb3",
|
||||
"size": 2298
|
||||
},
|
||||
"layers": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"digest": "sha256:5843afab387455b37944e709ee8c78d7520df80f8d01cf7f861aae63beeddb6b",
|
||||
"size": 2811478
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"digest": "sha256:726d3732a87e1c430d67e8969de6b222a889d45e045ebae1a008a37ba38f3b1f",
|
||||
"size": 1776812
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
|
||||
"digest": "sha256:5d7cf9b33148a8f220c84f27dd2cfae46aca019a3ea3fbf7274f6d6dbfae8f3b",
|
||||
"size": 382855
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker buildx imagetools inspect --raw moby/buildkit:master | jq
|
||||
```
|
||||
```json
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
|
||||
"schemaVersion": 2,
|
||||
"manifests": [
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"digest": "sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1",
|
||||
"size": 1158,
|
||||
"platform": {
|
||||
"architecture": "amd64",
|
||||
"os": "linux"
|
||||
}
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"digest": "sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775",
|
||||
"size": 1158,
|
||||
"platform": {
|
||||
"architecture": "arm",
|
||||
"os": "linux",
|
||||
"variant": "v7"
|
||||
}
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"digest": "sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be",
|
||||
"size": 1158,
|
||||
"platform": {
|
||||
"architecture": "arm64",
|
||||
"os": "linux"
|
||||
}
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"digest": "sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b",
|
||||
"size": 1158,
|
||||
"platform": {
|
||||
"architecture": "s390x",
|
||||
"os": "linux"
|
||||
}
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"digest": "sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1",
|
||||
"size": 1159,
|
||||
"platform": {
|
||||
"architecture": "ppc64le",
|
||||
"os": "linux"
|
||||
}
|
||||
},
|
||||
{
|
||||
"mediaType": "application/vnd.docker.distribution.manifest.v2+json",
|
||||
"digest": "sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e",
|
||||
"size": 1158,
|
||||
"platform": {
|
||||
"architecture": "riscv64",
|
||||
"os": "linux"
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
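
Since `--raw` emits the manifest bytes as plain JSON, the output composes well
with `jq` filters. As a quick illustration (using the manifest list shown
above), this extracts the digest of the `linux/arm64` entry:

```console
$ docker buildx imagetools inspect --raw moby/buildkit:master | jq -r '.manifests[] | select(.platform.architecture == "arm64") | .digest'
sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be
```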
@@ -9,10 +9,10 @@ Inspect current builder instance

### Options

| Name | Description |
| --- | --- |
| [`--bootstrap`](#bootstrap) | Ensure builder has booted before inspecting |
| [`--builder string`](#builder) | Override the configured builder instance |
| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--bootstrap`](#bootstrap) | | | Ensure builder has booted before inspecting |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |

<!---MARKER_GEN_END-->

@@ -14,18 +14,17 @@ List builder instances

Lists all builder instances and the nodes for each instance.

**Example**

```console
$ docker buildx ls

NAME/NODE       DRIVER/ENDPOINT             STATUS  PLATFORMS
elated_tesla *  docker-container
  elated_tesla0 unix:///var/run/docker.sock running linux/amd64
  elated_tesla1 ssh://ubuntu@1.2.3.4        running linux/arm64, linux/arm/v7, linux/arm/v6
  elated_tesla1 ssh://ubuntu@1.2.3.4        running linux/arm64*, linux/arm/v7, linux/arm/v6
default         docker
  default       default                     running linux/amd64
```

Each builder has one or more nodes associated with it. The current builder's
name is marked with a `*`.
name is marked with a `*` in `NAME/NODE`, and the explicit node to build
against for the target platform is marked with a `*` in the `PLATFORMS` column.

@@ -9,14 +9,14 @@ Remove build cache

### Options

| Name | Description |
| --- | --- |
| `-a`, `--all` | Remove all unused images, not just dangling ones |
| [`--builder string`](#builder) | Override the configured builder instance |
| `--filter filter` | Provide filter values (e.g., `until=24h`) |
| `-f`, `--force` | Do not prompt for confirmation |
| `--keep-storage bytes` | Amount of disk space to keep for cache |
| `--verbose` | Provide a more verbose output |
| Name | Type | Default | Description |
| --- | --- | --- | --- |
| `-a`, `--all` | | | Remove all unused images, not just dangling ones |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) |
| `-f`, `--force` | | | Do not prompt for confirmation |
| `--keep-storage` | `bytes` | `0` | Amount of disk space to keep for cache |
| `--verbose` | | | Provide a more verbose output |

<!---MARKER_GEN_END-->

@@ -9,10 +9,13 @@ Remove a builder instance

### Options

| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
| [`--keep-state`](#keep-state) | Keep BuildKit state |
| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--all-inactive`](#all-inactive) | | | Remove all inactive builders |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`-f`](#force), [`--force`](#force) | | | Do not prompt for confirmation |
| [`--keep-daemon`](#keep-daemon) | | | Keep the buildkitd daemon running |
| [`--keep-state`](#keep-state) | | | Keep BuildKit state |

<!---MARKER_GEN_END-->
@@ -24,10 +27,32 @@ default builder.

## Examples

### <a name="all-inactive"></a> Remove all inactive builders (--all-inactive)

Remove builders that are not in running state.

```console
$ docker buildx rm --all-inactive
WARNING! This will remove all builders that are not in running state. Are you sure you want to continue? [y/N] y
```

### <a name="builder"></a> Override the configured builder instance (--builder)

Same as [`buildx --builder`](buildx.md#builder).

### <a name="force"></a> Do not prompt for confirmation (--force)

Do not prompt for confirmation before removing inactive builders.

```console
$ docker buildx rm --all-inactive --force
```

### <a name="keep-daemon"></a> Keep the buildkitd daemon running (--keep-daemon)

Keep the buildkitd daemon running after the buildx context is removed. This is
useful when you manage buildkitd daemons and buildx contexts independently.
This is currently only supported by the [`docker-container` and `kubernetes` drivers](buildx_create.md#driver).
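
For example (the builder name `mybuilder` is hypothetical), this removes the
builder entry while leaving its buildkitd container running:

```console
$ docker buildx rm --keep-daemon mybuilder
```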

### <a name="keep-state"></a> Keep BuildKit state (--keep-state)

Keep BuildKit state, so it can be reused by a new builder with the same name.
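
A minimal sketch, assuming a `docker-container` builder named `mybuilder`:
remove it with its state preserved, then create a builder under the same name
to pick the state back up:

```console
$ docker buildx rm --keep-state mybuilder
$ docker buildx create --name mybuilder --driver docker-container
```
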
@@ -9,9 +9,9 @@ Stop builder instance

### Options

| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |

<!---MARKER_GEN_END-->

|
@@ -9,11 +9,11 @@ Set the current builder instance
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Description |
|
||||
| --- | --- |
|
||||
| [`--builder string`](#builder) | Override the configured builder instance |
|
||||
| `--default` | Set builder as default for current context |
|
||||
| `--global` | Builder persists context changes |
|
||||
| Name | Type | Default | Description |
|
||||
| --- | --- | --- | --- |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| `--default` | | | Set builder as default for current context |
|
||||
| `--global` | | | Builder persists context changes |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
@@ -10,10 +10,9 @@ Show buildx version information
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
## Examples
|
||||
|
||||
### View version information
|
||||
## Description
|
||||
|
||||
View version information
|
||||
|
||||
```console
|
||||
$ docker buildx version
|
||||
|
@@ -37,6 +37,7 @@ const (
type Driver struct {
	driver.InitConfig
	factory      driver.Factory
	userNSRemap  bool // true if dockerd is running with userns-remap mode
	netMode      string
	image        string
	cgroupParent string
@@ -112,7 +113,6 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
	if err := l.Wrap("creating container "+d.Name, func() error {
		hc := &container.HostConfig{
			Privileged: true,
			UsernsMode: "host",
			Mounts: []mount.Mount{
				{
					Type: mount.TypeVolume,
@@ -121,6 +121,9 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
				},
			},
		}
		if d.userNSRemap {
			hc.UsernsMode = "host"
		}
		if d.netMode != "" {
			hc.NetworkMode = container.NetworkMode(d.netMode)
		}
@@ -295,7 +298,7 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
	return nil
}

func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
	info, err := d.Info(ctx)
	if err != nil {
		return err
@@ -305,20 +308,22 @@ func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
	if err != nil {
		return err
	}
	if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
		RemoveVolumes: true,
		Force:         force,
	}); err != nil {
		return err
	}
	for _, v := range container.Mounts {
		if v.Name == d.Name+volumeStateSuffix {
	if rmDaemon {
		if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
			RemoveVolumes: true,
			Force:         force,
		}); err != nil {
			return err
		}
		for _, v := range container.Mounts {
			if v.Name != d.Name+volumeStateSuffix {
				continue
			}
			if rmVolume {
				return d.DockerAPI.VolumeRemove(ctx, d.Name+volumeStateSuffix, false)
			}
		}
	}

	}
	return nil
}
@@ -6,6 +6,7 @@ import (
	"strings"

	"github.com/docker/buildx/driver"
	dockertypes "github.com/docker/docker/api/types"
	dockerclient "github.com/docker/docker/client"
	"github.com/pkg/errors"
)
@@ -40,6 +41,20 @@ func (f *factory) New(ctx context.Context, cfg driver.InitConfig) (driver.Driver
		return nil, errors.Errorf("%s driver requires docker API access", f.Name())
	}
	d := &Driver{factory: f, InitConfig: cfg}
	dockerInfo, err := cfg.DockerAPI.Info(ctx)
	if err != nil {
		return nil, err
	}
	secOpts, err := dockertypes.DecodeSecurityOptions(dockerInfo.SecurityOptions)
	if err != nil {
		return nil, err
	}
	for _, f := range secOpts {
		if f.Name == "userns" {
			d.userNSRemap = true
			break
		}
	}
	for k, v := range cfg.DriverOpts {
		switch {
		case k == "network":

@@ -33,7 +33,7 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
	return nil
}

func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
	return nil
}

@@ -54,7 +54,7 @@ type Driver interface {
	Bootstrap(context.Context, progress.Logger) error
	Info(context.Context) (*Info, error)
	Stop(ctx context.Context, force bool) error
	Rm(ctx context.Context, force bool, rmVolume bool) error
	Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error
	Client(ctx context.Context) (*client.Client, error)
	Features() map[Feature]bool
	IsMobyDriver() bool
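
For orientation, a hedged sketch (not part of this changeset) of how a caller
might map the removal flags onto the extended `Rm` signature; `removeBuilder`
and its flag names are illustrative only:

```go
package main

import (
	"context"

	"github.com/docker/buildx/driver"
)

// removeBuilder shows the intended flag mapping: --keep-state means the state
// volume survives (rmVolume=false), and --keep-daemon means the buildkitd
// daemon survives (rmDaemon=false).
func removeBuilder(ctx context.Context, d driver.Driver, force, keepState, keepDaemon bool) error {
	return d.Rm(ctx, force, !keepState, !keepDaemon)
}
```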
@@ -1,4 +1,4 @@
package kubernetes
package context

const (
	// KubernetesEndpoint is the kubernetes endpoint name in a stored context
225 driver/kubernetes/context/endpoint_test.go Normal file
@@ -0,0 +1,225 @@
package context

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/docker/cli/cli/context"
	"github.com/docker/cli/cli/context/store"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func testEndpoint(server, defaultNamespace string, ca, cert, key []byte, skipTLSVerify bool) Endpoint {
	var tlsData *context.TLSData
	if ca != nil || cert != nil || key != nil {
		tlsData = &context.TLSData{
			CA:   ca,
			Cert: cert,
			Key:  key,
		}
	}
	return Endpoint{
		EndpointMeta: EndpointMeta{
			EndpointMetaBase: context.EndpointMetaBase{
				Host:          server,
				SkipTLSVerify: skipTLSVerify,
			},
			DefaultNamespace: defaultNamespace,
		},
		TLSData: tlsData,
	}
}

var testStoreCfg = store.NewConfig(
	func() interface{} {
		return &map[string]interface{}{}
	},
	store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
)

func TestSaveLoadContexts(t *testing.T) {
	storeDir, err := ioutil.TempDir("", "test-load-save-k8-context")
	require.NoError(t, err)
	defer os.RemoveAll(storeDir)
	store := store.New(storeDir, testStoreCfg)
	require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, false), "raw-notls"))
	require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, true), "raw-notls-skip"))
	require.NoError(t, save(store, testEndpoint("https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true), "raw-tls"))

	kcFile, err := ioutil.TempFile(os.TempDir(), "test-load-save-k8-context")
	require.NoError(t, err)
	defer os.Remove(kcFile.Name())
	defer kcFile.Close()
	cfg := clientcmdapi.NewConfig()
	cfg.AuthInfos["user"] = clientcmdapi.NewAuthInfo()
	cfg.Contexts["context1"] = clientcmdapi.NewContext()
	cfg.Clusters["cluster1"] = clientcmdapi.NewCluster()
	cfg.Contexts["context2"] = clientcmdapi.NewContext()
	cfg.Clusters["cluster2"] = clientcmdapi.NewCluster()
	cfg.AuthInfos["user"].ClientCertificateData = []byte("cert")
	cfg.AuthInfos["user"].ClientKeyData = []byte("key")
	cfg.Clusters["cluster1"].Server = "https://server1"
	cfg.Clusters["cluster1"].InsecureSkipTLSVerify = true
	cfg.Clusters["cluster2"].Server = "https://server2"
	cfg.Clusters["cluster2"].CertificateAuthorityData = []byte("ca")
	cfg.Contexts["context1"].AuthInfo = "user"
	cfg.Contexts["context1"].Cluster = "cluster1"
	cfg.Contexts["context1"].Namespace = "namespace1"
	cfg.Contexts["context2"].AuthInfo = "user"
	cfg.Contexts["context2"].Cluster = "cluster2"
	cfg.Contexts["context2"].Namespace = "namespace2"
	cfg.CurrentContext = "context1"
	cfgData, err := clientcmd.Write(*cfg)
	require.NoError(t, err)
	_, err = kcFile.Write(cfgData)
	require.NoError(t, err)
	kcFile.Close()

	epDefault, err := FromKubeConfig(kcFile.Name(), "", "")
	require.NoError(t, err)
	epContext2, err := FromKubeConfig(kcFile.Name(), "context2", "namespace-override")
	require.NoError(t, err)
	require.NoError(t, save(store, epDefault, "embed-default-context"))
	require.NoError(t, save(store, epContext2, "embed-context2"))

	rawNoTLSMeta, err := store.GetMetadata("raw-notls")
	require.NoError(t, err)
	rawNoTLSSkipMeta, err := store.GetMetadata("raw-notls-skip")
	require.NoError(t, err)
	rawTLSMeta, err := store.GetMetadata("raw-tls")
	require.NoError(t, err)
	embededDefaultMeta, err := store.GetMetadata("embed-default-context")
	require.NoError(t, err)
	embededContext2Meta, err := store.GetMetadata("embed-context2")
	require.NoError(t, err)

	rawNoTLS := EndpointFromContext(rawNoTLSMeta)
	rawNoTLSSkip := EndpointFromContext(rawNoTLSSkipMeta)
	rawTLS := EndpointFromContext(rawTLSMeta)
	embededDefault := EndpointFromContext(embededDefaultMeta)
	embededContext2 := EndpointFromContext(embededContext2Meta)

	rawNoTLSEP, err := rawNoTLS.WithTLSData(store, "raw-notls")
	require.NoError(t, err)
	checkClientConfig(t, rawNoTLSEP, "https://test", "test", nil, nil, nil, false)
	rawNoTLSSkipEP, err := rawNoTLSSkip.WithTLSData(store, "raw-notls-skip")
	require.NoError(t, err)
	checkClientConfig(t, rawNoTLSSkipEP, "https://test", "test", nil, nil, nil, true)
	rawTLSEP, err := rawTLS.WithTLSData(store, "raw-tls")
	require.NoError(t, err)
	checkClientConfig(t, rawTLSEP, "https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true)
	embededDefaultEP, err := embededDefault.WithTLSData(store, "embed-default-context")
	require.NoError(t, err)
	checkClientConfig(t, embededDefaultEP, "https://server1", "namespace1", nil, []byte("cert"), []byte("key"), true)
	embededContext2EP, err := embededContext2.WithTLSData(store, "embed-context2")
	require.NoError(t, err)
	checkClientConfig(t, embededContext2EP, "https://server2", "namespace-override", []byte("ca"), []byte("cert"), []byte("key"), false)
}

func checkClientConfig(t *testing.T, ep Endpoint, server, namespace string, ca, cert, key []byte, skipTLSVerify bool) {
	config := ep.KubernetesConfig()
	cfg, err := config.ClientConfig()
	require.NoError(t, err)
	ns, _, _ := config.Namespace()
	assert.Equal(t, server, cfg.Host)
	assert.Equal(t, namespace, ns)
	assert.Equal(t, ca, cfg.CAData)
	assert.Equal(t, cert, cfg.CertData)
	assert.Equal(t, key, cfg.KeyData)
	assert.Equal(t, skipTLSVerify, cfg.Insecure)
}

func save(s store.Writer, ep Endpoint, name string) error {
	meta := store.Metadata{
		Endpoints: map[string]interface{}{
			KubernetesEndpoint: ep.EndpointMeta,
		},
		Name: name,
	}
	if err := s.CreateOrUpdate(meta); err != nil {
		return err
	}
	return s.ResetEndpointTLSMaterial(name, KubernetesEndpoint, ep.TLSData.ToStoreTLSData())
}

func TestSaveLoadGKEConfig(t *testing.T) {
	storeDir, err := ioutil.TempDir("", t.Name())
	require.NoError(t, err)
	defer os.RemoveAll(storeDir)
	store := store.New(storeDir, testStoreCfg)
	cfg, err := clientcmd.LoadFromFile("fixtures/gke-kubeconfig")
	require.NoError(t, err)
	clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
	expectedCfg, err := clientCfg.ClientConfig()
	require.NoError(t, err)
	ep, err := FromKubeConfig("fixtures/gke-kubeconfig", "", "")
	require.NoError(t, err)
	require.NoError(t, save(store, ep, "gke-context"))
	persistedMetadata, err := store.GetMetadata("gke-context")
	require.NoError(t, err)
	persistedEPMeta := EndpointFromContext(persistedMetadata)
	assert.True(t, persistedEPMeta != nil)
	persistedEP, err := persistedEPMeta.WithTLSData(store, "gke-context")
	require.NoError(t, err)
	persistedCfg := persistedEP.KubernetesConfig()
	actualCfg, err := persistedCfg.ClientConfig()
	require.NoError(t, err)
	assert.Equal(t, expectedCfg.AuthProvider, actualCfg.AuthProvider)
}

func TestSaveLoadEKSConfig(t *testing.T) {
	storeDir, err := ioutil.TempDir("", t.Name())
	require.NoError(t, err)
	defer os.RemoveAll(storeDir)
	store := store.New(storeDir, testStoreCfg)
	cfg, err := clientcmd.LoadFromFile("fixtures/eks-kubeconfig")
	require.NoError(t, err)
	clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
	expectedCfg, err := clientCfg.ClientConfig()
	require.NoError(t, err)
	ep, err := FromKubeConfig("fixtures/eks-kubeconfig", "", "")
	require.NoError(t, err)
	require.NoError(t, save(store, ep, "eks-context"))
	persistedMetadata, err := store.GetMetadata("eks-context")
	require.NoError(t, err)
	persistedEPMeta := EndpointFromContext(persistedMetadata)
	assert.True(t, persistedEPMeta != nil)
	persistedEP, err := persistedEPMeta.WithTLSData(store, "eks-context")
	require.NoError(t, err)
	persistedCfg := persistedEP.KubernetesConfig()
	actualCfg, err := persistedCfg.ClientConfig()
	require.NoError(t, err)
	assert.Equal(t, expectedCfg.ExecProvider, actualCfg.ExecProvider)
}

func TestSaveLoadK3SConfig(t *testing.T) {
	storeDir, err := ioutil.TempDir("", t.Name())
	require.NoError(t, err)
	defer os.RemoveAll(storeDir)
	store := store.New(storeDir, testStoreCfg)
	cfg, err := clientcmd.LoadFromFile("fixtures/k3s-kubeconfig")
	require.NoError(t, err)
	clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
	expectedCfg, err := clientCfg.ClientConfig()
	require.NoError(t, err)
	ep, err := FromKubeConfig("fixtures/k3s-kubeconfig", "", "")
	require.NoError(t, err)
	require.NoError(t, save(store, ep, "k3s-context"))
	persistedMetadata, err := store.GetMetadata("k3s-context")
	require.NoError(t, err)
	persistedEPMeta := EndpointFromContext(persistedMetadata)
	assert.True(t, persistedEPMeta != nil)
	persistedEP, err := persistedEPMeta.WithTLSData(store, "k3s-context")
	require.NoError(t, err)
	persistedCfg := persistedEP.KubernetesConfig()
	actualCfg, err := persistedCfg.ClientConfig()
	require.NoError(t, err)
	assert.True(t, len(actualCfg.Username) > 0)
	assert.True(t, len(actualCfg.Password) > 0)
	assert.Equal(t, expectedCfg.Username, actualCfg.Username)
	assert.Equal(t, expectedCfg.Password, actualCfg.Password)
}
23 driver/kubernetes/context/fixtures/eks-kubeconfig Normal file
@@ -0,0 +1,23 @@
apiVersion: v1
clusters:
- cluster:
    server: https://some-server
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: aws
  name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1alpha1
      command: heptio-authenticator-aws
      args:
        - "token"
        - "-i"
        - "eks-cf"
23 driver/kubernetes/context/fixtures/gke-kubeconfig Normal file
@@ -0,0 +1,23 @@
apiVersion: v1
clusters:
- cluster:
    server: https://some-server
  name: gke_sample
contexts:
- context:
    cluster: gke_sample
    user: gke_sample
  name: gke_sample
current-context: gke_sample
kind: Config
preferences: {}
users:
- name: gke_sample
  user:
    auth-provider:
      config:
        cmd-args: config config-helper --format=json
        cmd-path: /google/google-cloud-sdk/bin/gcloud
        expiry-key: '{.credential.token_expiry}'
        token-key: '{.credential.access_token}'
      name: gcp
20 driver/kubernetes/context/fixtures/k3s-kubeconfig Normal file
@@ -0,0 +1,20 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: dGhlLWNh
    server: https://someserver
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
    namespace: zoinx
  name: test
current-context: test
kind: Config
preferences: {}
users:
- name: test-user
  user:
    username: admin
    password: testpwd
20 driver/kubernetes/context/fixtures/test-kubeconfig Normal file
@@ -0,0 +1,20 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: dGhlLWNh
    server: https://someserver
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
    namespace: zoinx
  name: test
current-context: test
kind: Config
preferences: {}
users:
- name: test-user
  user:
    client-certificate-data: dGhlLWNlcnQ=
    client-key-data: dGhlLWtleQ==
@@ -1,4 +1,4 @@
package kubernetes
package context

import (
	"os"
@@ -7,9 +7,7 @@ import (
	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/context"
	"github.com/docker/cli/cli/context/store"
	api "github.com/docker/compose-on-kubernetes/api"
	"github.com/docker/docker/pkg/homedir"
	"github.com/pkg/errors"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)
@@ -88,23 +86,18 @@ func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {

// ResolveDefault returns endpoint metadata for the default Kubernetes
// endpoint, which is derived from the env-based kubeconfig.
func (c *EndpointMeta) ResolveDefault(stackOrchestrator command.Orchestrator) (interface{}, *store.EndpointTLSData, error) {
func (c *EndpointMeta) ResolveDefault() (interface{}, *store.EndpointTLSData, error) {
	kubeconfig := os.Getenv("KUBECONFIG")
	if kubeconfig == "" {
		kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
	}
	kubeEP, err := FromKubeConfig(kubeconfig, "", "")
	if err != nil {
		if stackOrchestrator == command.OrchestratorKubernetes || stackOrchestrator == command.OrchestratorAll {
			return nil, nil, errors.Wrapf(err, "default orchestrator is %s but unable to resolve kubernetes endpoint", stackOrchestrator)
		}

		// We deliberately quash the error here, returning nil
		// for the first argument is sufficient to indicate we weren't able to
		// provide a default
		return nil, nil, nil
	}

	var tls *store.EndpointTLSData
	if kubeEP.TLSData != nil {
		tls = kubeEP.TLSData.ToStoreTLSData()
@@ -142,5 +135,21 @@ func ConfigFromContext(name string, s store.Reader) (clientcmd.ClientConfig, err
		return ep.KubernetesConfig(), nil
	}
	// context has no kubernetes endpoint
	return api.NewKubernetesConfig(""), nil
	return NewKubernetesConfig(""), nil
}

// NewKubernetesConfig resolves the path to the desired Kubernetes configuration
// file based on the KUBECONFIG environment variable and command line flags.
func NewKubernetesConfig(configPath string) clientcmd.ClientConfig {
	kubeConfig := configPath
	if kubeConfig == "" {
		if config := os.Getenv("KUBECONFIG"); config != "" {
			kubeConfig = config
		} else {
			kubeConfig = filepath.Join(homedir.Get(), ".kube/config")
		}
	}
	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfig},
		&clientcmd.ConfigOverrides{})
}
23 driver/kubernetes/context/load_test.go Normal file
@@ -0,0 +1,23 @@
package context

import (
	"os"
	"testing"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/config/configfile"
	cliflags "github.com/docker/cli/cli/flags"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestDefaultContextInitializer(t *testing.T) {
	cli, err := command.NewDockerCli()
	require.NoError(t, err)
	os.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
	defer os.Unsetenv("KUBECONFIG")
	ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, &configfile.ConfigFile{}, command.DefaultContextStoreConfig(), cli.Err())
	require.NoError(t, err)
	assert.Equal(t, "default", ctx.Meta.Name)
	assert.Equal(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
}
@@ -1,4 +1,4 @@
package kubernetes
package context

import (
	"io/ioutil"
@@ -165,7 +165,11 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
	return nil
}

func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
	if !rmDaemon {
		return nil
	}

	if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
		if !apierrors.IsNotFound(err) {
			return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
41 go.mod
@@ -8,13 +8,12 @@ require (
	github.com/bugsnag/panicwrap v1.2.0 // indirect
	github.com/cenkalti/backoff v2.1.1+incompatible // indirect
	github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
	github.com/compose-spec/compose-go v0.0.0-20210729195839-de56f4f0cb3c
	github.com/compose-spec/compose-go v1.2.1
	github.com/containerd/console v1.0.3
	github.com/containerd/containerd v1.5.5
	github.com/docker/cli v20.10.8+incompatible
	github.com/docker/cli-docs-tool v0.2.1
	github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
	github.com/docker/distribution v2.7.1+incompatible
	github.com/containerd/containerd v1.6.1
	github.com/docker/cli v20.10.12+incompatible
	github.com/docker/cli-docs-tool v0.4.0
	github.com/docker/distribution v2.8.0+incompatible
	github.com/docker/docker v20.10.7+incompatible
	github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
	github.com/docker/go-units v0.4.0
@@ -31,9 +30,10 @@ require (
	github.com/jinzhu/gorm v1.9.2 // indirect
	github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
	github.com/moby/buildkit v0.9.1-0.20211019185819-8778943ac3da
	github.com/moby/buildkit v0.10.1-0.20220403220257-10e6f94bf90d
	github.com/morikuni/aec v1.0.0
	github.com/opencontainers/go-digest v1.0.0
	github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
	github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
	github.com/pelletier/go-toml v1.9.4
	github.com/pkg/errors v0.9.1
	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
@@ -42,23 +42,24 @@ require (
	github.com/spf13/pflag v1.0.5
	github.com/stretchr/testify v1.7.0
	github.com/theupdateframework/notary v0.6.1 // indirect
	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
	github.com/zclconf/go-cty v1.7.1
	go.opentelemetry.io/otel v1.0.0-RC1
	go.opentelemetry.io/otel/trace v1.0.0-RC1
	github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3 // indirect
	github.com/zclconf/go-cty v1.10.0
	go.opentelemetry.io/otel v1.4.1
	go.opentelemetry.io/otel/trace v1.4.1
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
	google.golang.org/grpc v1.44.0
	gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
	gopkg.in/fatih/pool.v2 v2.0.0 // indirect
	gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
	k8s.io/api v0.22.1
	k8s.io/apimachinery v0.22.1
	k8s.io/client-go v0.22.1
	k8s.io/api v0.23.4
	k8s.io/apimachinery v0.23.4
	k8s.io/client-go v0.23.4
)

replace (
	github.com/docker/cli => github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible
	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20210817025855-ba2adeebdb8d+incompatible
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.0.0-20210714055410-d010b05b4939
	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/httptrace/otelhttptrace v0.0.0-20210714055410-d010b05b4939
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/otelhttp v0.0.0-20210714055410-d010b05b4939
	github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible
	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220121014307-40bb9831756f+incompatible
	k8s.io/api => k8s.io/api v0.22.4
	k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
	k8s.io/client-go => k8s.io/client-go v0.22.4
)
@@ -1,6 +1,7 @@
# syntax=docker/dockerfile:1.3-labs

ARG GO_VERSION=1.17
ARG MODOUTDATED_VERSION=v0.8.0

FROM golang:${GO_VERSION}-alpine AS base
RUN apk add --no-cache git rsync
@@ -36,8 +37,9 @@ if [ -n "$(git status --porcelain -- go.mod go.sum vendor)" ]; then
fi
EOT

FROM psampaz/go-mod-outdated:v0.8.0 AS go-mod-outdated
FROM psampaz/go-mod-outdated:${MODOUTDATED_VERSION} AS go-mod-outdated
FROM base AS outdated
ARG _RANDOM
RUN --mount=target=.,ro \
  --mount=target=/go/pkg/mod,type=cache \
  --mount=from=go-mod-outdated,source=/home/go-mod-outdated,target=/usr/bin/go-mod-outdated \
108 hack/test-driver
@@ -10,13 +10,20 @@ set -eu -o pipefail
: ${MULTI_NODE=0}
: ${PLATFORMS=linux/amd64,linux/arm64}

function buildxCmd {
  (set -x ; $BUILDX_CMD "$@")
}

function clean {
  rm -rf "$context"
  ${BUILDX_CMD} rm "$builderName"
  if [ "$builderName" != "default" ]; then
    buildxCmd rm "$builderName"
  fi
}

context=$(mktemp -d -t buildx-output.XXXXXXXXXX)
dockerfile=${context}/Dockerfile
bakedef=${context}/docker-bake.hcl
trap clean EXIT

builderName=buildx-test-$(openssl rand -hex 16)
@@ -44,15 +51,12 @@ if [ "$DRIVER" != "docker" ]; then
    if [ "$firstNode" = "0" ]; then
      createFlags="$createFlags --append"
    fi
    (
      set -x
      ${BUILDX_CMD} create ${createFlags} \
        --name="${builderName}" \
        --node="${builderName}-${platform/\//-}" \
        --driver="${DRIVER}" \
        --driver-opt="${driverOpt}" \
        --platform="${platform}"
    )
    buildxCmd create ${createFlags} \
      --name="${builderName}" \
      --node="${builderName}-${platform/\//-}" \
      --driver="${DRIVER}" \
      --driver-opt="${driverOpt}" \
      --platform="${platform}"
    firstNode=0
  done
else
@@ -60,27 +64,37 @@ if [ "$DRIVER" != "docker" ]; then
  if [ -f "$BUILDKIT_CFG" ]; then
    createFlags="$createFlags --config=${BUILDKIT_CFG}"
  fi
  (
    set -x
    ${BUILDX_CMD} create ${createFlags} \
      --name="${builderName}" \
      --driver="${DRIVER}" \
      --driver-opt="${driverOpt}" \
      --platform="${PLATFORMS}"
  )
  buildxCmd create ${createFlags} \
    --name="${builderName}" \
    --driver="${DRIVER}" \
    --driver-opt="${driverOpt}" \
    --platform="${PLATFORMS}"
  fi
fi

function buildOutput {
  local name=$1
  if [ "$DRIVER" != "docker" ]; then
    if [ "${MULTI_NODE}" = "1" ]; then
      echo "type=cacheonly"
    else
      echo "type=oci,dest=${context}/${name}.tar"
    fi
  else
    echo "type=docker,name=${name}"
  fi
}

# multi-platform not supported by docker driver
buildPlatformFlag=
bakePlatformFlag=
if [ "$DRIVER" != "docker" ]; then
  buildPlatformFlag=--platform="${PLATFORMS}"
  bakePlatformFlag=--set="*.platform=${PLATFORMS}"
fi

set -x

# inspect and bootstrap
${BUILDX_CMD} inspect --bootstrap --builder="${builderName}"
buildxCmd inspect --bootstrap --builder="${builderName}"

# create dockerfile
cat > "${dockerfile}" <<EOL
@@ -88,14 +102,60 @@ FROM busybox as build
ARG TARGETPLATFORM
ARG BUILDPLATFORM
RUN echo "I am running on \$BUILDPLATFORM, building for \$TARGETPLATFORM" > /log
FROM busybox

FROM busybox AS log
COPY --from=build /log /log
RUN cat /log
RUN uname -a

FROM busybox AS hello
RUN echo hello > /hello

FROM scratch
COPY --from=log /log /log
COPY --from=hello /hello /hello
EOL

# build
${BUILDX_CMD} build ${buildPlatformFlag} \
  --output="type=cacheonly" \
buildxCmd build ${buildPlatformFlag} \
  --output="$(buildOutput buildx-test-build)" \
  --builder="${builderName}" \
  --metadata-file="${context}/metadata-build.json" \
  "${context}"
cat "${context}/metadata-build.json"

# create bake def
cat > "${bakedef}" <<EOL
group "default" {
  targets = ["release"]
}
group "all" {
  targets = ["log", "hello"]
}
target "release" {
  output = ["$(buildOutput buildx-test-bake-release)"]
}
target "log" {
  output = ["$(buildOutput buildx-test-bake-log)"]
}
target "hello" {
  output = ["$(buildOutput buildx-test-bake-hello)"]
}
EOL

# bake default target
buildxCmd bake ${bakePlatformFlag} \
  --file="${bakedef}" \
  --builder="${builderName}" \
  --set "*.context=${context}" \
  --metadata-file="${context}/metadata-bake-def.json"
cat "${context}/metadata-bake-def.json"

# bake all target
buildxCmd bake ${bakePlatformFlag} \
  --file="${bakedef}" \
  --builder="${builderName}" \
  --set "*.context=${context}" \
  --metadata-file="${context}/metadata-bake-all.json" \
  all
cat "${context}/metadata-bake-all.json"
@@ -37,6 +37,32 @@ func GetCurrentEndpoint(dockerCli command.Cli) (string, error) {
	return de, nil
}

func GetProxyConfig(dockerCli command.Cli) map[string]string {
	cfg := dockerCli.ConfigFile()
	host := dockerCli.Client().DaemonHost()

	proxy, ok := cfg.Proxies[host]
	if !ok {
		proxy = cfg.Proxies["default"]
	}

	m := map[string]string{}

	if v := proxy.HTTPProxy; v != "" {
		m["HTTP_PROXY"] = v
	}
	if v := proxy.HTTPSProxy; v != "" {
		m["HTTPS_PROXY"] = v
	}
	if v := proxy.NoProxy; v != "" {
		m["NO_PROXY"] = v
	}
	if v := proxy.FTPProxy; v != "" {
		m["FTP_PROXY"] = v
	}
	return m
}

// GetDockerEndpoint returns docker endpoint string for given context
func GetDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
	list, err := dockerCli.ContextStore().List()
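
A short usage sketch (assumed, not part of this diff): the map returned by
`GetProxyConfig` can be copied into a build's arguments so containers see the
daemon-scoped proxy settings; `proxyBuildArgs` is a hypothetical helper in the
same package:

```go
// proxyBuildArgs copies the resolved proxy settings (HTTP_PROXY, HTTPS_PROXY,
// NO_PROXY, FTP_PROXY) into a fresh map suitable for use as build args.
func proxyBuildArgs(dockerCli command.Cli) map[string]string {
	args := make(map[string]string)
	for k, v := range GetProxyConfig(dockerCli) {
		args[k] = v
	}
	return args
}
```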
@@ -8,13 +8,18 @@ import (
	"io"
	"net/http"

	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/docker/buildx/util/resolver"
	clitypes "github.com/docker/cli/cli/config/types"
	"github.com/docker/distribution/reference"
	"github.com/moby/buildkit/util/contentutil"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/util/tracing"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
)

type Auth interface {
@@ -27,14 +32,16 @@ type Opt struct {
}

type Resolver struct {
	auth  docker.Authorizer
	hosts docker.RegistryHosts
	auth   docker.Authorizer
	hosts  docker.RegistryHosts
	buffer contentutil.Buffer
}

func New(opt Opt) *Resolver {
	return &Resolver{
		auth:  docker.NewDockerAuthorizer(docker.WithAuthCreds(toCredentialsFunc(opt.Auth)), docker.WithAuthClient(http.DefaultClient)),
		hosts: resolver.NewRegistryConfig(opt.RegistryConfig),
		auth:   docker.NewDockerAuthorizer(docker.WithAuthCreds(toCredentialsFunc(opt.Auth)), docker.WithAuthClient(http.DefaultClient)),
		hosts:  resolver.NewRegistryConfig(opt.RegistryConfig),
		buffer: contentutil.NewBuffer(),
	}
}

@@ -47,14 +54,20 @@ func (r *Resolver) resolver() remotes.Resolver {
			}
			for i := range res {
				res[i].Authorizer = r.auth
				res[i].Client = tracing.DefaultClient
			}
			return res, nil
		},
		Client: tracing.DefaultClient,
	})
}

func (r *Resolver) Resolve(ctx context.Context, in string) (string, ocispec.Descriptor, error) {
	// discard containerd logger to avoid printing unnecessary info during image reference resolution.
	// https://github.com/containerd/containerd/blob/1a88cf5242445657258e0c744def5017d7cfb492/remotes/docker/resolver.go#L288
	logger := logrus.New()
	logger.Out = io.Discard
	ctx = log.WithLogger(ctx, logrus.NewEntry(logger))

	ref, err := parseRef(in)
	if err != nil {
		return "", ocispec.Descriptor{}, err
@@ -149,3 +162,11 @@ func RegistryAuthForRef(ref string, a Auth) (string, error) {
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}

func (r *Resolver) ImageConfig(ctx context.Context, in string, platform *ocispec.Platform) (digest.Digest, []byte, error) {
	in, _, err := r.Resolve(ctx, in)
	if err != nil {
		return "", nil, err
	}
	return imageutil.Config(ctx, in, r.resolver(), r.buffer, nil, platform)
}
@@ -1,74 +1,357 @@
package imagetools

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"sort"
	"strings"
	"text/tabwriter"
	"text/template"

	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	"github.com/docker/distribution/reference"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	binfotypes "github.com/moby/buildkit/util/buildinfo/types"
	"github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
)

func PrintManifestList(dt []byte, desc ocispec.Descriptor, refstr string, out io.Writer) error {
	ref, err := parseRef(refstr)
const defaultPfx = "  "

type Printer struct {
	ctx      context.Context
	resolver *Resolver

	name   string
	format string

	raw       []byte
	ref       reference.Named
	manifest  ocispecs.Descriptor
	index     ocispecs.Index
	platforms []ocispecs.Platform
}

func NewPrinter(ctx context.Context, opt Opt, name string, format string) (*Printer, error) {
	resolver := New(opt)

	ref, err := parseRef(name)
	if err != nil {
		return nil, err
	}

	dt, manifest, err := resolver.Get(ctx, name)
	if err != nil {
		return nil, err
	}

	var index ocispecs.Index
	if err = json.Unmarshal(dt, &index); err != nil {
		return nil, err
	}

	var pforms []ocispecs.Platform
	switch manifest.MediaType {
	case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
		for _, m := range index.Manifests {
			pforms = append(pforms, *m.Platform)
		}
	default:
		pforms = append(pforms, platforms.DefaultSpec())
	}

	return &Printer{
		ctx:       ctx,
		resolver:  resolver,
		name:      name,
		format:    format,
		raw:       dt,
		ref:       ref,
		manifest:  manifest,
		index:     index,
		platforms: pforms,
	}, nil
}

func (p *Printer) Print(raw bool, out io.Writer) error {
	if raw {
		_, err := fmt.Fprintf(out, "%s", p.raw) // avoid newline to keep digest
		return err
	}

	if p.format == "" {
		w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
		_, _ = fmt.Fprintf(w, "Name:\t%s\n", p.ref.String())
		_, _ = fmt.Fprintf(w, "MediaType:\t%s\n", p.manifest.MediaType)
		_, _ = fmt.Fprintf(w, "Digest:\t%s\n", p.manifest.Digest)
		_ = w.Flush()
		switch p.manifest.MediaType {
		case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
			if err := p.printManifestList(out); err != nil {
				return err
			}
		}
		return nil
	}

	tpl, err := template.New("").Funcs(template.FuncMap{
		"json": func(v interface{}) string {
			b, _ := json.MarshalIndent(v, "", " ")
			return string(b)
		},
	}).Parse(p.format)
	if err != nil {
		return err
	}

	var mfst ocispec.Index
	if err := json.Unmarshal(dt, &mfst); err != nil {
	imageconfigs := make(map[string]*ocispecs.Image)
	buildinfos := make(map[string]*binfotypes.BuildInfo)

	eg, _ := errgroup.WithContext(p.ctx)
	for _, platform := range p.platforms {
		func(platform ocispecs.Platform) {
			eg.Go(func() error {
				img, dtic, err := p.getImageConfig(&platform)
				if err != nil {
					return err
				} else if img != nil {
					imageconfigs[platforms.Format(platform)] = img
				}
				if bi, err := p.getBuildInfo(dtic); err != nil {
					return err
				} else if bi != nil {
					buildinfos[platforms.Format(platform)] = bi
				}
				return nil
			})
		}(platform)
	}
	if err := eg.Wait(); err != nil {
		return err
	}

	format := tpl.Root.String()

	var manifest interface{}
	switch p.manifest.MediaType {
	case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest:
		manifest = p.manifest
	case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
		manifest = struct {
			SchemaVersion int                   `json:"schemaVersion"`
			MediaType     string                `json:"mediaType,omitempty"`
			Digest        digest.Digest         `json:"digest"`
			Size          int64                 `json:"size"`
			Manifests     []ocispecs.Descriptor `json:"manifests"`
			Annotations   map[string]string     `json:"annotations,omitempty"`
		}{
			SchemaVersion: p.index.Versioned.SchemaVersion,
			MediaType:     p.index.MediaType,
			Digest:        p.manifest.Digest,
			Size:          p.manifest.Size,
			Manifests:     p.index.Manifests,
			Annotations:   p.index.Annotations,
		}
	}

	switch {
	// TODO: print formatted config
	case strings.HasPrefix(format, "{{.Manifest"), strings.HasPrefix(format, "{{.BuildInfo"):
		w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
		_, _ = fmt.Fprintf(w, "Name:\t%s\n", p.ref.String())
		if strings.HasPrefix(format, "{{.Manifest") {
			_, _ = fmt.Fprintf(w, "MediaType:\t%s\n", p.manifest.MediaType)
			_, _ = fmt.Fprintf(w, "Digest:\t%s\n", p.manifest.Digest)
			_ = w.Flush()
			switch p.manifest.MediaType {
			case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
				_ = p.printManifestList(out)
			}
		} else if strings.HasPrefix(format, "{{.BuildInfo") {
			_ = w.Flush()
			_ = p.printBuildInfos(buildinfos, out)
		}
	default:
		if len(p.platforms) > 1 {
			return tpl.Execute(out, struct {
				Name      string                           `json:"name,omitempty"`
				Manifest  interface{}                      `json:"manifest,omitempty"`
				Image     map[string]*ocispecs.Image       `json:"image,omitempty"`
				BuildInfo map[string]*binfotypes.BuildInfo `json:"buildinfo,omitempty"`
			}{
				Name:      p.name,
				Manifest:  manifest,
				Image:     imageconfigs,
				BuildInfo: buildinfos,
			})
		}
		var ic *ocispecs.Image
		for _, v := range imageconfigs {
			ic = v
		}
		var bi *binfotypes.BuildInfo
		for _, v := range buildinfos {
			bi = v
		}
		return tpl.Execute(out, struct {
			Name      string                `json:"name,omitempty"`
			Manifest  interface{}           `json:"manifest,omitempty"`
			Image     *ocispecs.Image       `json:"image,omitempty"`
			BuildInfo *binfotypes.BuildInfo `json:"buildinfo,omitempty"`
		}{
			Name:      p.name,
			Manifest:  manifest,
			Image:     ic,
			BuildInfo: bi,
		})
	}

	return nil
}

func (p *Printer) printManifestList(out io.Writer) error {
	w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)

	fmt.Fprintf(w, "Name:\t%s\n", ref.String())
	fmt.Fprintf(w, "MediaType:\t%s\n", desc.MediaType)
	fmt.Fprintf(w, "Digest:\t%s\n", desc.Digest)
	fmt.Fprintf(w, "\t\n")

	fmt.Fprintf(w, "Manifests:\t\n")
	w.Flush()

	pfx := "  "
	_, _ = fmt.Fprintf(w, "\t\n")
	_, _ = fmt.Fprintf(w, "Manifests:\t\n")
	_ = w.Flush()

	w = tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
	for i, m := range mfst.Manifests {
	for i, m := range p.index.Manifests {
		if i != 0 {
			fmt.Fprintf(w, "\t\n")
			_, _ = fmt.Fprintf(w, "\t\n")
		}
		cr, err := reference.WithDigest(ref, m.Digest)
		cr, err := reference.WithDigest(p.ref, m.Digest)
		if err != nil {
			return err
		}
		fmt.Fprintf(w, "%sName:\t%s\n", pfx, cr.String())
		fmt.Fprintf(w, "%sMediaType:\t%s\n", pfx, m.MediaType)
		_, _ = fmt.Fprintf(w, "%sName:\t%s\n", defaultPfx, cr.String())
		_, _ = fmt.Fprintf(w, "%sMediaType:\t%s\n", defaultPfx, m.MediaType)
		if p := m.Platform; p != nil {
			fmt.Fprintf(w, "%sPlatform:\t%s\n", pfx, platforms.Format(*p))
			_, _ = fmt.Fprintf(w, "%sPlatform:\t%s\n", defaultPfx, platforms.Format(*p))
			if p.OSVersion != "" {
				fmt.Fprintf(w, "%sOSVersion:\t%s\n", pfx, p.OSVersion)
				_, _ = fmt.Fprintf(w, "%sOSVersion:\t%s\n", defaultPfx, p.OSVersion)
			}
			if len(p.OSFeatures) > 0 {
				fmt.Fprintf(w, "%sOSFeatures:\t%s\n", pfx, strings.Join(p.OSFeatures, ", "))
				_, _ = fmt.Fprintf(w, "%sOSFeatures:\t%s\n", defaultPfx, strings.Join(p.OSFeatures, ", "))
			}
			if len(m.URLs) > 0 {
				fmt.Fprintf(w, "%sURLs:\t%s\n", pfx, strings.Join(m.URLs, ", "))
				_, _ = fmt.Fprintf(w, "%sURLs:\t%s\n", defaultPfx, strings.Join(m.URLs, ", "))
			}
			if len(m.Annotations) > 0 {
				w.Flush()
				_ = w.Flush()
				w2 := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
				pfx2 := pfx + "  "
				for k, v := range m.Annotations {
					fmt.Fprintf(w2, "%s%s:\t%s\n", pfx2, k, v)
					_, _ = fmt.Fprintf(w2, "%s%s:\t%s\n", defaultPfx+defaultPfx, k, v)
				}
				w2.Flush()
				_ = w2.Flush()
			}
		}
	}
	return w.Flush()
}

func (p *Printer) printBuildInfos(bis map[string]*binfotypes.BuildInfo, out io.Writer) error {
	if len(bis) == 0 {
		return nil
	} else if len(bis) == 1 {
		for _, bi := range bis {
			return p.printBuildInfo(bi, "", out)
		}
	}
	var pkeys []string
	for _, pform := range p.platforms {
		pkeys = append(pkeys, platforms.Format(pform))
	}
	sort.Strings(pkeys)
	for _, platform := range pkeys {
		bi := bis[platform]
		w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
		_, _ = fmt.Fprintf(w, "\t\nPlatform:\t%s\t\n", platform)
		_ = w.Flush()
		if err := p.printBuildInfo(bi, "", out); err != nil {
			return err
		}
	}
	return nil
}

func (p *Printer) printBuildInfo(bi *binfotypes.BuildInfo, pfx string, out io.Writer) error {
	w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
	_, _ = fmt.Fprintf(w, "%sFrontend:\t%s\n", pfx, bi.Frontend)

	if len(bi.Attrs) > 0 {
		_, _ = fmt.Fprintf(w, "%sAttrs:\t\n", pfx)
		_ = w.Flush()
		for k, v := range bi.Attrs {
			_, _ = fmt.Fprintf(w, "%s%s:\t%s\n", pfx+defaultPfx, k, *v)
		}
	}

	if len(bi.Sources) > 0 {
		_, _ = fmt.Fprintf(w, "%sSources:\t\n", pfx)
		_ = w.Flush()
		for i, v := range bi.Sources {
			if i != 0 {
				_, _ = fmt.Fprintf(w, "\t\n")
			}
			_, _ = fmt.Fprintf(w, "%sType:\t%s\n", pfx+defaultPfx, v.Type)
			_, _ = fmt.Fprintf(w, "%sRef:\t%s\n", pfx+defaultPfx, v.Ref)
			_, _ = fmt.Fprintf(w, "%sPin:\t%s\n", pfx+defaultPfx, v.Pin)
		}
	}

	if len(bi.Deps) > 0 {
		_, _ = fmt.Fprintf(w, "%sDeps:\t\n", pfx)
		_ = w.Flush()
		firstPass := true
		for k, v := range bi.Deps {
			if !firstPass {
				_, _ = fmt.Fprintf(w, "\t\n")
			}
			_, _ = fmt.Fprintf(w, "%sName:\t%s\n", pfx+defaultPfx, k)
			_ = w.Flush()
			_ = p.printBuildInfo(&v, pfx+defaultPfx, out)
			firstPass = false
		}
	}

	return w.Flush()
}

func (p *Printer) getImageConfig(platform *ocispecs.Platform) (*ocispecs.Image, []byte, error) {
	_, dtic, err := p.resolver.ImageConfig(p.ctx, p.name, platform)
	if err != nil {
		return nil, nil, err
	}
	var img *ocispecs.Image
	if err = json.Unmarshal(dtic, &img); err != nil {
		return nil, nil, err
	}
	return img, dtic, nil
}

func (p *Printer) getBuildInfo(dtic []byte) (*binfotypes.BuildInfo, error) {
	var binfo *binfotypes.BuildInfo
	if len(dtic) > 0 {
		var biconfig binfotypes.ImageConfig
		if err := json.Unmarshal(dtic, &biconfig); err != nil {
			return nil, errors.Wrap(err, "failed to unmarshal image config")
		}
		if len(biconfig.BuildInfo) > 0 {
			dtbi, err := base64.StdEncoding.DecodeString(biconfig.BuildInfo)
			if err != nil {
				return nil, errors.Wrap(err, "failed to decode build info")
			}
			if err = json.Unmarshal(dtbi, &binfo); err != nil {
				return nil, errors.Wrap(err, "failed to unmarshal build info")
			}
		}
	}
	return binfo, nil
}
38 util/logutil/filter.go Normal file
@@ -0,0 +1,38 @@
package logutil

import (
	"io/ioutil"
	"strings"

	"github.com/sirupsen/logrus"
)

func NewFilter(levels []logrus.Level, filters ...string) logrus.Hook {
	dl := logrus.New()
	dl.SetOutput(ioutil.Discard)
	return &logsFilter{
		levels:        levels,
		filters:       filters,
		discardLogger: dl,
	}
}

type logsFilter struct {
	levels        []logrus.Level
	filters       []string
	discardLogger *logrus.Logger
}

func (d *logsFilter) Levels() []logrus.Level {
	return d.levels
}

func (d *logsFilter) Fire(entry *logrus.Entry) error {
	for _, f := range d.filters {
		if strings.Contains(entry.Message, f) {
			entry.Logger = d.discardLogger
			return nil
		}
	}
	return nil
}
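
Usage sketch (assumed, not part of this diff): the hook can be attached to the
standard logger to silence known-noisy messages at selected levels; the filter
substring below is illustrative only:

```go
// Discard warning-level entries whose message contains the given substring.
logrus.AddHook(logutil.NewFilter(
	[]logrus.Level{logrus.WarnLevel},
	"example noisy message", // hypothetical filter text
))
```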
16 util/logutil/format.go Normal file
@@ -0,0 +1,16 @@
|
||||
package logutil

import (
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
)

type Formatter struct {
	logrus.TextFormatter
}

func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) {
	return []byte(fmt.Sprintf("%s: %s\n", strings.ToUpper(entry.Level.String()), entry.Message)), nil
}
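A quick sketch of the formatter's effect (not part of the diff): it replaces the default text layout with an upper-cased level prefix.

package main

import (
	"github.com/docker/buildx/util/logutil"
	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetFormatter(&logutil.Formatter{})
	logrus.Warn("no output found") // prints: WARNING: no output found
}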
52
util/logutil/pause.go
Normal file
@@ -0,0 +1,52 @@
package logutil

import (
	"bytes"
	"io"
	"sync"

	"github.com/sirupsen/logrus"
)

func Pause(l *logrus.Logger) func() {
	// initialize formatter with original terminal settings
	l.Formatter.Format(logrus.NewEntry(l))

	bw := newBufferedWriter(l.Out)
	l.SetOutput(bw)
	return func() {
		bw.resume()
	}
}

type bufferedWriter struct {
	mu  sync.Mutex
	buf *bytes.Buffer
	w   io.Writer
}

func newBufferedWriter(w io.Writer) *bufferedWriter {
	return &bufferedWriter{
		buf: bytes.NewBuffer(nil),
		w:   w,
	}
}

func (bw *bufferedWriter) Write(p []byte) (int, error) {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	if bw.buf == nil {
		return bw.w.Write(p)
	}
	return bw.buf.Write(p)
}

func (bw *bufferedWriter) resume() {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	if bw.buf == nil {
		return
	}
	io.Copy(bw.w, bw.buf)
	bw.buf = nil
}
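A minimal usage sketch (not part of the diff), assuming an interactive display temporarily owns the terminal: Pause buffers log writes in memory; after resume() the buffer is flushed and the writer becomes pass-through.

package main

import (
	"github.com/docker/buildx/util/logutil"
	"github.com/sirupsen/logrus"
)

func main() {
	resume := logutil.Pause(logrus.StandardLogger())
	logrus.Warn("held in the buffer until resume")

	// ... drive the interactive progress UI here ...

	resume()                                 // flushes buffered entries to the original writer
	logrus.Warn("now written straight through") // later writes pass through unbuffered
}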
@@ -8,14 +8,14 @@ import (
 
 func WithPrefix(w Writer, pfx string, force bool) Writer {
 	return &prefixed{
-		main:  w,
-		pfx:   pfx,
-		force: force,
+		Writer: w,
+		pfx:    pfx,
+		force:  force,
 	}
 }
 
 type prefixed struct {
-	main Writer
+	Writer
 	pfx   string
 	force bool
 }
@@ -26,7 +26,7 @@ func (p *prefixed) Write(v *client.SolveStatus) {
 			v.Name = addPrefix(p.pfx, v.Name)
 		}
 	}
-	p.main.Write(v)
+	p.Writer.Write(v)
 }
 
 func addPrefix(pfx, name string) string {
@@ -5,10 +5,14 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"sync"
 
 	"github.com/containerd/console"
+	"github.com/docker/buildx/util/logutil"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/util/progress/progressui"
+	"github.com/opencontainers/go-digest"
+	"github.com/sirupsen/logrus"
 )
 
 const (
@@ -19,9 +23,12 @@ const (
 )
 
 type Printer struct {
-	status chan *client.SolveStatus
-	done   <-chan struct{}
-	err    error
+	status       chan *client.SolveStatus
+	done         <-chan struct{}
+	err          error
+	warnings     []client.VertexWarning
+	logMu        sync.Mutex
+	logSourceMap map[digest.Digest]interface{}
 }
 
 func (p *Printer) Wait() error {
@@ -34,13 +41,43 @@ func (p *Printer) Write(s *client.SolveStatus) {
 	p.status <- s
 }
 
-func NewPrinter(ctx context.Context, out console.File, mode string) *Printer {
+func (p *Printer) Warnings() []client.VertexWarning {
+	return p.warnings
+}
+
+func (p *Printer) ValidateLogSource(dgst digest.Digest, v interface{}) bool {
+	p.logMu.Lock()
+	defer p.logMu.Unlock()
+	src, ok := p.logSourceMap[dgst]
+	if ok {
+		if src == v {
+			return true
+		}
+	} else {
+		p.logSourceMap[dgst] = v
+		return true
+	}
+	return false
+}
+
+func (p *Printer) ClearLogSource(v interface{}) {
+	p.logMu.Lock()
+	defer p.logMu.Unlock()
+	for d := range p.logSourceMap {
+		if p.logSourceMap[d] == v {
+			delete(p.logSourceMap, d)
+		}
+	}
+}
+
+func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string) *Printer {
 	statusCh := make(chan *client.SolveStatus)
 	doneCh := make(chan struct{})
 
 	pw := &Printer{
-		status: statusCh,
-		done:   doneCh,
+		status:       statusCh,
+		done:         doneCh,
+		logSourceMap: map[digest.Digest]interface{}{},
 	}
 
 	if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && mode == PrinterModeAuto {
@@ -49,7 +86,6 @@ func NewPrinter(ctx context.Context, out console.File, mode string) *Printer {
 
 	go func() {
 		var c console.Console
-		var w io.Writer = out
 		switch mode {
 		case PrinterModeQuiet:
 			w = ioutil.Discard
@@ -58,8 +94,10 @@ func NewPrinter(ctx context.Context, out console.File, mode string) *Printer {
 			c = cons
 		}
 	}
+		resumeLogs := logutil.Pause(logrus.StandardLogger())
 		// not using shared context to not disrupt display but let it finish reporting errors
-		pw.err = progressui.DisplaySolveStatus(ctx, "", c, w, statusCh)
+		pw.warnings, pw.err = progressui.DisplaySolveStatus(ctx, "", c, w, statusCh)
+		resumeLogs()
 		close(doneCh)
 	}()
 	return pw
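To make the new log-source bookkeeping concrete, a hedged in-package sketch (the digest literal is made up; this only compiles inside package progress since it touches unexported fields): the first claimant of a vertex's logs wins, and later claimants are rejected until the owner calls ClearLogSource.

package progress

import (
	"fmt"

	"github.com/opencontainers/go-digest"
)

func ExampleLogSourceOwnership() {
	p := &Printer{logSourceMap: map[digest.Digest]interface{}{}}
	a, b := new(int), new(int)
	d := digest.Digest("sha256:0000") // hypothetical digest
	fmt.Println(p.ValidateLogSource(d, a)) // true: a claims d's logs
	fmt.Println(p.ValidateLogSource(d, b)) // false: d is already claimed by a
	p.ClearLogSource(a)                    // a releases every digest it claimed
	fmt.Println(p.ValidateLogSource(d, b)) // true: b may claim it now
	// Output:
	// true
	// false
	// true
}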
@@ -10,6 +10,8 @@ import (
 
 type Writer interface {
 	Write(*client.SolveStatus)
+	ValidateLogSource(digest.Digest, interface{}) bool
+	ClearLogSource(interface{})
 }
 
 func Write(w Writer, name string, f func() error) {
@@ -47,8 +49,20 @@ func NewChannel(w Writer) (chan *client.SolveStatus, chan struct{}) {
 			v, ok := <-ch
 			if !ok {
 				close(done)
+				w.ClearLogSource(done)
 				return
 			}
+
+			if len(v.Logs) > 0 {
+				logs := make([]*client.VertexLog, 0, len(v.Logs))
+				for _, l := range v.Logs {
+					if w.ValidateLogSource(l.Vertex, done) {
+						logs = append(logs, l)
+					}
+				}
+				v.Logs = logs
+			}
+
 			w.Write(v)
 		}
 	}()
74
util/waitmap/waitmap.go
Normal file
@@ -0,0 +1,74 @@
package waitmap

import (
	"context"
	"sync"
)

type Map struct {
	mu sync.RWMutex
	m  map[string]interface{}
	ch map[string]chan struct{}
}

func New() *Map {
	return &Map{
		m:  make(map[string]interface{}),
		ch: make(map[string]chan struct{}),
	}
}

// Set stores value under key and wakes any Get calls waiting on it.
// A nil entry in ch marks the key as already set.
func (m *Map) Set(key string, value interface{}) {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.m[key] = value

	if ch, ok := m.ch[key]; ok {
		if ch != nil {
			close(ch)
		}
	}
	m.ch[key] = nil
}

// Get returns the values for keys, blocking until every requested key
// has been Set or ctx is done.
func (m *Map) Get(ctx context.Context, keys ...string) (map[string]interface{}, error) {
	if len(keys) == 0 {
		return map[string]interface{}{}, nil
	}

	if len(keys) > 1 {
		out := make(map[string]interface{})
		for _, key := range keys {
			mm, err := m.Get(ctx, key)
			if err != nil {
				return nil, err
			}
			out[key] = mm[key]
		}
		return out, nil
	}

	key := keys[0]
	m.mu.Lock()
	ch, ok := m.ch[key]
	if !ok {
		ch = make(chan struct{})
		m.ch[key] = ch
	}

	if ch != nil {
		m.mu.Unlock()
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-ch:
			m.mu.Lock()
		}
	}

	res := m.m[key]
	m.mu.Unlock()

	return map[string]interface{}{key: res}, nil
}
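A minimal usage sketch (not part of the diff; the tests below exercise the same behavior in more detail):

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/docker/buildx/util/waitmap"
)

func main() {
	m := waitmap.New()
	go func() {
		time.Sleep(10 * time.Millisecond)
		m.Set("result", 42)
	}()
	// Get blocks until every requested key has been Set, or ctx ends.
	v, err := m.Get(context.TODO(), "result")
	fmt.Println(v["result"], err) // 42 <nil>
}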
64
util/waitmap/waitmap_test.go
Normal file
@@ -0,0 +1,64 @@
package waitmap

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestGetAfter(t *testing.T) {
	m := New()

	m.Set("foo", "bar")
	m.Set("bar", "baz")

	ctx := context.TODO()
	v, err := m.Get(ctx, "foo", "bar")
	require.NoError(t, err)

	require.Equal(t, 2, len(v))
	require.Equal(t, "bar", v["foo"])
	require.Equal(t, "baz", v["bar"])

	v, err = m.Get(ctx, "foo")
	require.NoError(t, err)
	require.Equal(t, 1, len(v))
	require.Equal(t, "bar", v["foo"])
}

func TestTimeout(t *testing.T) {
	m := New()

	m.Set("foo", "bar")

	ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond)
	defer cancel()

	_, err := m.Get(ctx, "bar")
	require.Error(t, err)
	require.True(t, errors.Is(err, context.DeadlineExceeded))
}

func TestBlocking(t *testing.T) {
	m := New()

	m.Set("foo", "bar")

	go func() {
		time.Sleep(100 * time.Millisecond)
		m.Set("bar", "baz")
		time.Sleep(50 * time.Millisecond)
		m.Set("baz", "abc")
	}()

	ctx := context.TODO()
	v, err := m.Get(ctx, "foo", "bar", "baz")
	require.NoError(t, err)
	require.Equal(t, 3, len(v))
	require.Equal(t, "bar", v["foo"])
	require.Equal(t, "baz", v["bar"])
	require.Equal(t, "abc", v["baz"])
}
27
vendor/github.com/Microsoft/go-winio/README.md
generated
vendored
@@ -11,12 +11,27 @@ package.
 
 Please see the LICENSE file for licensing information.
 
-This project has adopted the [Microsoft Open Source Code of
-Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
-see the [Code of Conduct
-FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
-[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
-questions or comments.
+## Contributing
+
+This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA)
+declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.
+
+When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR
+appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.
+
+We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves
+or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can
+attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.
+
+## Code of Conduct
+
+This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
+For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
+contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.
+
 ## Special Thanks
 Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
 for another named pipe implementation.
95
vendor/github.com/apparentlymart/go-textseg/v13/LICENSE
generated
vendored
Normal file
@@ -0,0 +1,95 @@
Copyright (c) 2017 Martin Atkins

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

---------

Unicode table generation programs are under a separate copyright and license:

Copyright (c) 2014 Couchbase, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions
and limitations under the License.

---------

Grapheme break data is provided as part of the Unicode character database,
copyright 2016 Unicode, Inc, which is provided with the following license:

Unicode Data Files include all data files under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.

Unicode Data Files do not include PDF online code charts under the
directory http://www.unicode.org/Public/.

Software includes any source code published in the Unicode Standard
or under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.

NOTICE TO USER: Carefully read the following legal agreement.
BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
TERMS AND CONDITIONS OF THIS AGREEMENT.
IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
THE DATA FILES OR SOFTWARE.

COPYRIGHT AND PERMISSION NOTICE

Copyright © 1991-2017 Unicode, Inc. All rights reserved.
Distributed under the Terms of Use in http://www.unicode.org/copyright.html.

Permission is hereby granted, free of charge, to any person obtaining
a copy of the Unicode data files and any associated documentation
(the "Data Files") or Unicode software and any associated documentation
(the "Software") to deal in the Data Files or Software
without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, and/or sell copies of
the Data Files or Software, and to permit persons to whom the Data Files
or Software are furnished to do so, provided that either
(a) this copyright and permission notice appear with all copies
of the Data Files or Software, or
(b) this copyright and permission notice appear in associated
Documentation.

THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THE DATA FILES OR SOFTWARE.

Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in these Data Files or Software without prior
written authorization of the copyright holder.
30
vendor/github.com/apparentlymart/go-textseg/v13/textseg/all_tokens.go
generated
vendored
Normal file
@@ -0,0 +1,30 @@
package textseg

import (
	"bufio"
	"bytes"
)

// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of
// all of the recognized tokens in the given buffer.
func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) {
	scanner := bufio.NewScanner(bytes.NewReader(buf))
	scanner.Split(splitFunc)
	var ret [][]byte
	for scanner.Scan() {
		ret = append(ret, scanner.Bytes())
	}
	return ret, scanner.Err()
}

// TokenCount is a utility that uses a bufio.SplitFunc to count the number of
// recognized tokens in the given buffer.
func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) {
	scanner := bufio.NewScanner(bytes.NewReader(buf))
	scanner.Split(splitFunc)
	var ret int
	for scanner.Scan() {
		ret++
	}
	return ret, scanner.Err()
}
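A small usage sketch (not part of the diff), pairing AllTokens with the grapheme-cluster split function defined later in this package: an emoji plus its skin-tone modifier counts as a single cluster.

package main

import (
	"fmt"

	"github.com/apparentlymart/go-textseg/v13/textseg"
)

func main() {
	toks, err := textseg.AllTokens([]byte("é👍🏽!"), textseg.ScanGraphemeClusters)
	if err != nil {
		panic(err)
	}
	fmt.Println(len(toks)) // 3 clusters: "é", "👍🏽", "!"
}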
525
vendor/github.com/apparentlymart/go-textseg/v13/textseg/emoji_table.rl
generated
vendored
Normal file
@@ -0,0 +1,525 @@
# The following Ragel file was autogenerated with unicode2ragel.rb
# from: https://www.unicode.org/Public/13.0.0/ucd/emoji/emoji-data.txt
#
# It defines ["Extended_Pictographic"].
#
# To use this, make sure that your alphtype is set to byte,
# and that your input is in utf8.

%%{
  machine Emoji;

  Extended_Pictographic =
    0xC2 0xA9 #E0.6 [1] (©️) copyright
  | 0xC2 0xAE #E0.6 [1] (®️) registered
  | 0xE2 0x80 0xBC #E0.6 [1] (‼️) double exclamation mark
  | 0xE2 0x81 0x89 #E0.6 [1] (⁉️) exclamation question ...
  | 0xE2 0x84 0xA2 #E0.6 [1] (™️) trade mark
  | 0xE2 0x84 0xB9 #E0.6 [1] (ℹ️) information
  | 0xE2 0x86 0x94..0x99 #E0.6 [6] (↔️..↙️) left-right arrow..do...
  | 0xE2 0x86 0xA9..0xAA #E0.6 [2] (↩️..↪️) right arrow curving ...
  | 0xE2 0x8C 0x9A..0x9B #E0.6 [2] (⌚..⌛) watch..hourglass done
  | 0xE2 0x8C 0xA8 #E1.0 [1] (⌨️) keyboard
  | 0xE2 0x8E 0x88 #E0.0 [1] (⎈) HELM SYMBOL
  | 0xE2 0x8F 0x8F #E1.0 [1] (⏏️) eject button
  | 0xE2 0x8F 0xA9..0xAC #E0.6 [4] (⏩..⏬) fast-forward button..f...
  | 0xE2 0x8F 0xAD..0xAE #E0.7 [2] (⏭️..⏮️) next track button..l...
  | 0xE2 0x8F 0xAF #E1.0 [1] (⏯️) play or pause button
  | 0xE2 0x8F 0xB0 #E0.6 [1] (⏰) alarm clock
  | 0xE2 0x8F 0xB1..0xB2 #E1.0 [2] (⏱️..⏲️) stopwatch..timer clock
  | 0xE2 0x8F 0xB3 #E0.6 [1] (⏳) hourglass not done
  | 0xE2 0x8F 0xB8..0xBA #E0.7 [3] (⏸️..⏺️) pause button..record...
  | 0xE2 0x93 0x82 #E0.6 [1] (Ⓜ️) circled M
  | 0xE2 0x96 0xAA..0xAB #E0.6 [2] (▪️..▫️) black small square.....
  | 0xE2 0x96 0xB6 #E0.6 [1] (▶️) play button
  | 0xE2 0x97 0x80 #E0.6 [1] (◀️) reverse button
  | 0xE2 0x97 0xBB..0xBE #E0.6 [4] (◻️..◾) white medium square.....
  | 0xE2 0x98 0x80..0x81 #E0.6 [2] (☀️..☁️) sun..cloud
  | 0xE2 0x98 0x82..0x83 #E0.7 [2] (☂️..☃️) umbrella..snowman
  | 0xE2 0x98 0x84 #E1.0 [1] (☄️) comet
  | 0xE2 0x98 0x85 #E0.0 [1] (★) BLACK STAR
  | 0xE2 0x98 0x87..0x8D #E0.0 [7] (☇..☍) LIGHTNING..OPPOSITION
  | 0xE2 0x98 0x8E #E0.6 [1] (☎️) telephone
  | 0xE2 0x98 0x8F..0x90 #E0.0 [2] (☏..☐) WHITE TELEPHONE..BALLO...
  | 0xE2 0x98 0x91 #E0.6 [1] (☑️) check box with check
  | 0xE2 0x98 0x92 #E0.0 [1] (☒) BALLOT BOX WITH X
  | 0xE2 0x98 0x94..0x95 #E0.6 [2] (☔..☕) umbrella with rain dro...
  | 0xE2 0x98 0x96..0x97 #E0.0 [2] (☖..☗) WHITE SHOGI PIECE..BLA...
  | 0xE2 0x98 0x98 #E1.0 [1] (☘️) shamrock
  | 0xE2 0x98 0x99..0x9C #E0.0 [4] (☙..☜) REVERSED ROTATED FLORA...
  | 0xE2 0x98 0x9D #E0.6 [1] (☝️) index pointing up
  | 0xE2 0x98 0x9E..0x9F #E0.0 [2] (☞..☟) WHITE RIGHT POINTING I...
  | 0xE2 0x98 0xA0 #E1.0 [1] (☠️) skull and crossbones
  | 0xE2 0x98 0xA1 #E0.0 [1] (☡) CAUTION SIGN
  | 0xE2 0x98 0xA2..0xA3 #E1.0 [2] (☢️..☣️) radioactive..biohazard
  | 0xE2 0x98 0xA4..0xA5 #E0.0 [2] (☤..☥) CADUCEUS..ANKH
  | 0xE2 0x98 0xA6 #E1.0 [1] (☦️) orthodox cross
  | 0xE2 0x98 0xA7..0xA9 #E0.0 [3] (☧..☩) CHI RHO..CROSS OF JERU...
  | 0xE2 0x98 0xAA #E0.7 [1] (☪️) star and crescent
  | 0xE2 0x98 0xAB..0xAD #E0.0 [3] (☫..☭) FARSI SYMBOL..HAMMER A...
  | 0xE2 0x98 0xAE #E1.0 [1] (☮️) peace symbol
  | 0xE2 0x98 0xAF #E0.7 [1] (☯️) yin yang
  | 0xE2 0x98 0xB0..0xB7 #E0.0 [8] (☰..☷) TRIGRAM FOR HEAVEN..TR...
  | 0xE2 0x98 0xB8..0xB9 #E0.7 [2] (☸️..☹️) wheel of dharma..fro...
  | 0xE2 0x98 0xBA #E0.6 [1] (☺️) smiling face
  | 0xE2 0x98 0xBB..0xBF #E0.0 [5] (☻..☿) BLACK SMILING FACE..ME...
  | 0xE2 0x99 0x80 #E4.0 [1] (♀️) female sign
  | 0xE2 0x99 0x81 #E0.0 [1] (♁) EARTH
  | 0xE2 0x99 0x82 #E4.0 [1] (♂️) male sign
  | 0xE2 0x99 0x83..0x87 #E0.0 [5] (♃..♇) JUPITER..PLUTO
  | 0xE2 0x99 0x88..0x93 #E0.6 [12] (♈..♓) Aries..Pisces
  | 0xE2 0x99 0x94..0x9E #E0.0 [11] (♔..♞) WHITE CHESS KING..BLAC...
  | 0xE2 0x99 0x9F #E11.0 [1] (♟️) chess pawn
  | 0xE2 0x99 0xA0 #E0.6 [1] (♠️) spade suit
  | 0xE2 0x99 0xA1..0xA2 #E0.0 [2] (♡..♢) WHITE HEART SUIT..WHIT...
  | 0xE2 0x99 0xA3 #E0.6 [1] (♣️) club suit
  | 0xE2 0x99 0xA4 #E0.0 [1] (♤) WHITE SPADE SUIT
  | 0xE2 0x99 0xA5..0xA6 #E0.6 [2] (♥️..♦️) heart suit..diamond ...
  | 0xE2 0x99 0xA7 #E0.0 [1] (♧) WHITE CLUB SUIT
  | 0xE2 0x99 0xA8 #E0.6 [1] (♨️) hot springs
  | 0xE2 0x99 0xA9..0xBA #E0.0 [18] (♩..♺) QUARTER NOTE..RECYCLIN...
  | 0xE2 0x99 0xBB #E0.6 [1] (♻️) recycling symbol
  | 0xE2 0x99 0xBC..0xBD #E0.0 [2] (♼..♽) RECYCLED PAPER SYMBOL....
  | 0xE2 0x99 0xBE #E11.0 [1] (♾️) infinity
  | 0xE2 0x99 0xBF #E0.6 [1] (♿) wheelchair symbol
  | 0xE2 0x9A 0x80..0x85 #E0.0 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6
  | 0xE2 0x9A 0x90..0x91 #E0.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG
  | 0xE2 0x9A 0x92 #E1.0 [1] (⚒️) hammer and pick
  | 0xE2 0x9A 0x93 #E0.6 [1] (⚓) anchor
  | 0xE2 0x9A 0x94 #E1.0 [1] (⚔️) crossed swords
  | 0xE2 0x9A 0x95 #E4.0 [1] (⚕️) medical symbol
  | 0xE2 0x9A 0x96..0x97 #E1.0 [2] (⚖️..⚗️) balance scale..alembic
  | 0xE2 0x9A 0x98 #E0.0 [1] (⚘) FLOWER
  | 0xE2 0x9A 0x99 #E1.0 [1] (⚙️) gear
  | 0xE2 0x9A 0x9A #E0.0 [1] (⚚) STAFF OF HERMES
  | 0xE2 0x9A 0x9B..0x9C #E1.0 [2] (⚛️..⚜️) atom symbol..fleur-d...
  | 0xE2 0x9A 0x9D..0x9F #E0.0 [3] (⚝..⚟) OUTLINED WHITE STAR..T...
  | 0xE2 0x9A 0xA0..0xA1 #E0.6 [2] (⚠️..⚡) warning..high voltage
  | 0xE2 0x9A 0xA2..0xA6 #E0.0 [5] (⚢..⚦) DOUBLED FEMALE SIGN..M...
  | 0xE2 0x9A 0xA7 #E13.0 [1] (⚧️) transgender symbol
  | 0xE2 0x9A 0xA8..0xA9 #E0.0 [2] (⚨..⚩) VERTICAL MALE WITH STR...
  | 0xE2 0x9A 0xAA..0xAB #E0.6 [2] (⚪..⚫) white circle..black ci...
  | 0xE2 0x9A 0xAC..0xAF #E0.0 [4] (⚬..⚯) MEDIUM SMALL WHITE CIR...
  | 0xE2 0x9A 0xB0..0xB1 #E1.0 [2] (⚰️..⚱️) coffin..funeral urn
  | 0xE2 0x9A 0xB2..0xBC #E0.0 [11] (⚲..⚼) NEUTER..SESQUIQUADRATE
  | 0xE2 0x9A 0xBD..0xBE #E0.6 [2] (⚽..⚾) soccer ball..baseball
  | 0xE2 0x9A 0xBF..0xFF #E0.0 [5] (⚿..⛃) SQUARED KEY..BLACK DRA...
  | 0xE2 0x9B 0x00..0x83 #
  | 0xE2 0x9B 0x84..0x85 #E0.6 [2] (⛄..⛅) snowman without snow.....
  | 0xE2 0x9B 0x86..0x87 #E0.0 [2] (⛆..⛇) RAIN..BLACK SNOWMAN
  | 0xE2 0x9B 0x88 #E0.7 [1] (⛈️) cloud with lightning ...
  | 0xE2 0x9B 0x89..0x8D #E0.0 [5] (⛉..⛍) TURNED WHITE SHOGI PIE...
  | 0xE2 0x9B 0x8E #E0.6 [1] (⛎) Ophiuchus
  | 0xE2 0x9B 0x8F #E0.7 [1] (⛏️) pick
  | 0xE2 0x9B 0x90 #E0.0 [1] (⛐) CAR SLIDING
  | 0xE2 0x9B 0x91 #E0.7 [1] (⛑️) rescue worker’s helmet
  | 0xE2 0x9B 0x92 #E0.0 [1] (⛒) CIRCLED CROSSING LANES
  | 0xE2 0x9B 0x93 #E0.7 [1] (⛓️) chains
  | 0xE2 0x9B 0x94 #E0.6 [1] (⛔) no entry
  | 0xE2 0x9B 0x95..0xA8 #E0.0 [20] (⛕..⛨) ALTERNATE ONE-WAY LEFT...
  | 0xE2 0x9B 0xA9 #E0.7 [1] (⛩️) shinto shrine
  | 0xE2 0x9B 0xAA #E0.6 [1] (⛪) church
  | 0xE2 0x9B 0xAB..0xAF #E0.0 [5] (⛫..⛯) CASTLE..MAP SYMBOL FOR...
  | 0xE2 0x9B 0xB0..0xB1 #E0.7 [2] (⛰️..⛱️) mountain..umbrella o...
  | 0xE2 0x9B 0xB2..0xB3 #E0.6 [2] (⛲..⛳) fountain..flag in hole
  | 0xE2 0x9B 0xB4 #E0.7 [1] (⛴️) ferry
  | 0xE2 0x9B 0xB5 #E0.6 [1] (⛵) sailboat
  | 0xE2 0x9B 0xB6 #E0.0 [1] (⛶) SQUARE FOUR CORNERS
  | 0xE2 0x9B 0xB7..0xB9 #E0.7 [3] (⛷️..⛹️) skier..person bounci...
  | 0xE2 0x9B 0xBA #E0.6 [1] (⛺) tent
  | 0xE2 0x9B 0xBB..0xBC #E0.0 [2] (⛻..⛼) JAPANESE BANK SYMBOL.....
  | 0xE2 0x9B 0xBD #E0.6 [1] (⛽) fuel pump
  | 0xE2 0x9B 0xBE..0xFF #E0.0 [4] (⛾..✁) CUP ON BLACK SQUARE..U...
  | 0xE2 0x9C 0x00..0x81 #
  | 0xE2 0x9C 0x82 #E0.6 [1] (✂️) scissors
  | 0xE2 0x9C 0x83..0x84 #E0.0 [2] (✃..✄) LOWER BLADE SCISSORS.....
  | 0xE2 0x9C 0x85 #E0.6 [1] (✅) check mark button
  | 0xE2 0x9C 0x88..0x8C #E0.6 [5] (✈️..✌️) airplane..victory hand
  | 0xE2 0x9C 0x8D #E0.7 [1] (✍️) writing hand
  | 0xE2 0x9C 0x8E #E0.0 [1] (✎) LOWER RIGHT PENCIL
  | 0xE2 0x9C 0x8F #E0.6 [1] (✏️) pencil
  | 0xE2 0x9C 0x90..0x91 #E0.0 [2] (✐..✑) UPPER RIGHT PENCIL..WH...
  | 0xE2 0x9C 0x92 #E0.6 [1] (✒️) black nib
  | 0xE2 0x9C 0x94 #E0.6 [1] (✔️) check mark
  | 0xE2 0x9C 0x96 #E0.6 [1] (✖️) multiply
  | 0xE2 0x9C 0x9D #E0.7 [1] (✝️) latin cross
  | 0xE2 0x9C 0xA1 #E0.7 [1] (✡️) star of David
  | 0xE2 0x9C 0xA8 #E0.6 [1] (✨) sparkles
  | 0xE2 0x9C 0xB3..0xB4 #E0.6 [2] (✳️..✴️) eight-spoked asteris...
  | 0xE2 0x9D 0x84 #E0.6 [1] (❄️) snowflake
  | 0xE2 0x9D 0x87 #E0.6 [1] (❇️) sparkle
  | 0xE2 0x9D 0x8C #E0.6 [1] (❌) cross mark
  | 0xE2 0x9D 0x8E #E0.6 [1] (❎) cross mark button
  | 0xE2 0x9D 0x93..0x95 #E0.6 [3] (❓..❕) question mark..white e...
  | 0xE2 0x9D 0x97 #E0.6 [1] (❗) exclamation mark
  | 0xE2 0x9D 0xA3 #E1.0 [1] (❣️) heart exclamation
  | 0xE2 0x9D 0xA4 #E0.6 [1] (❤️) red heart
  | 0xE2 0x9D 0xA5..0xA7 #E0.0 [3] (❥..❧) ROTATED HEAVY BLACK HE...
  | 0xE2 0x9E 0x95..0x97 #E0.6 [3] (➕..➗) plus..divide
  | 0xE2 0x9E 0xA1 #E0.6 [1] (➡️) right arrow
  | 0xE2 0x9E 0xB0 #E0.6 [1] (➰) curly loop
  | 0xE2 0x9E 0xBF #E1.0 [1] (➿) double curly loop
  | 0xE2 0xA4 0xB4..0xB5 #E0.6 [2] (⤴️..⤵️) right arrow curving ...
  | 0xE2 0xAC 0x85..0x87 #E0.6 [3] (⬅️..⬇️) left arrow..down arrow
  | 0xE2 0xAC 0x9B..0x9C #E0.6 [2] (⬛..⬜) black large square..wh...
  | 0xE2 0xAD 0x90 #E0.6 [1] (⭐) star
  | 0xE2 0xAD 0x95 #E0.6 [1] (⭕) hollow red circle
  | 0xE3 0x80 0xB0 #E0.6 [1] (〰️) wavy dash
  | 0xE3 0x80 0xBD #E0.6 [1] (〽️) part alternation mark
  | 0xE3 0x8A 0x97 #E0.6 [1] (㊗️) Japanese “congratulat...
  | 0xE3 0x8A 0x99 #E0.6 [1] (㊙️) Japanese “secret” button
  | 0xF0 0x9F 0x80 0x80..0x83 #E0.0 [4] (🀀..🀃) MAHJONG TILE EAST W...
  | 0xF0 0x9F 0x80 0x84 #E0.6 [1] (🀄) mahjong red dragon
  | 0xF0 0x9F 0x80 0x85..0xFF #E0.0 [202] (🀅..🃎) MAHJONG TILE ...
  | 0xF0 0x9F 0x81..0x82 0x00..0xFF #
  | 0xF0 0x9F 0x83 0x00..0x8E #
  | 0xF0 0x9F 0x83 0x8F #E0.6 [1] (🃏) joker
  | 0xF0 0x9F 0x83 0x90..0xBF #E0.0 [48] (..) <reserved-1F0D0>..<...
  | 0xF0 0x9F 0x84 0x8D..0x8F #E0.0 [3] (🄍..🄏) CIRCLED ZERO WITH S...
  | 0xF0 0x9F 0x84 0xAF #E0.0 [1] (🄯) COPYLEFT SYMBOL
  | 0xF0 0x9F 0x85 0xAC..0xAF #E0.0 [4] (🅬..🅯) RAISED MR SIGN..CIR...
  | 0xF0 0x9F 0x85 0xB0..0xB1 #E0.6 [2] (🅰️..🅱️) A button (blood t...
  | 0xF0 0x9F 0x85 0xBE..0xBF #E0.6 [2] (🅾️..🅿️) O button (blood t...
  | 0xF0 0x9F 0x86 0x8E #E0.6 [1] (🆎) AB button (blood type)
  | 0xF0 0x9F 0x86 0x91..0x9A #E0.6 [10] (🆑..🆚) CL button..VS button
  | 0xF0 0x9F 0x86 0xAD..0xFF #E0.0 [57] (🆭..) MASK WORK SYMBOL..<...
  | 0xF0 0x9F 0x87 0x00..0xA5 #
  | 0xF0 0x9F 0x88 0x81..0x82 #E0.6 [2] (🈁..🈂️) Japanese “here” bu...
  | 0xF0 0x9F 0x88 0x83..0x8F #E0.0 [13] (..) <reserved-1F203>..<...
  | 0xF0 0x9F 0x88 0x9A #E0.6 [1] (🈚) Japanese “free of char...
  | 0xF0 0x9F 0x88 0xAF #E0.6 [1] (🈯) Japanese “reserved” bu...
  | 0xF0 0x9F 0x88 0xB2..0xBA #E0.6 [9] (🈲..🈺) Japanese “prohibite...
  | 0xF0 0x9F 0x88 0xBC..0xBF #E0.0 [4] (..) <reserved-1F23C>..<...
  | 0xF0 0x9F 0x89 0x89..0x8F #E0.0 [7] (..) <reserved-1F249>..<...
  | 0xF0 0x9F 0x89 0x90..0x91 #E0.6 [2] (🉐..🉑) Japanese “bargain” ...
  | 0xF0 0x9F 0x89 0x92..0xFF #E0.0 [174] (..) <reserved-1F2...
  | 0xF0 0x9F 0x8A..0x8A 0x00..0xFF #
  | 0xF0 0x9F 0x8B 0x00..0xBF #
  | 0xF0 0x9F 0x8C 0x80..0x8C #E0.6 [13] (🌀..🌌) cyclone..milky way
  | 0xF0 0x9F 0x8C 0x8D..0x8E #E0.7 [2] (🌍..🌎) globe showing Europ...
  | 0xF0 0x9F 0x8C 0x8F #E0.6 [1] (🌏) globe showing Asia-Aus...
  | 0xF0 0x9F 0x8C 0x90 #E1.0 [1] (🌐) globe with meridians
  | 0xF0 0x9F 0x8C 0x91 #E0.6 [1] (🌑) new moon
  | 0xF0 0x9F 0x8C 0x92 #E1.0 [1] (🌒) waxing crescent moon
  | 0xF0 0x9F 0x8C 0x93..0x95 #E0.6 [3] (🌓..🌕) first quarter moon....
  | 0xF0 0x9F 0x8C 0x96..0x98 #E1.0 [3] (🌖..🌘) waning gibbous moon...
  | 0xF0 0x9F 0x8C 0x99 #E0.6 [1] (🌙) crescent moon
  | 0xF0 0x9F 0x8C 0x9A #E1.0 [1] (🌚) new moon face
  | 0xF0 0x9F 0x8C 0x9B #E0.6 [1] (🌛) first quarter moon face
  | 0xF0 0x9F 0x8C 0x9C #E0.7 [1] (🌜) last quarter moon face
  | 0xF0 0x9F 0x8C 0x9D..0x9E #E1.0 [2] (🌝..🌞) full moon face..sun...
  | 0xF0 0x9F 0x8C 0x9F..0xA0 #E0.6 [2] (🌟..🌠) glowing star..shoot...
  | 0xF0 0x9F 0x8C 0xA1 #E0.7 [1] (🌡️) thermometer
  | 0xF0 0x9F 0x8C 0xA2..0xA3 #E0.0 [2] (🌢..🌣) BLACK DROPLET..WHIT...
  | 0xF0 0x9F 0x8C 0xA4..0xAC #E0.7 [9] (🌤️..🌬️) sun behind small ...
  | 0xF0 0x9F 0x8C 0xAD..0xAF #E1.0 [3] (🌭..🌯) hot dog..burrito
  | 0xF0 0x9F 0x8C 0xB0..0xB1 #E0.6 [2] (🌰..🌱) chestnut..seedling
  | 0xF0 0x9F 0x8C 0xB2..0xB3 #E1.0 [2] (🌲..🌳) evergreen tree..dec...
  | 0xF0 0x9F 0x8C 0xB4..0xB5 #E0.6 [2] (🌴..🌵) palm tree..cactus
  | 0xF0 0x9F 0x8C 0xB6 #E0.7 [1] (🌶️) hot pepper
  | 0xF0 0x9F 0x8C 0xB7..0xFF #E0.6 [20] (🌷..🍊) tulip..tangerine
  | 0xF0 0x9F 0x8D 0x00..0x8A #
  | 0xF0 0x9F 0x8D 0x8B #E1.0 [1] (🍋) lemon
  | 0xF0 0x9F 0x8D 0x8C..0x8F #E0.6 [4] (🍌..🍏) banana..green apple
  | 0xF0 0x9F 0x8D 0x90 #E1.0 [1] (🍐) pear
  | 0xF0 0x9F 0x8D 0x91..0xBB #E0.6 [43] (🍑..🍻) peach..clinking bee...
  | 0xF0 0x9F 0x8D 0xBC #E1.0 [1] (🍼) baby bottle
  | 0xF0 0x9F 0x8D 0xBD #E0.7 [1] (🍽️) fork and knife with p...
  | 0xF0 0x9F 0x8D 0xBE..0xBF #E1.0 [2] (🍾..🍿) bottle with popping...
  | 0xF0 0x9F 0x8E 0x80..0x93 #E0.6 [20] (🎀..🎓) ribbon..graduation cap
  | 0xF0 0x9F 0x8E 0x94..0x95 #E0.0 [2] (🎔..🎕) HEART WITH TIP ON T...
  | 0xF0 0x9F 0x8E 0x96..0x97 #E0.7 [2] (🎖️..🎗️) military medal..r...
  | 0xF0 0x9F 0x8E 0x98 #E0.0 [1] (🎘) MUSICAL KEYBOARD WITH ...
  | 0xF0 0x9F 0x8E 0x99..0x9B #E0.7 [3] (🎙️..🎛️) studio microphone...
  | 0xF0 0x9F 0x8E 0x9C..0x9D #E0.0 [2] (🎜..🎝) BEAMED ASCENDING MU...
  | 0xF0 0x9F 0x8E 0x9E..0x9F #E0.7 [2] (🎞️..🎟️) film frames..admi...
  | 0xF0 0x9F 0x8E 0xA0..0xFF #E0.6 [37] (🎠..🏄) carousel horse..per...
  | 0xF0 0x9F 0x8F 0x00..0x84 #
  | 0xF0 0x9F 0x8F 0x85 #E1.0 [1] (🏅) sports medal
  | 0xF0 0x9F 0x8F 0x86 #E0.6 [1] (🏆) trophy
  | 0xF0 0x9F 0x8F 0x87 #E1.0 [1] (🏇) horse racing
  | 0xF0 0x9F 0x8F 0x88 #E0.6 [1] (🏈) american football
  | 0xF0 0x9F 0x8F 0x89 #E1.0 [1] (🏉) rugby football
  | 0xF0 0x9F 0x8F 0x8A #E0.6 [1] (🏊) person swimming
  | 0xF0 0x9F 0x8F 0x8B..0x8E #E0.7 [4] (🏋️..🏎️) person lifting we...
  | 0xF0 0x9F 0x8F 0x8F..0x93 #E1.0 [5] (🏏..🏓) cricket game..ping ...
  | 0xF0 0x9F 0x8F 0x94..0x9F #E0.7 [12] (🏔️..🏟️) snow-capped mount...
  | 0xF0 0x9F 0x8F 0xA0..0xA3 #E0.6 [4] (🏠..🏣) house..Japanese pos...
  | 0xF0 0x9F 0x8F 0xA4 #E1.0 [1] (🏤) post office
  | 0xF0 0x9F 0x8F 0xA5..0xB0 #E0.6 [12] (🏥..🏰) hospital..castle
  | 0xF0 0x9F 0x8F 0xB1..0xB2 #E0.0 [2] (🏱..🏲) WHITE PENNANT..BLAC...
  | 0xF0 0x9F 0x8F 0xB3 #E0.7 [1] (🏳️) white flag
  | 0xF0 0x9F 0x8F 0xB4 #E1.0 [1] (🏴) black flag
  | 0xF0 0x9F 0x8F 0xB5 #E0.7 [1] (🏵️) rosette
  | 0xF0 0x9F 0x8F 0xB6 #E0.0 [1] (🏶) BLACK ROSETTE
  | 0xF0 0x9F 0x8F 0xB7 #E0.7 [1] (🏷️) label
  | 0xF0 0x9F 0x8F 0xB8..0xBA #E1.0 [3] (🏸..🏺) badminton..amphora
  | 0xF0 0x9F 0x90 0x80..0x87 #E1.0 [8] (🐀..🐇) rat..rabbit
  | 0xF0 0x9F 0x90 0x88 #E0.7 [1] (🐈) cat
  | 0xF0 0x9F 0x90 0x89..0x8B #E1.0 [3] (🐉..🐋) dragon..whale
  | 0xF0 0x9F 0x90 0x8C..0x8E #E0.6 [3] (🐌..🐎) snail..horse
  | 0xF0 0x9F 0x90 0x8F..0x90 #E1.0 [2] (🐏..🐐) ram..goat
  | 0xF0 0x9F 0x90 0x91..0x92 #E0.6 [2] (🐑..🐒) ewe..monkey
  | 0xF0 0x9F 0x90 0x93 #E1.0 [1] (🐓) rooster
  | 0xF0 0x9F 0x90 0x94 #E0.6 [1] (🐔) chicken
  | 0xF0 0x9F 0x90 0x95 #E0.7 [1] (🐕) dog
  | 0xF0 0x9F 0x90 0x96 #E1.0 [1] (🐖) pig
  | 0xF0 0x9F 0x90 0x97..0xA9 #E0.6 [19] (🐗..🐩) boar..poodle
  | 0xF0 0x9F 0x90 0xAA #E1.0 [1] (🐪) camel
  | 0xF0 0x9F 0x90 0xAB..0xBE #E0.6 [20] (🐫..🐾) two-hump camel..paw...
  | 0xF0 0x9F 0x90 0xBF #E0.7 [1] (🐿️) chipmunk
  | 0xF0 0x9F 0x91 0x80 #E0.6 [1] (👀) eyes
  | 0xF0 0x9F 0x91 0x81 #E0.7 [1] (👁️) eye
  | 0xF0 0x9F 0x91 0x82..0xA4 #E0.6 [35] (👂..👤) ear..bust in silhou...
  | 0xF0 0x9F 0x91 0xA5 #E1.0 [1] (👥) busts in silhouette
  | 0xF0 0x9F 0x91 0xA6..0xAB #E0.6 [6] (👦..👫) boy..woman and man ...
  | 0xF0 0x9F 0x91 0xAC..0xAD #E1.0 [2] (👬..👭) men holding hands.....
  | 0xF0 0x9F 0x91 0xAE..0xFF #E0.6 [63] (👮..💬) police officer..spe...
  | 0xF0 0x9F 0x92 0x00..0xAC #
  | 0xF0 0x9F 0x92 0xAD #E1.0 [1] (💭) thought balloon
  | 0xF0 0x9F 0x92 0xAE..0xB5 #E0.6 [8] (💮..💵) white flower..dolla...
  | 0xF0 0x9F 0x92 0xB6..0xB7 #E1.0 [2] (💶..💷) euro banknote..poun...
  | 0xF0 0x9F 0x92 0xB8..0xFF #E0.6 [52] (💸..📫) money with wings..c...
  | 0xF0 0x9F 0x93 0x00..0xAB #
  | 0xF0 0x9F 0x93 0xAC..0xAD #E0.7 [2] (📬..📭) open mailbox with r...
  | 0xF0 0x9F 0x93 0xAE #E0.6 [1] (📮) postbox
  | 0xF0 0x9F 0x93 0xAF #E1.0 [1] (📯) postal horn
  | 0xF0 0x9F 0x93 0xB0..0xB4 #E0.6 [5] (📰..📴) newspaper..mobile p...
  | 0xF0 0x9F 0x93 0xB5 #E1.0 [1] (📵) no mobile phones
  | 0xF0 0x9F 0x93 0xB6..0xB7 #E0.6 [2] (📶..📷) antenna bars..camera
  | 0xF0 0x9F 0x93 0xB8 #E1.0 [1] (📸) camera with flash
  | 0xF0 0x9F 0x93 0xB9..0xBC #E0.6 [4] (📹..📼) video camera..video...
  | 0xF0 0x9F 0x93 0xBD #E0.7 [1] (📽️) film projector
  | 0xF0 0x9F 0x93 0xBE #E0.0 [1] (📾) PORTABLE STEREO
  | 0xF0 0x9F 0x93 0xBF..0xFF #E1.0 [4] (📿..🔂) prayer beads..repea...
  | 0xF0 0x9F 0x94 0x00..0x82 #
  | 0xF0 0x9F 0x94 0x83 #E0.6 [1] (🔃) clockwise vertical arrows
  | 0xF0 0x9F 0x94 0x84..0x87 #E1.0 [4] (🔄..🔇) counterclockwise ar...
  | 0xF0 0x9F 0x94 0x88 #E0.7 [1] (🔈) speaker low volume
  | 0xF0 0x9F 0x94 0x89 #E1.0 [1] (🔉) speaker medium volume
  | 0xF0 0x9F 0x94 0x8A..0x94 #E0.6 [11] (🔊..🔔) speaker high volume...
  | 0xF0 0x9F 0x94 0x95 #E1.0 [1] (🔕) bell with slash
  | 0xF0 0x9F 0x94 0x96..0xAB #E0.6 [22] (🔖..🔫) bookmark..pistol
  | 0xF0 0x9F 0x94 0xAC..0xAD #E1.0 [2] (🔬..🔭) microscope..telescope
  | 0xF0 0x9F 0x94 0xAE..0xBD #E0.6 [16] (🔮..🔽) crystal ball..downw...
  | 0xF0 0x9F 0x95 0x86..0x88 #E0.0 [3] (🕆..🕈) WHITE LATIN CROSS.....
  | 0xF0 0x9F 0x95 0x89..0x8A #E0.7 [2] (🕉️..🕊️) om..dove
  | 0xF0 0x9F 0x95 0x8B..0x8E #E1.0 [4] (🕋..🕎) kaaba..menorah
  | 0xF0 0x9F 0x95 0x8F #E0.0 [1] (🕏) BOWL OF HYGIEIA
  | 0xF0 0x9F 0x95 0x90..0x9B #E0.6 [12] (🕐..🕛) one o’clock..twelve...
  | 0xF0 0x9F 0x95 0x9C..0xA7 #E0.7 [12] (🕜..🕧) one-thirty..twelve-...
  | 0xF0 0x9F 0x95 0xA8..0xAE #E0.0 [7] (🕨..🕮) RIGHT SPEAKER..BOOK
  | 0xF0 0x9F 0x95 0xAF..0xB0 #E0.7 [2] (🕯️..🕰️) candle..mantelpie...
  | 0xF0 0x9F 0x95 0xB1..0xB2 #E0.0 [2] (🕱..🕲) BLACK SKULL AND CRO...
  | 0xF0 0x9F 0x95 0xB3..0xB9 #E0.7 [7] (🕳️..🕹️) hole..joystick
  | 0xF0 0x9F 0x95 0xBA #E3.0 [1] (🕺) man dancing
  | 0xF0 0x9F 0x95 0xBB..0xFF #E0.0 [12] (🕻..🖆) LEFT HAND TELEPHONE...
  | 0xF0 0x9F 0x96 0x00..0x86 #
  | 0xF0 0x9F 0x96 0x87 #E0.7 [1] (🖇️) linked paperclips
  | 0xF0 0x9F 0x96 0x88..0x89 #E0.0 [2] (🖈..🖉) BLACK PUSHPIN..LOWE...
  | 0xF0 0x9F 0x96 0x8A..0x8D #E0.7 [4] (🖊️..🖍️) pen..crayon
  | 0xF0 0x9F 0x96 0x8E..0x8F #E0.0 [2] (🖎..🖏) LEFT WRITING HAND.....
  | 0xF0 0x9F 0x96 0x90 #E0.7 [1] (🖐️) hand with fingers spl...
  | 0xF0 0x9F 0x96 0x91..0x94 #E0.0 [4] (🖑..🖔) REVERSED RAISED HAN...
  | 0xF0 0x9F 0x96 0x95..0x96 #E1.0 [2] (🖕..🖖) middle finger..vulc...
  | 0xF0 0x9F 0x96 0x97..0xA3 #E0.0 [13] (🖗..🖣) WHITE DOWN POINTING...
  | 0xF0 0x9F 0x96 0xA4 #E3.0 [1] (🖤) black heart
  | 0xF0 0x9F 0x96 0xA5 #E0.7 [1] (🖥️) desktop computer
  | 0xF0 0x9F 0x96 0xA6..0xA7 #E0.0 [2] (🖦..🖧) KEYBOARD AND MOUSE....
  | 0xF0 0x9F 0x96 0xA8 #E0.7 [1] (🖨️) printer
  | 0xF0 0x9F 0x96 0xA9..0xB0 #E0.0 [8] (🖩..🖰) POCKET CALCULATOR.....
  | 0xF0 0x9F 0x96 0xB1..0xB2 #E0.7 [2] (🖱️..🖲️) computer mouse..t...
  | 0xF0 0x9F 0x96 0xB3..0xBB #E0.0 [9] (🖳..🖻) OLD PERSONAL COMPUT...
  | 0xF0 0x9F 0x96 0xBC #E0.7 [1] (🖼️) framed picture
  | 0xF0 0x9F 0x96 0xBD..0xFF #E0.0 [5] (🖽..🗁) FRAME WITH TILES..O...
  | 0xF0 0x9F 0x97 0x00..0x81 #
  | 0xF0 0x9F 0x97 0x82..0x84 #E0.7 [3] (🗂️..🗄️) card index divide...
  | 0xF0 0x9F 0x97 0x85..0x90 #E0.0 [12] (🗅..🗐) EMPTY NOTE..PAGES
  | 0xF0 0x9F 0x97 0x91..0x93 #E0.7 [3] (🗑️..🗓️) wastebasket..spir...
  | 0xF0 0x9F 0x97 0x94..0x9B #E0.0 [8] (🗔..🗛) DESKTOP WINDOW..DEC...
  | 0xF0 0x9F 0x97 0x9C..0x9E #E0.7 [3] (🗜️..🗞️) clamp..rolled-up ...
  | 0xF0 0x9F 0x97 0x9F..0xA0 #E0.0 [2] (🗟..🗠) PAGE WITH CIRCLED T...
  | 0xF0 0x9F 0x97 0xA1 #E0.7 [1] (🗡️) dagger
  | 0xF0 0x9F 0x97 0xA2 #E0.0 [1] (🗢) LIPS
  | 0xF0 0x9F 0x97 0xA3 #E0.7 [1] (🗣️) speaking head
  | 0xF0 0x9F 0x97 0xA4..0xA7 #E0.0 [4] (🗤..🗧) THREE RAYS ABOVE..T...
  | 0xF0 0x9F 0x97 0xA8 #E2.0 [1] (🗨️) left speech bubble
  | 0xF0 0x9F 0x97 0xA9..0xAE #E0.0 [6] (🗩..🗮) RIGHT SPEECH BUBBLE...
  | 0xF0 0x9F 0x97 0xAF #E0.7 [1] (🗯️) right anger bubble
  | 0xF0 0x9F 0x97 0xB0..0xB2 #E0.0 [3] (🗰..🗲) MOOD BUBBLE..LIGHTN...
  | 0xF0 0x9F 0x97 0xB3 #E0.7 [1] (🗳️) ballot box with ballot
  | 0xF0 0x9F 0x97 0xB4..0xB9 #E0.0 [6] (🗴..🗹) BALLOT SCRIPT X..BA...
  | 0xF0 0x9F 0x97 0xBA #E0.7 [1] (🗺️) world map
  | 0xF0 0x9F 0x97 0xBB..0xBF #E0.6 [5] (🗻..🗿) mount fuji..moai
  | 0xF0 0x9F 0x98 0x80 #E1.0 [1] (😀) grinning face
  | 0xF0 0x9F 0x98 0x81..0x86 #E0.6 [6] (😁..😆) beaming face with s...
  | 0xF0 0x9F 0x98 0x87..0x88 #E1.0 [2] (😇..😈) smiling face with h...
  | 0xF0 0x9F 0x98 0x89..0x8D #E0.6 [5] (😉..😍) winking face..smili...
  | 0xF0 0x9F 0x98 0x8E #E1.0 [1] (😎) smiling face with sung...
  | 0xF0 0x9F 0x98 0x8F #E0.6 [1] (😏) smirking face
  | 0xF0 0x9F 0x98 0x90 #E0.7 [1] (😐) neutral face
  | 0xF0 0x9F 0x98 0x91 #E1.0 [1] (😑) expressionless face
  | 0xF0 0x9F 0x98 0x92..0x94 #E0.6 [3] (😒..😔) unamused face..pens...
  | 0xF0 0x9F 0x98 0x95 #E1.0 [1] (😕) confused face
  | 0xF0 0x9F 0x98 0x96 #E0.6 [1] (😖) confounded face
  | 0xF0 0x9F 0x98 0x97 #E1.0 [1] (😗) kissing face
  | 0xF0 0x9F 0x98 0x98 #E0.6 [1] (😘) face blowing a kiss
  | 0xF0 0x9F 0x98 0x99 #E1.0 [1] (😙) kissing face with smil...
  | 0xF0 0x9F 0x98 0x9A #E0.6 [1] (😚) kissing face with clos...
  | 0xF0 0x9F 0x98 0x9B #E1.0 [1] (😛) face with tongue
  | 0xF0 0x9F 0x98 0x9C..0x9E #E0.6 [3] (😜..😞) winking face with t...
  | 0xF0 0x9F 0x98 0x9F #E1.0 [1] (😟) worried face
  | 0xF0 0x9F 0x98 0xA0..0xA5 #E0.6 [6] (😠..😥) angry face..sad but...
  | 0xF0 0x9F 0x98 0xA6..0xA7 #E1.0 [2] (😦..😧) frowning face with ...
  | 0xF0 0x9F 0x98 0xA8..0xAB #E0.6 [4] (😨..😫) fearful face..tired...
  | 0xF0 0x9F 0x98 0xAC #E1.0 [1] (😬) grimacing face
  | 0xF0 0x9F 0x98 0xAD #E0.6 [1] (😭) loudly crying face
  | 0xF0 0x9F 0x98 0xAE..0xAF #E1.0 [2] (😮..😯) face with open mout...
  | 0xF0 0x9F 0x98 0xB0..0xB3 #E0.6 [4] (😰..😳) anxious face with s...
  | 0xF0 0x9F 0x98 0xB4 #E1.0 [1] (😴) sleeping face
  | 0xF0 0x9F 0x98 0xB5 #E0.6 [1] (😵) dizzy face
  | 0xF0 0x9F 0x98 0xB6 #E1.0 [1] (😶) face without mouth
  | 0xF0 0x9F 0x98 0xB7..0xFF #E0.6 [10] (😷..🙀) face with medical m...
  | 0xF0 0x9F 0x99 0x00..0x80 #
  | 0xF0 0x9F 0x99 0x81..0x84 #E1.0 [4] (🙁..🙄) slightly frowning f...
  | 0xF0 0x9F 0x99 0x85..0x8F #E0.6 [11] (🙅..🙏) person gesturing NO...
  | 0xF0 0x9F 0x9A 0x80 #E0.6 [1] (🚀) rocket
  | 0xF0 0x9F 0x9A 0x81..0x82 #E1.0 [2] (🚁..🚂) helicopter..locomotive
  | 0xF0 0x9F 0x9A 0x83..0x85 #E0.6 [3] (🚃..🚅) railway car..bullet...
  | 0xF0 0x9F 0x9A 0x86 #E1.0 [1] (🚆) train
  | 0xF0 0x9F 0x9A 0x87 #E0.6 [1] (🚇) metro
  | 0xF0 0x9F 0x9A 0x88 #E1.0 [1] (🚈) light rail
  | 0xF0 0x9F 0x9A 0x89 #E0.6 [1] (🚉) station
  | 0xF0 0x9F 0x9A 0x8A..0x8B #E1.0 [2] (🚊..🚋) tram..tram car
  | 0xF0 0x9F 0x9A 0x8C #E0.6 [1] (🚌) bus
  | 0xF0 0x9F 0x9A 0x8D #E0.7 [1] (🚍) oncoming bus
  | 0xF0 0x9F 0x9A 0x8E #E1.0 [1] (🚎) trolleybus
  | 0xF0 0x9F 0x9A 0x8F #E0.6 [1] (🚏) bus stop
  | 0xF0 0x9F 0x9A 0x90 #E1.0 [1] (🚐) minibus
  | 0xF0 0x9F 0x9A 0x91..0x93 #E0.6 [3] (🚑..🚓) ambulance..police car
  | 0xF0 0x9F 0x9A 0x94 #E0.7 [1] (🚔) oncoming police car
  | 0xF0 0x9F 0x9A 0x95 #E0.6 [1] (🚕) taxi
  | 0xF0 0x9F 0x9A 0x96 #E1.0 [1] (🚖) oncoming taxi
  | 0xF0 0x9F 0x9A 0x97 #E0.6 [1] (🚗) automobile
  | 0xF0 0x9F 0x9A 0x98 #E0.7 [1] (🚘) oncoming automobile
  | 0xF0 0x9F 0x9A 0x99..0x9A #E0.6 [2] (🚙..🚚) sport utility vehic...
  | 0xF0 0x9F 0x9A 0x9B..0xA1 #E1.0 [7] (🚛..🚡) articulated lorry.....
  | 0xF0 0x9F 0x9A 0xA2 #E0.6 [1] (🚢) ship
  | 0xF0 0x9F 0x9A 0xA3 #E1.0 [1] (🚣) person rowing boat
  | 0xF0 0x9F 0x9A 0xA4..0xA5 #E0.6 [2] (🚤..🚥) speedboat..horizont...
  | 0xF0 0x9F 0x9A 0xA6 #E1.0 [1] (🚦) vertical traffic light
  | 0xF0 0x9F 0x9A 0xA7..0xAD #E0.6 [7] (🚧..🚭) construction..no sm...
  | 0xF0 0x9F 0x9A 0xAE..0xB1 #E1.0 [4] (🚮..🚱) litter in bin sign....
  | 0xF0 0x9F 0x9A 0xB2 #E0.6 [1] (🚲) bicycle
  | 0xF0 0x9F 0x9A 0xB3..0xB5 #E1.0 [3] (🚳..🚵) no bicycles..person...
  | 0xF0 0x9F 0x9A 0xB6 #E0.6 [1] (🚶) person walking
  | 0xF0 0x9F 0x9A 0xB7..0xB8 #E1.0 [2] (🚷..🚸) no pedestrians..chi...
  | 0xF0 0x9F 0x9A 0xB9..0xBE #E0.6 [6] (🚹..🚾) men’s room..water c...
  | 0xF0 0x9F 0x9A 0xBF #E1.0 [1] (🚿) shower
  | 0xF0 0x9F 0x9B 0x80 #E0.6 [1] (🛀) person taking bath
  | 0xF0 0x9F 0x9B 0x81..0x85 #E1.0 [5] (🛁..🛅) bathtub..left luggage
  | 0xF0 0x9F 0x9B 0x86..0x8A #E0.0 [5] (🛆..🛊) TRIANGLE WITH ROUND...
  | 0xF0 0x9F 0x9B 0x8B #E0.7 [1] (🛋️) couch and lamp
  | 0xF0 0x9F 0x9B 0x8C #E1.0 [1] (🛌) person in bed
  | 0xF0 0x9F 0x9B 0x8D..0x8F #E0.7 [3] (🛍️..🛏️) shopping bags..bed
  | 0xF0 0x9F 0x9B 0x90 #E1.0 [1] (🛐) place of worship
  | 0xF0 0x9F 0x9B 0x91..0x92 #E3.0 [2] (🛑..🛒) stop sign..shopping...
  | 0xF0 0x9F 0x9B 0x93..0x94 #E0.0 [2] (🛓..🛔) STUPA..PAGODA
  | 0xF0 0x9F 0x9B 0x95 #E12.0 [1] (🛕) hindu temple
  | 0xF0 0x9F 0x9B 0x96..0x97 #E13.0 [2] (🛖..🛗) hut..elevator
  | 0xF0 0x9F 0x9B 0x98..0x9F #E0.0 [8] (..🛟) <reserved-1F6D8>..<...
  | 0xF0 0x9F 0x9B 0xA0..0xA5 #E0.7 [6] (🛠️..🛥️) hammer and wrench...
  | 0xF0 0x9F 0x9B 0xA6..0xA8 #E0.0 [3] (🛦..🛨) UP-POINTING MILITAR...
  | 0xF0 0x9F 0x9B 0xA9 #E0.7 [1] (🛩️) small airplane
  | 0xF0 0x9F 0x9B 0xAA #E0.0 [1] (🛪) NORTHEAST-POINTING AIR...
  | 0xF0 0x9F 0x9B 0xAB..0xAC #E1.0 [2] (🛫..🛬) airplane departure....
  | 0xF0 0x9F 0x9B 0xAD..0xAF #E0.0 [3] (..) <reserved-1F6ED>..<...
  | 0xF0 0x9F 0x9B 0xB0 #E0.7 [1] (🛰️) satellite
  | 0xF0 0x9F 0x9B 0xB1..0xB2 #E0.0 [2] (🛱..🛲) ONCOMING FIRE ENGIN...
  | 0xF0 0x9F 0x9B 0xB3 #E0.7 [1] (🛳️) passenger ship
  | 0xF0 0x9F 0x9B 0xB4..0xB6 #E3.0 [3] (🛴..🛶) kick scooter..canoe
  | 0xF0 0x9F 0x9B 0xB7..0xB8 #E5.0 [2] (🛷..🛸) sled..flying saucer
  | 0xF0 0x9F 0x9B 0xB9 #E11.0 [1] (🛹) skateboard
  | 0xF0 0x9F 0x9B 0xBA #E12.0 [1] (🛺) auto rickshaw
  | 0xF0 0x9F 0x9B 0xBB..0xBC #E13.0 [2] (🛻..🛼) pickup truck..rolle...
  | 0xF0 0x9F 0x9B 0xBD..0xBF #E0.0 [3] (..) <reserved-1F6FD>..<...
  | 0xF0 0x9F 0x9D 0xB4..0xBF #E0.0 [12] (🝴..🝿) <reserved-1F774>..<...
  | 0xF0 0x9F 0x9F 0x95..0x9F #E0.0 [11] (🟕..) CIRCLED TRIANGLE..<...
  | 0xF0 0x9F 0x9F 0xA0..0xAB #E12.0 [12] (🟠..🟫) orange circle..brow...
  | 0xF0 0x9F 0x9F 0xAC..0xBF #E0.0 [20] (..) <reserved-1F7EC>..<...
  | 0xF0 0x9F 0xA0 0x8C..0x8F #E0.0 [4] (..) <reserved-1F80C>..<...
  | 0xF0 0x9F 0xA1 0x88..0x8F #E0.0 [8] (..) <reserved-1F848>..<...
  | 0xF0 0x9F 0xA1 0x9A..0x9F #E0.0 [6] (..) <reserved-1F85A>..<...
  | 0xF0 0x9F 0xA2 0x88..0x8F #E0.0 [8] (..) <reserved-1F888>..<...
  | 0xF0 0x9F 0xA2 0xAE..0xFF #E0.0 [82] (..) <reserved-1F8AE>..<...
  | 0xF0 0x9F 0xA3 0x00..0xBF #
  | 0xF0 0x9F 0xA4 0x8C #E13.0 [1] (🤌) pinched fingers
  | 0xF0 0x9F 0xA4 0x8D..0x8F #E12.0 [3] (🤍..🤏) white heart..pinchi...
  | 0xF0 0x9F 0xA4 0x90..0x98 #E1.0 [9] (🤐..🤘) zipper-mouth face.....
  | 0xF0 0x9F 0xA4 0x99..0x9E #E3.0 [6] (🤙..🤞) call me hand..cross...
  | 0xF0 0x9F 0xA4 0x9F #E5.0 [1] (🤟) love-you gesture
  | 0xF0 0x9F 0xA4 0xA0..0xA7 #E3.0 [8] (🤠..🤧) cowboy hat face..sn...
  | 0xF0 0x9F 0xA4 0xA8..0xAF #E5.0 [8] (🤨..🤯) face with raised ey...
  | 0xF0 0x9F 0xA4 0xB0 #E3.0 [1] (🤰) pregnant woman
  | 0xF0 0x9F 0xA4 0xB1..0xB2 #E5.0 [2] (🤱..🤲) breast-feeding..pal...
  | 0xF0 0x9F 0xA4 0xB3..0xBA #E3.0 [8] (🤳..🤺) selfie..person fencing
  | 0xF0 0x9F 0xA4 0xBC..0xBE #E3.0 [3] (🤼..🤾) people wrestling..p...
  | 0xF0 0x9F 0xA4 0xBF #E12.0 [1] (🤿) diving mask
  | 0xF0 0x9F 0xA5 0x80..0x85 #E3.0 [6] (🥀..🥅) wilted flower..goal...
  | 0xF0 0x9F 0xA5 0x87..0x8B #E3.0 [5] (🥇..🥋) 1st place medal..ma...
  | 0xF0 0x9F 0xA5 0x8C #E5.0 [1] (🥌) curling stone
  | 0xF0 0x9F 0xA5 0x8D..0x8F #E11.0 [3] (🥍..🥏) lacrosse..flying disc
  | 0xF0 0x9F 0xA5 0x90..0x9E #E3.0 [15] (🥐..🥞) croissant..pancakes
  | 0xF0 0x9F 0xA5 0x9F..0xAB #E5.0 [13] (🥟..🥫) dumpling..canned food
  | 0xF0 0x9F 0xA5 0xAC..0xB0 #E11.0 [5] (🥬..🥰) leafy green..smilin...
  | 0xF0 0x9F 0xA5 0xB1 #E12.0 [1] (🥱) yawning face
  | 0xF0 0x9F 0xA5 0xB2 #E13.0 [1] (🥲) smiling face with tear
  | 0xF0 0x9F 0xA5 0xB3..0xB6 #E11.0 [4] (🥳..🥶) partying face..cold...
  | 0xF0 0x9F 0xA5 0xB7..0xB8 #E13.0 [2] (🥷..🥸) ninja..disguised face
  | 0xF0 0x9F 0xA5 0xB9 #E0.0 [1] (🥹) <reserved-1F979>
  | 0xF0 0x9F 0xA5 0xBA #E11.0 [1] (🥺) pleading face
  | 0xF0 0x9F 0xA5 0xBB #E12.0 [1] (🥻) sari
  | 0xF0 0x9F 0xA5 0xBC..0xBF #E11.0 [4] (🥼..🥿) lab coat..flat shoe
  | 0xF0 0x9F 0xA6 0x80..0x84 #E1.0 [5] (🦀..🦄) crab..unicorn
  | 0xF0 0x9F 0xA6 0x85..0x91 #E3.0 [13] (🦅..🦑) eagle..squid
  | 0xF0 0x9F 0xA6 0x92..0x97 #E5.0 [6] (🦒..🦗) giraffe..cricket
  | 0xF0 0x9F 0xA6 0x98..0xA2 #E11.0 [11] (🦘..🦢) kangaroo..swan
  | 0xF0 0x9F 0xA6 0xA3..0xA4 #E13.0 [2] (🦣..🦤) mammoth..dodo
  | 0xF0 0x9F 0xA6 0xA5..0xAA #E12.0 [6] (🦥..🦪) sloth..oyster
  | 0xF0 0x9F 0xA6 0xAB..0xAD #E13.0 [3] (🦫..🦭) beaver..seal
  | 0xF0 0x9F 0xA6 0xAE..0xAF #E12.0 [2] (🦮..🦯) guide dog..white cane
  | 0xF0 0x9F 0xA6 0xB0..0xB9 #E11.0 [10] (🦰..🦹) red hair..supervillain
  | 0xF0 0x9F 0xA6 0xBA..0xBF #E12.0 [6] (🦺..🦿) safety vest..mechan...
  | 0xF0 0x9F 0xA7 0x80 #E1.0 [1] (🧀) cheese wedge
  | 0xF0 0x9F 0xA7 0x81..0x82 #E11.0 [2] (🧁..🧂) cupcake..salt
  | 0xF0 0x9F 0xA7 0x83..0x8A #E12.0 [8] (🧃..🧊) beverage box..ice
  | 0xF0 0x9F 0xA7 0x8B #E13.0 [1] (🧋) bubble tea
  | 0xF0 0x9F 0xA7 0x8C #E0.0 [1] (🧌) <reserved-1F9CC>
  | 0xF0 0x9F 0xA7 0x8D..0x8F #E12.0 [3] (🧍..🧏) person standing..de...
  | 0xF0 0x9F 0xA7 0x90..0xA6 #E5.0 [23] (🧐..🧦) face with monocle.....
  | 0xF0 0x9F 0xA7 0xA7..0xBF #E11.0 [25] (🧧..🧿) red envelope..nazar...
  | 0xF0 0x9F 0xA8 0x80..0xFF #E0.0 [112] (🨀..) NEUTRAL CHESS KING....
  | 0xF0 0x9F 0xA9 0x00..0xAF #
  | 0xF0 0x9F 0xA9 0xB0..0xB3 #E12.0 [4] (🩰..🩳) ballet shoes..shorts
  | 0xF0 0x9F 0xA9 0xB4 #E13.0 [1] (🩴) thong sandal
  | 0xF0 0x9F 0xA9 0xB5..0xB7 #E0.0 [3] (🩵..🩷) <reserved-1FA75>..<...
  | 0xF0 0x9F 0xA9 0xB8..0xBA #E12.0 [3] (🩸..🩺) drop of blood..stet...
  | 0xF0 0x9F 0xA9 0xBB..0xBF #E0.0 [5] (🩻..) <reserved-1FA7B>..<...
  | 0xF0 0x9F 0xAA 0x80..0x82 #E12.0 [3] (🪀..🪂) yo-yo..parachute
  | 0xF0 0x9F 0xAA 0x83..0x86 #E13.0 [4] (🪃..🪆) boomerang..nesting ...
  | 0xF0 0x9F 0xAA 0x87..0x8F #E0.0 [9] (🪇..) <reserved-1FA87>..<...
  | 0xF0 0x9F 0xAA 0x90..0x95 #E12.0 [6] (🪐..🪕) ringed planet..banjo
  | 0xF0 0x9F 0xAA 0x96..0xA8 #E13.0 [19] (🪖..🪨) military helmet..rock
  | 0xF0 0x9F 0xAA 0xA9..0xAF #E0.0 [7] (🪩..🪯) <reserved-1FAA9>..<...
  | 0xF0 0x9F 0xAA 0xB0..0xB6 #E13.0 [7] (🪰..🪶) fly..feather
  | 0xF0 0x9F 0xAA 0xB7..0xBF #E0.0 [9] (🪷..🪿) <reserved-1FAB7>..<...
  | 0xF0 0x9F 0xAB 0x80..0x82 #E13.0 [3] (🫀..🫂) anatomical heart..p...
  | 0xF0 0x9F 0xAB 0x83..0x8F #E0.0 [13] (🫃..🫏) <reserved-1FAC3>..<...
  | 0xF0 0x9F 0xAB 0x90..0x96 #E13.0 [7] (🫐..🫖) blueberries..teapot
  | 0xF0 0x9F 0xAB 0x97..0xBF #E0.0 [41] (🫗..) <reserved-1FAD7>..<...
  | 0xF0 0x9F 0xB0 0x80..0xFF #E0.0[1022] (..) <reserved-1FC...
  | 0xF0 0x9F 0xB1..0xBE 0x00..0xFF #
  | 0xF0 0x9F 0xBF 0x00..0xBD #
  ;

}%%
8
vendor/github.com/apparentlymart/go-textseg/v13/textseg/generate.go
generated
vendored
Normal file
@@ -0,0 +1,8 @@
package textseg

//go:generate go run make_tables.go -output tables.go
//go:generate go run make_test_tables.go -output tables_test.go
//go:generate ruby unicode2ragel.rb --url=https://www.unicode.org/Public/13.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,ZWJ" -o grapheme_clusters_table.rl
//go:generate ruby unicode2ragel.rb --url=https://www.unicode.org/Public/13.0.0/ucd/emoji/emoji-data.txt -m Emoji -p "Extended_Pictographic" -o emoji_table.rl
//go:generate ragel -Z grapheme_clusters.rl
//go:generate gofmt -w grapheme_clusters.go
4138
vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.go
generated
vendored
Normal file
File diff suppressed because it is too large
133
vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.rl
generated
vendored
Normal file
@@ -0,0 +1,133 @@
package textseg
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// Generated from grapheme_clusters.rl. DO NOT EDIT
|
||||
%%{
|
||||
# (except you are actually in grapheme_clusters.rl here, so edit away!)
|
||||
|
||||
machine graphclust;
|
||||
write data;
|
||||
}%%
|
||||
|
||||
var Error = errors.New("invalid UTF8 text")
|
||||
|
||||
// ScanGraphemeClusters is a split function for bufio.Scanner that splits
|
||||
// on grapheme cluster boundaries.
|
||||
func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
	if len(data) == 0 {
		return 0, nil, nil
	}

	// Ragel state
	cs := 0 // Current State
	p := 0 // "Pointer" into data
	pe := len(data) // End-of-data "pointer"
	ts := 0
	te := 0
	act := 0
	eof := pe

	// Make Go compiler happy
	_ = ts
	_ = te
	_ = act
	_ = eof

	startPos := 0
	endPos := 0

	%%{
		include GraphemeCluster "grapheme_clusters_table.rl";
		include Emoji "emoji_table.rl";

		action start {
			startPos = p
		}

		action end {
			endPos = p
		}

		action emit {
			return endPos+1, data[startPos:endPos+1], nil
		}

		ZWJGlue = ZWJ (Extended_Pictographic Extend*)?;
		AnyExtender = Extend | ZWJGlue | SpacingMark;
		Extension = AnyExtender*;
		ReplacementChar = (0xEF 0xBF 0xBD);

		CRLFSeq = CR LF;
		ControlSeq = Control | ReplacementChar;
		HangulSeq = (
			L+ (((LV? V+ | LVT) T*)?|LV?) |
			LV V* T* |
			V+ T* |
			LVT T* |
			T+
		) Extension;
		EmojiSeq = Extended_Pictographic Extend* Extension;
		ZWJSeq = ZWJ (ZWJ | Extend | SpacingMark)*;
		EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension;

		UTF8Cont = 0x80 .. 0xBF;
		AnyUTF8 = (
			0x00..0x7F |
			0xC0..0xDF . UTF8Cont |
			0xE0..0xEF . UTF8Cont . UTF8Cont |
			0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
		);

		# OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension
		OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|Extended_Pictographic|ZWJ|Regional_Indicator|Prepend)) (Extend | ZWJ | SpacingMark)*;

		# PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break
		PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?;

		CRLFTok = CRLFSeq >start @end;
		ControlTok = ControlSeq >start @end;
		HangulTok = HangulSeq >start @end;
		EmojiTok = EmojiSeq >start @end;
		ZWJTok = ZWJSeq >start @end;
		EmojiFlagTok = EmojiFlagSeq >start @end;
		OtherTok = OtherSeq >start @end;
		PrependTok = PrependSeq >start @end;

		main := |*
			CRLFTok => emit;
			ControlTok => emit;
			HangulTok => emit;
			EmojiTok => emit;
			ZWJTok => emit;
			EmojiFlagTok => emit;
			PrependTok => emit;
			OtherTok => emit;

			# any single valid UTF-8 character would also be valid per spec,
			# but we'll handle that separately after the loop so we can deal
			# with requesting more bytes if we're not at EOF.
		*|;

		write init;
		write exec;
	}%%

	// If we fall out here then we were unable to complete a sequence.
	// If we weren't able to complete a sequence then either we've
	// reached the end of a partial buffer (so there's more data to come)
	// or we have an isolated symbol that would normally be part of a
	// grapheme cluster but has appeared in isolation here.

	if !atEOF {
		// Request more
		return 0, nil, nil
	}

	// Just take the first UTF-8 sequence and return that.
	_, seqLen := utf8.DecodeRune(data)
	return seqLen, data[:seqLen], nil
}
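A minimal usage sketch (not part of the vendored source, assuming the v13 import path above): ScanGraphemeClusters matches the bufio.SplitFunc signature, so it can drive a bufio.Scanner directly.

package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/apparentlymart/go-textseg/v13/textseg"
)

func main() {
	// "e" + U+0301 (combining acute) is two runes but one grapheme cluster;
	// the scanner yields it as a single token: "n", "e\u0301", "e".
	sc := bufio.NewScanner(strings.NewReader("ne\u0301e"))
	sc.Split(textseg.ScanGraphemeClusters)
	for sc.Scan() {
		fmt.Printf("%q\n", sc.Text())
	}
}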
1609 vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters_table.rl generated vendored Normal file
File diff suppressed because it is too large
5833 vendor/github.com/apparentlymart/go-textseg/v13/textseg/tables.go generated vendored Normal file
File diff suppressed because it is too large
335 vendor/github.com/apparentlymart/go-textseg/v13/textseg/unicode2ragel.rb generated vendored Normal file
@@ -0,0 +1,335 @@
#!/usr/bin/env ruby
#
# This script has been updated to accept more command-line arguments:
#
#    -u, --url        URL to process
#    -m, --machine    Machine name
#    -p, --properties Properties to add to the machine
#    -o, --output     Write output to file
#
# Updated by: Marty Schoch <marty.schoch@gmail.com>
#
# This script uses the unicode spec to generate a Ragel state machine
# that recognizes unicode alphanumeric characters. It generates 5
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
# Currently supported encodings are UTF-8 [default] and UCS-4.
#
# Usage: unicode2ragel.rb [options]
#    -e, --encoding [ucs4 | utf8]     Data encoding
#    -h, --help                       Show this message
#
# This script was originally written as part of the Ferret search
# engine library.
#
# Author: Rakan El-Khalil <rakan@well.com>

require 'optparse'
require 'open-uri'

ENCODINGS = [ :utf8, :ucs4 ]
ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
DEFAULT_MACHINE_NAME= "WChar"

###
# Display vars & default option

TOTAL_WIDTH = 80
RANGE_WIDTH = 23
@encoding = :utf8
@chart_url = DEFAULT_CHART_URL
machine_name = DEFAULT_MACHINE_NAME
properties = []
@output = $stdout

###
# Option parsing

cli_opts = OptionParser.new do |opts|
  opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
    @encoding = o.downcase.to_sym
  end
  opts.on("-h", "--help", "Show this message") do
    puts opts
    exit
  end
  opts.on("-u", "--url URL", "URL to process") do |o|
    @chart_url = o
  end
  opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
    machine_name = o
  end
  opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
    properties = o
  end
  opts.on("-o", "--output FILE", "output file") do |o|
    @output = File.new(o, "w+")
  end
end

cli_opts.parse(ARGV)
unless ENCODINGS.member? @encoding
  puts "Invalid encoding: #{@encoding}"
  puts cli_opts
  exit
end

##
# Downloads the document at url and yields every alpha line's hex
# range and description.

def each_alpha( url, property )
  URI.open( url ) do |file|
    file.each_line do |line|
      next if line =~ /^#/;
      next if line !~ /; #{property} *#/;

      range, description = line.split(/;/)
      range.strip!
      description.gsub!(/.*#/, '').strip!

      if range =~ /\.\./
           start, stop = range.split '..'
      else start = stop = range
      end

      yield start.hex .. stop.hex, description
    end
  end
end

###
# Formats to hex at minimum width

def to_hex( n )
  r = "%0X" % n
  r = "0#{r}" unless (r.length % 2).zero?
  r
end

###
# UCS4 is just a straight hex conversion of the unicode codepoint.

def to_ucs4( range )
  rangestr  = "0x" + to_hex(range.begin)
  rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
  [ rangestr ]
end

##
# 0x00     - 0x7f     -> 0zzzzzzz[7]
# 0x80     - 0x7ff    -> 110yyyyy[5] 10zzzzzz[6]
# 0x800    - 0xffff   -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]

UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]

def to_utf8_enc( n )
  r = 0
  if n <= 0x7f
    r = n
  elsif n <= 0x7ff
    y = 0xc0 | (n >> 6)
    z = 0x80 | (n & 0x3f)
    r = y << 8 | z
  elsif n <= 0xffff
    x = 0xe0 | (n >> 12)
    y = 0x80 | (n >>  6) & 0x3f
    z = 0x80 |  n        & 0x3f
    r = x << 16 | y << 8 | z
  elsif n <= 0x10ffff
    w = 0xf0 | (n >> 18)
    x = 0x80 | (n >> 12) & 0x3f
    y = 0x80 | (n >>  6) & 0x3f
    z = 0x80 |  n        & 0x3f
    r = w << 24 | x << 16 | y << 8 | z
  end

  to_hex(r)
end

def from_utf8_enc( n )
  n = n.hex
  r = 0
  if n <= 0x7f
    r = n
  elsif n <= 0xdfff
    y = (n >> 8) & 0x1f
    z =  n       & 0x3f
    r = y << 6 | z
  elsif n <= 0xefffff
    x = (n >> 16) & 0x0f
    y = (n >>  8) & 0x3f
    z =  n        & 0x3f
    r = x << 10 | y << 6 | z
  elsif n <= 0xf7ffffff
    w = (n >> 24) & 0x07
    x = (n >> 16) & 0x3f
    y = (n >>  8) & 0x3f
    z =  n        & 0x3f
    r = w << 18 | x << 12 | y << 6 | z
  end
  r
end

###
# Given a range, splits it up into ranges that can be continuously
# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
# This is not strictly needed since the current [5.1] unicode standard
# doesn't have ranges that straddle utf8 boundaries. This is included
# for completeness as there is no telling if that will ever change.

def utf8_ranges( range )
  ranges = []
  UTF8_BOUNDARIES.each do |max|
    if range.begin <= max
      if range.end <= max
        ranges << range
        return ranges
      end

      ranges << (range.begin .. max)
      range = (max + 1) .. range.end
    end
  end
  ranges
end

def build_range( start, stop )
  size = start.size/2
  left = size - 1
  return [""] if size < 1

  a = start[0..1]
  b = stop[0..1]

  ###
  # Shared prefix

  if a == b
    return build_range(start[2..-1], stop[2..-1]).map do |elt|
      "0x#{a} " + elt
    end
  end

  ###
  # Unshared prefix, end of run

  return ["0x#{a}..0x#{b} "] if left.zero?

  ###
  # Unshared prefix, not end of run
  # Range can be 0x123456..0x56789A
  # Which is equivalent to:
  #   0x123456 .. 0x12FFFF
  #   0x130000 .. 0x55FFFF
  #   0x560000 .. 0x56789A

  ret = []
  ret << build_range(start, a + "FF" * left)

  ###
  # Only generate middle range if need be.

  if a.hex+1 != b.hex
    max = to_hex(b.hex - 1)
    max = "FF" if b == "FF"
    ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
  end

  ###
  # Don't generate last range if it is covered by first range

  ret << build_range(b + "00" * left, stop) unless b == "FF"
  ret.flatten!
end

def to_utf8( range )
  utf8_ranges( range ).map do |r|
    begin_enc = to_utf8_enc(r.begin)
    end_enc = to_utf8_enc(r.end)
    build_range begin_enc, end_enc
  end.flatten!
end

##
# Perform a 3-way comparison of the number of codepoints advertised by
# the unicode spec for the given range, the originally parsed range,
# and the resulting utf8 encoded range.

def count_codepoints( code )
  code.split(' ').inject(1) do |acc, elt|
    if elt =~ /0x(.+)\.\.0x(.+)/
      if @encoding == :utf8
        acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
      else
        acc * ($2.hex - $1.hex + 1)
      end
    else
      acc
    end
  end
end

def is_valid?( range, desc, codes )
  spec_count  = 1
  spec_count  = $1.to_i if desc =~ /\[(\d+)\]/
  range_count = range.end - range.begin + 1

  sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
  sum == spec_count and sum == range_count
end

##
# Generate the state machine to the configured output

def generate_machine( name, property )
  pipe = " "
  @output.puts "    #{name} = "
  each_alpha( @chart_url, property ) do |range, desc|

    codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)

    #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
    #      is_valid? range, desc, codes

    range_width = codes.map { |a| a.size }.max
    range_width = RANGE_WIDTH if range_width < RANGE_WIDTH

    desc_width  = TOTAL_WIDTH - RANGE_WIDTH - 11
    desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH

    if desc.size > desc_width
      desc = desc[0..desc_width - 4] + "..."
    end

    codes.each_with_index do |r, idx|
      desc = "" unless idx.zero?
      code = "%-#{range_width}s" % r
      @output.puts "      #{pipe} #{code} ##{desc}"
      pipe = "|"
    end
  end
  @output.puts "      ;"
  @output.puts ""
end

@output.puts <<EOF
# The following Ragel file was autogenerated with #{$0}
# from: #{@chart_url}
#
# It defines #{properties}.
#
# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
# and that your input is in #{@encoding}.

%%{
    machine #{machine_name};

EOF

properties.each { |x| generate_machine( x, x ) }

@output.puts <<EOF
}%%
EOF
19 vendor/github.com/apparentlymart/go-textseg/v13/textseg/utf8_seqs.go generated vendored Normal file
@@ -0,0 +1,19 @@
package textseg

import "unicode/utf8"

// ScanUTF8Sequences is a split function for a bufio.Scanner that splits
// on UTF8 sequence boundaries.
//
// This is included largely for completeness, since this behavior is already
// built in to Go when ranging over a string.
func ScanUTF8Sequences(data []byte, atEOF bool) (int, []byte, error) {
	if len(data) == 0 {
		return 0, nil, nil
	}
	r, seqLen := utf8.DecodeRune(data)
	if r == utf8.RuneError && !atEOF {
		return 0, nil, nil
	}
	return seqLen, data[:seqLen], nil
}
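A small sketch (not part of the vendored source): the comment above notes that this split function mirrors what Go already does when ranging over a string, which the following demonstrates.

package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/apparentlymart/go-textseg/v13/textseg"
)

func main() {
	input := "héllo"

	// Split on UTF-8 sequence boundaries with the scanner...
	sc := bufio.NewScanner(strings.NewReader(input))
	sc.Split(textseg.ScanUTF8Sequences)
	for sc.Scan() {
		fmt.Printf("%q ", sc.Text())
	}
	fmt.Println()

	// ...which yields the same sequences as ranging over the string.
	for _, r := range input {
		fmt.Printf("%q ", string(r))
	}
	fmt.Println()
}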
8 vendor/github.com/cespare/xxhash/v2/.travis.yml generated vendored
@@ -1,8 +0,0 @@
language: go
go:
  - "1.x"
  - master
env:
  - TAGS=""
  - TAGS="-tags purego"
script: go test $TAGS -v ./...
6 vendor/github.com/cespare/xxhash/v2/README.md generated vendored
@@ -1,7 +1,7 @@
# xxhash

[](https://godoc.org/github.com/cespare/xxhash)
[](https://travis-ci.org/cespare/xxhash)
[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
@@ -64,4 +64,6 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
1 vendor/github.com/cespare/xxhash/v2/xxhash.go generated vendored
@@ -193,7 +193,6 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	b = b[len(d.mem):]
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}
62 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored
@@ -6,7 +6,7 @@
// Register allocation:
// AX	h
// CX	pointer to advance through b
// SI	pointer to advance through b
// DX	n
// BX	loop end
// R8	v1, k1
@@ -16,39 +16,39 @@
// R12	tmp
// R13	prime1v
// R14	prime2v
// R15	prime4v
// DI	prime4v

// round reads from and advances the buffer pointer in CX.
// round reads from and advances the buffer pointer in SI.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
	MOVQ  (CX), R12 \
	ADDQ  $8, CX    \
	MOVQ  (SI), R12 \
	ADDQ  $8, SI    \
	IMULQ R14, R12  \
	ADDQ  R12, r    \
	ROLQ  $31, r    \
	IMULQ R13, r

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
#define mergeRound(acc, val) \
	IMULQ R14, val \
	ROLQ  $31, val \
	IMULQ R13, val \
	XORQ  val, acc \
	IMULQ R13, acc \
	ADDQ  R15, acc
	ADDQ  DI, acc

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
	// Load fixed primes.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14
	MOVQ ·prime4v(SB), R15
	MOVQ ·prime4v(SB), DI

	// Load slice.
	MOVQ b_base+0(FP), CX
	MOVQ b_base+0(FP), SI
	MOVQ b_len+8(FP), DX
	LEAQ (CX)(DX*1), BX
	LEAQ (SI)(DX*1), BX

	// The first loop limit will be len(b)-32.
	SUBQ $32, BX
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
	XORQ R11, R11
	SUBQ R13, R11

	// Loop until CX > BX.
	// Loop until SI > BX.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ CX, BX
	CMPQ SI, BX
	JLE  blockLoop

	MOVQ R8, AX
@@ -100,16 +100,16 @@ noBlocks:
afterBlocks:
	ADDQ DX, AX

	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
	ADDQ $24, BX

	CMPQ CX, BX
	CMPQ SI, BX
	JG   fourByte

wordLoop:
	// Calculate k1.
	MOVQ  (CX), R8
	ADDQ  $8, CX
	MOVQ  (SI), R8
	ADDQ  $8, SI
	IMULQ R14, R8
	ROLQ  $31, R8
	IMULQ R13, R8
@@ -117,18 +117,18 @@ wordLoop:
	XORQ  R8, AX
	ROLQ  $27, AX
	IMULQ R13, AX
	ADDQ  R15, AX
	ADDQ  DI, AX

	CMPQ CX, BX
	CMPQ SI, BX
	JLE  wordLoop

fourByte:
	ADDQ $4, BX
	CMPQ CX, BX
	CMPQ SI, BX
	JG   singles

	MOVL  (CX), R8
	ADDQ  $4, CX
	MOVL  (SI), R8
	ADDQ  $4, SI
	IMULQ R13, R8
	XORQ  R8, AX
@@ -138,19 +138,19 @@ fourByte:

singles:
	ADDQ $4, BX
	CMPQ CX, BX
	CMPQ SI, BX
	JGE  finalize

singlesLoop:
	MOVBQZX (CX), R12
	ADDQ    $1, CX
	MOVBQZX (SI), R12
	ADDQ    $1, SI
	IMULQ   ·prime5v(SB), R12
	XORQ    R12, AX

	ROLQ  $11, AX
	IMULQ R13, AX

	CMPQ CX, BX
	CMPQ SI, BX
	JL   singlesLoop

finalize:
@@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
	MOVQ ·prime2v(SB), R14

	// Load slice.
	MOVQ b_base+8(FP), CX
	MOVQ b_base+8(FP), SI
	MOVQ b_len+16(FP), DX
	LEAQ (CX)(DX*1), BX
	LEAQ (SI)(DX*1), BX
	SUBQ $32, BX

	// Load vN from d.
@@ -199,7 +199,7 @@ blockLoop:
	round(R10)
	round(R11)

	CMPQ CX, BX
	CMPQ SI, BX
	JLE  blockLoop

	// Copy vN back to d.
@@ -208,8 +208,8 @@ blockLoop:
	MOVQ R10, 16(AX)
	MOVQ R11, 24(AX)

	// The number of bytes written is CX minus the old base pointer.
	SUBQ b_base+8(FP), CX
	MOVQ CX, ret+32(FP)
	// The number of bytes written is SI minus the old base pointer.
	SUBQ b_base+8(FP), SI
	MOVQ SI, ret+32(FP)

	RET
55 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored
@@ -6,41 +6,52 @@
package xxhash

import (
	"reflect"
	"unsafe"
)

// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// unsafe operations unnecessary: https://golang.org/issue/2205.
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.

// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
//
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.
//   var b []byte
//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
//   bh.Len = len(s)
//   bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
	return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return d.Write(b)
	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
	// d.Write always returns len(s), nil.
	// Ignoring the return output and returning these fixed values buys a
	// savings of 6 in the inliner's cost model.
	return len(s), nil
}

// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
	s   string
	cap int
}
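A short sketch (not part of the vendored source): the point of the unsafe conversion above is to hash a string without copying it into a []byte. Both public entry points produce the same digest as their []byte counterparts.

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	s := "hello, world"

	// Sum64String avoids allocating and copying []byte(s).
	fmt.Println(xxhash.Sum64String(s) == xxhash.Sum64([]byte(s))) // true

	// The streaming Digest offers the same shortcut via WriteString.
	d := xxhash.New()
	_, _ = d.WriteString(s)
	fmt.Println(d.Sum64() == xxhash.Sum64String(s)) // true
}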
23 vendor/github.com/compose-spec/compose-go/consts/consts.go generated vendored Normal file
@@ -0,0 +1,23 @@
/*
   Copyright 2020 The Compose Specification Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package consts

const (
	ComposeProjectName   = "COMPOSE_PROJECT_NAME"
	ComposePathSeparator = "COMPOSE_PATH_SEPARATOR"
	ComposeFilePath      = "COMPOSE_FILE"
)
@@ -20,4 +20,3 @@ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@@ -1,4 +1,4 @@
// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
// Package dotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
//
// Examples/readme can be found on the github page at https://github.com/joho/godotenv
//
@@ -11,22 +11,45 @@
// godotenv.Load()
//
// and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
package godotenv
package dotenv

import (
	"bufio"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"os"
	"os/exec"
	"regexp"
	"sort"
	"strconv"
	"strings"
)

const doubleQuoteSpecialChars = "\\\n\r\"!$`"

// LookupFn represents a lookup function to resolve variables from
type LookupFn func(string) (string, bool)

var noLookupFn = func(s string) (string, bool) {
	return "", false
}

// Parse reads an env file from io.Reader, returning a map of keys and values.
func Parse(r io.Reader) (map[string]string, error) {
	return ParseWithLookup(r, nil)
}

// ParseWithLookup reads an env file from io.Reader, returning a map of keys and values.
func ParseWithLookup(r io.Reader, lookupFn LookupFn) (map[string]string, error) {
	data, err := ioutil.ReadAll(r)
	if err != nil {
		return nil, err
	}

	return UnmarshalBytesWithLookup(data, lookupFn)
}

// Load will read your env file(s) and load them into ENV for this process.
//
// Call this function as close as possible to the start of your program (ideally in main)
@@ -39,15 +62,7 @@ const doubleQuoteSpecialChars = "\\\n\r\"!$`"
//
// It's important to note that it WILL NOT OVERRIDE an env variable that already exists - consider the .env file to set dev vars or sensible defaults
func Load(filenames ...string) (err error) {
	filenames = filenamesOrDefault(filenames)

	for _, filename := range filenames {
		err = loadFile(filename, false)
		if err != nil {
			return // return early on a spazout
		}
	}
	return
	return load(false, filenames...)
}

// Overload will read your env file(s) and load them into ENV for this process.
@@ -62,10 +77,14 @@ func Load(filenames ...string) (err error) {
//
// It's important to note this WILL OVERRIDE an env variable that already exists - consider the .env file to forcefully set all vars.
func Overload(filenames ...string) (err error) {
	return load(true, filenames...)
}

func load(overload bool, filenames ...string) (err error) {
	filenames = filenamesOrDefault(filenames)

	for _, filename := range filenames {
		err = loadFile(filename, true)
		err = loadFile(filename, overload)
		if err != nil {
			return // return early on a spazout
		}
@@ -73,14 +92,14 @@ func Overload(filenames ...string) (err error) {
	return
}

// Read all env (with same file loading semantics as Load) but return values as
// ReadWithLookup gets all env vars from the files and/or lookup function and return values as
// a map rather than automatically writing values into env
func Read(filenames ...string) (envMap map[string]string, err error) {
func ReadWithLookup(lookupFn LookupFn, filenames ...string) (envMap map[string]string, err error) {
	filenames = filenamesOrDefault(filenames)
	envMap = make(map[string]string)

	for _, filename := range filenames {
		individualEnvMap, individualErr := readFile(filename)
		individualEnvMap, individualErr := readFile(filename, lookupFn)

		if individualErr != nil {
			err = individualErr
@@ -95,37 +114,27 @@ func Read(filenames ...string) (envMap map[string]string, err error) {
	return
}

// Parse reads an env file from io.Reader, returning a map of keys and values.
func Parse(r io.Reader) (envMap map[string]string, err error) {
	envMap = make(map[string]string)

	var lines []string
	scanner := bufio.NewScanner(r)
	for scanner.Scan() {
		lines = append(lines, scanner.Text())
	}

	if err = scanner.Err(); err != nil {
		return
	}

	for _, fullLine := range lines {
		if !isIgnoredLine(fullLine) {
			var key, value string
			key, value, err = parseLine(fullLine, envMap)

			if err != nil {
				return
			}
			envMap[key] = value
		}
	}
	return
// Read all env (with same file loading semantics as Load) but return values as
// a map rather than automatically writing values into env
func Read(filenames ...string) (envMap map[string]string, err error) {
	return ReadWithLookup(nil, filenames...)
}

//Unmarshal reads an env file from a string, returning a map of keys and values.
// Unmarshal reads an env file from a string, returning a map of keys and values.
func Unmarshal(str string) (envMap map[string]string, err error) {
	return Parse(strings.NewReader(str))
	return UnmarshalBytes([]byte(str))
}

// UnmarshalBytes parses env file from byte slice of chars, returning a map of keys and values.
func UnmarshalBytes(src []byte) (map[string]string, error) {
	return UnmarshalBytesWithLookup(src, nil)
}

// UnmarshalBytesWithLookup parses env file from byte slice of chars, returning a map of keys and values.
func UnmarshalBytesWithLookup(src []byte, lookupFn LookupFn) (map[string]string, error) {
	out := make(map[string]string)
	err := parseBytes(src, out, lookupFn)
	return out, err
}

// Exec loads env vars from the specified filenames (empty map falls back to default)
@@ -136,7 +145,9 @@ func Unmarshal(str string) (envMap map[string]string, err error) {
// If you want more fine grained control over your command it's recommended
// that you use `Load()` or `Read()` and the `os/exec` package yourself.
func Exec(filenames []string, cmd string, cmdArgs []string) error {
	Load(filenames...)
	if err := Load(filenames...); err != nil {
		return err
	}

	command := exec.Command(cmd, cmdArgs...)
	command.Stdin = os.Stdin
@@ -147,16 +158,20 @@ func Exec(filenames []string, cmd string, cmdArgs []string) error {

// Write serializes the given environment and writes it to a file
func Write(envMap map[string]string, filename string) error {
	content, error := Marshal(envMap)
	if error != nil {
		return error
	content, err := Marshal(envMap)
	if err != nil {
		return err
	}
	file, error := os.Create(filename)
	if error != nil {
		return error
	file, err := os.Create(filename)
	if err != nil {
		return err
	}
	_, err := file.WriteString(content)
	return err
	defer file.Close()
	_, err = file.WriteString(content + "\n")
	if err != nil {
		return err
	}
	return file.Sync()
}

// Marshal outputs the given environment as a dotenv-formatted environment file.
@@ -164,7 +179,11 @@ func Write(envMap map[string]string, filename string) error {
func Marshal(envMap map[string]string) (string, error) {
	lines := make([]string, 0, len(envMap))
	for k, v := range envMap {
		lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v)))
		if d, err := strconv.Atoi(v); err == nil {
			lines = append(lines, fmt.Sprintf(`%s=%d`, k, d))
		} else {
			lines = append(lines, fmt.Sprintf(`%s="%s"`, k, doubleQuoteEscape(v)))
		}
	}
	sort.Strings(lines)
	return strings.Join(lines, "\n"), nil
@@ -178,7 +197,7 @@ func filenamesOrDefault(filenames []string) []string {
}

func loadFile(filename string, overload bool) error {
	envMap, err := readFile(filename)
	envMap, err := readFile(filename, nil)
	if err != nil {
		return err
	}
@@ -192,24 +211,29 @@ func loadFile(filename string, overload bool) error {

	for key, value := range envMap {
		if !currentEnv[key] || overload {
			os.Setenv(key, value)
			_ = os.Setenv(key, value)
		}
	}

	return nil
}

func readFile(filename string) (envMap map[string]string, err error) {
func readFile(filename string, lookupFn LookupFn) (envMap map[string]string, err error) {
	file, err := os.Open(filename)
	if err != nil {
		return
	}
	defer file.Close()

	return Parse(file)
	return ParseWithLookup(file, lookupFn)
}

var exportRegex = regexp.MustCompile(`^\s*(?:export\s+)?(.*?)\s*$`)

func parseLine(line string, envMap map[string]string) (key string, value string, err error) {
	return parseLineWithLookup(line, envMap, nil)
}
func parseLineWithLookup(line string, envMap map[string]string, lookupFn LookupFn) (key string, value string, err error) {
	if len(line) == 0 {
		err = errors.New("zero length string")
		return
@@ -242,39 +266,38 @@ func parseLine(line string, envMap map[string]string) (key string, value string,
	firstColon := strings.Index(line, ":")
	splitString := strings.SplitN(line, "=", 2)
	if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) {
		//this is a yaml-style line
		// This is a yaml-style line
		splitString = strings.SplitN(line, ":", 2)
	}

	if len(splitString) != 2 {
		err = errors.New("Can't separate key from value")
		err = errors.New("can't separate key from value")
		return
	}

	// Parse the key
	key = splitString[0]
	if strings.HasPrefix(key, "export") {
		key = strings.TrimPrefix(key, "export")
	}
	key = strings.Trim(key, " ")
	key = exportRegex.ReplaceAllString(splitString[0], "$1")

	// Parse the value
	value = parseValue(splitString[1], envMap)
	value = parseValue(splitString[1], envMap, lookupFn)
	return
}

func parseValue(value string, envMap map[string]string) string {
var (
	singleQuotesRegex  = regexp.MustCompile(`\A'(.*)'\z`)
	doubleQuotesRegex  = regexp.MustCompile(`\A"(.*)"\z`)
	escapeRegex        = regexp.MustCompile(`\\.`)
	unescapeCharsRegex = regexp.MustCompile(`\\([^$])`)
)

func parseValue(value string, envMap map[string]string, lookupFn LookupFn) string {

	// trim
	value = strings.Trim(value, " ")

	// check if we've got quoted values or possible escapes
	if len(value) > 1 {
		rs := regexp.MustCompile(`\A'(.*)'\z`)
		singleQuotes := rs.FindStringSubmatch(value)
		singleQuotes := singleQuotesRegex.FindStringSubmatch(value)

		rd := regexp.MustCompile(`\A"(.*)"\z`)
		doubleQuotes := rd.FindStringSubmatch(value)
		doubleQuotes := doubleQuotesRegex.FindStringSubmatch(value)

		if singleQuotes != nil || doubleQuotes != nil {
			// pull the quotes off the edges
@@ -283,7 +306,6 @@ func parseValue(value string, envMap map[string]string) string {

		if doubleQuotes != nil {
			// expand newlines
			escapeRegex := regexp.MustCompile(`\\.`)
			value = escapeRegex.ReplaceAllStringFunc(value, func(match string) string {
				c := strings.TrimPrefix(match, `\`)
				switch c {
@@ -296,23 +318,22 @@ func parseValue(value string, envMap map[string]string) string {
				}
			})
			// unescape characters
			e := regexp.MustCompile(`\\([^$])`)
			value = e.ReplaceAllString(value, "$1")
			value = unescapeCharsRegex.ReplaceAllString(value, "$1")
		}

		if singleQuotes == nil {
			value = expandVariables(value, envMap)
			value = expandVariables(value, envMap, lookupFn)
		}
	}

	return value
}

func expandVariables(v string, m map[string]string) string {
	r := regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`)
var expandVarRegex = regexp.MustCompile(`(\\)?(\$)(\()?\{?([A-Z0-9_]+)?\}?`)

	return r.ReplaceAllStringFunc(v, func(s string) string {
		submatch := r.FindStringSubmatch(s)
func expandVariables(v string, envMap map[string]string, lookupFn LookupFn) string {
	return expandVarRegex.ReplaceAllStringFunc(v, func(s string) string {
		submatch := expandVarRegex.FindStringSubmatch(s)

		if submatch == nil {
			return s
@@ -320,17 +341,25 @@ func expandVariables(v string, m map[string]string) string {
		if submatch[1] == "\\" || submatch[2] == "(" {
			return submatch[0][1:]
		} else if submatch[4] != "" {
			return m[submatch[4]]
			// first check if we have defined this already earlier
			if envMap[submatch[4]] != "" {
				return envMap[submatch[4]]
			}
			if lookupFn == nil {
				return ""
			}
			// if we have not defined it, check the lookup function provided
			// by the user
			s2, ok := lookupFn(submatch[4])
			if ok {
				return s2
			}
			return ""
		}
		return s
	})
}

func isIgnoredLine(line string) bool {
	trimmedLine := strings.Trim(line, " \n\t")
	return len(trimmedLine) == 0 || strings.HasPrefix(trimmedLine, "#")
}

func doubleQuoteEscape(line string) string {
	for _, c := range doubleQuoteSpecialChars {
		toReplace := "\\" + string(c)
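A hedged sketch (not part of the vendored source; the import path assumes this vendored copy of compose-go): the lookup mechanism added in this diff resolves a bare KEY line (no "=") and any undefined ${VAR} reference through the caller-supplied LookupFn.

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/dotenv"
)

func main() {
	// BAZ has no value, so it is "inherited" through the lookup function;
	// REF expands from the values parsed so far.
	src := []byte("FOO=bar\nBAZ\nREF=${FOO}\n")
	env, err := dotenv.UnmarshalBytesWithLookup(src, func(k string) (string, bool) {
		if k == "BAZ" {
			return "from-lookup", true
		}
		return "", false
	})
	if err != nil {
		panic(err)
	}
	fmt.Println(env["FOO"], env["BAZ"], env["REF"]) // bar from-lookup bar
}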
234 vendor/github.com/compose-spec/compose-go/dotenv/parser.go generated vendored Normal file
@@ -0,0 +1,234 @@
package dotenv

import (
	"bytes"
	"errors"
	"fmt"
	"strings"
	"unicode"
)

const (
	charComment       = '#'
	prefixSingleQuote = '\''
	prefixDoubleQuote = '"'

	exportPrefix = "export"
)

func parseBytes(src []byte, out map[string]string, lookupFn LookupFn) error {
	cutset := src
	for {
		cutset = getStatementStart(cutset)
		if cutset == nil {
			// reached end of file
			break
		}

		key, left, inherited, err := locateKeyName(cutset)
		if err != nil {
			return err
		}
		if strings.Contains(key, " ") {
			return errors.New("key cannot contain a space")
		}

		if inherited {
			if lookupFn == nil {
				lookupFn = noLookupFn
			}

			value, ok := lookupFn(key)
			if ok {
				out[key] = value
			}
			cutset = left
			continue
		}

		value, left, err := extractVarValue(left, out, lookupFn)
		if err != nil {
			return err
		}

		out[key] = value
		cutset = left
	}

	return nil
}

// getStatementStart returns the position where the statement begins.
//
// It skips any comment line or non-whitespace character.
func getStatementStart(src []byte) []byte {
	pos := indexOfNonSpaceChar(src)
	if pos == -1 {
		return nil
	}

	src = src[pos:]
	if src[0] != charComment {
		return src
	}

	// skip comment section
	pos = bytes.IndexFunc(src, isCharFunc('\n'))
	if pos == -1 {
		return nil
	}

	return getStatementStart(src[pos:])
}

// locateKeyName locates and parses key name and returns rest of slice
func locateKeyName(src []byte) (key string, cutset []byte, inherited bool, err error) {
	// trim "export" and space at beginning
	src = bytes.TrimLeftFunc(bytes.TrimPrefix(src, []byte(exportPrefix)), isSpace)

	// locate key name end and validate it in single loop
	offset := 0
loop:
	for i, char := range src {
		rchar := rune(char)
		if isSpace(rchar) {
			continue
		}

		switch char {
		case '=', ':', '\n':
			// library also supports yaml-style value declaration
			key = string(src[0:i])
			offset = i + 1
			inherited = char == '\n'
			break loop
		case '_':
		default:
			// variable name should match [A-Za-z0-9_]
			if unicode.IsLetter(rchar) || unicode.IsNumber(rchar) {
				continue
			}

			return "", nil, inherited, fmt.Errorf(
				`unexpected character %q in variable name near %q`,
				string(char), string(src))
		}
	}

	if len(src) == 0 {
		return "", nil, inherited, errors.New("zero length string")
	}

	// trim whitespace
	key = strings.TrimRightFunc(key, unicode.IsSpace)
	cutset = bytes.TrimLeftFunc(src[offset:], isSpace)
	return key, cutset, inherited, nil
}

// extractVarValue extracts variable value and returns rest of slice
func extractVarValue(src []byte, envMap map[string]string, lookupFn LookupFn) (value string, rest []byte, err error) {
	quote, isQuoted := hasQuotePrefix(src)
	if !isQuoted {
		// unquoted value - read until new line
		end := bytes.IndexFunc(src, isNewLine)
		var rest []byte
		if end < 0 {
			value := strings.Split(string(src), "#")[0] // Remove inline comments on unquoted lines
			value = strings.TrimRightFunc(value, unicode.IsSpace)
			return expandVariables(value, envMap, lookupFn), nil, nil
		}

		value := strings.Split(string(src[0:end]), "#")[0]
		value = strings.TrimRightFunc(value, unicode.IsSpace)
		rest = src[end:]
		return expandVariables(value, envMap, lookupFn), rest, nil
	}

	// lookup quoted string terminator
	for i := 1; i < len(src); i++ {
		if char := src[i]; char != quote {
			continue
		}

		// skip escaped quote symbol (\" or \', depends on quote)
		if prevChar := src[i-1]; prevChar == '\\' {
			continue
		}

		// trim quotes
		trimFunc := isCharFunc(rune(quote))
		value = string(bytes.TrimLeftFunc(bytes.TrimRightFunc(src[0:i], trimFunc), trimFunc))
		if quote == prefixDoubleQuote {
			// unescape newlines for double quote (this is compat feature)
			// and expand environment variables
			value = expandVariables(expandEscapes(value), envMap, lookupFn)
		}

		return value, src[i+1:], nil
	}

	// return formatted error if quoted string is not terminated
	valEndIndex := bytes.IndexFunc(src, isCharFunc('\n'))
	if valEndIndex == -1 {
		valEndIndex = len(src)
	}

	return "", nil, fmt.Errorf("unterminated quoted value %s", src[:valEndIndex])
}

func expandEscapes(str string) string {
	out := escapeRegex.ReplaceAllStringFunc(str, func(match string) string {
		c := strings.TrimPrefix(match, `\`)
		switch c {
		case "n":
			return "\n"
		case "r":
			return "\r"
		default:
			return match
		}
	})
	return unescapeCharsRegex.ReplaceAllString(out, "$1")
}

func indexOfNonSpaceChar(src []byte) int {
	return bytes.IndexFunc(src, func(r rune) bool {
		return !unicode.IsSpace(r)
	})
}

// hasQuotePrefix reports whether charset starts with single or double quote and returns quote character
func hasQuotePrefix(src []byte) (quote byte, isQuoted bool) {
	if len(src) == 0 {
		return 0, false
	}

	switch prefix := src[0]; prefix {
	case prefixDoubleQuote, prefixSingleQuote:
		return prefix, true
	default:
		return 0, false
	}
}

func isCharFunc(char rune) func(rune) bool {
	return func(v rune) bool {
		return v == char
	}
}

// isSpace reports whether the rune is a space character but not a line break character
//
// this differs from unicode.IsSpace, which also applies line break as space
func isSpace(r rune) bool {
	switch r {
	case '\t', '\v', '\f', '\r', ' ', 0x85, 0xA0:
		return true
	}
	return false
}

// isNewLine reports whether the rune is a new line character
func isNewLine(r rune) bool {
	return r == '\n'
}
2 vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go generated vendored
@@ -115,7 +115,7 @@ func newPathError(path Path, err error) error {
		return nil
	case *template.InvalidTemplateError:
		return errors.Errorf(
			"invalid interpolation format for %s: %#v. You may need to escape any $ with another $.",
			"invalid interpolation format for %s: %#v. You may need to escape any $ with another $",
			path, err.Template)
	default:
		return errors.Wrapf(err, "error while interpolating %s", path)
9 vendor/github.com/compose-spec/compose-go/loader/full-example.yml generated vendored
@@ -1,3 +1,4 @@
name: Full_Example_project_name
services:
  foo:

@@ -6,6 +7,8 @@ services:
      dockerfile: Dockerfile
      args:
        foo: bar
      ssh:
        - default
      target: foo
      network: foo
      cache_from:
@@ -85,6 +88,10 @@ services:
        - spread: node.labels.az
      endpoint_mode: dnsrr

    device_cgroup_rules:
      - "c 1:3 mr"
      - "a 7:* rmw"

    devices:
      - "/dev/ttyUSB0:/dev/ttyUSB0"

@@ -268,7 +275,7 @@ services:
      - .:/code
      - ./static:/var/www/html
      # User-relative path
      - ~/configs:/etc/configs/:ro
      - ~/configs:/etc/configs:ro
      # Named volume
      - datavolume:/var/lib/mysql
      - type: bind
56 vendor/github.com/compose-spec/compose-go/loader/interpolate.go generated vendored
@@ -26,9 +26,15 @@ import (
var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
	servicePath("configs", interp.PathMatchList, "mode"):          toInt,
	servicePath("secrets", interp.PathMatchList, "mode"):          toInt,
	servicePath("healthcheck", "retries"):                         toInt,
	servicePath("healthcheck", "disable"):                         toBoolean,
	servicePath("cpu_count"):                                      toInt64,
	servicePath("cpu_percent"):                                    toFloat,
	servicePath("cpu_period"):                                     toInt64,
	servicePath("cpu_quota"):                                      toInt64,
	servicePath("cpu_rt_period"):                                  toInt64,
	servicePath("cpu_rt_runtime"):                                 toInt64,
	servicePath("cpus"):                                           toFloat32,
	servicePath("cpu_shares"):                                     toInt64,
	servicePath("init"):                                           toBoolean,
	servicePath("deploy", "replicas"):                             toInt,
	servicePath("deploy", "update_config", "parallelism"):         toInt,
	servicePath("deploy", "update_config", "max_failure_ratio"):   toFloat,
@@ -36,20 +42,34 @@ var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
	servicePath("deploy", "rollback_config", "max_failure_ratio"): toFloat,
	servicePath("deploy", "restart_policy", "max_attempts"):       toInt,
	servicePath("deploy", "placement", "max_replicas_per_node"):   toInt,
	servicePath("healthcheck", "retries"):                         toInt,
	servicePath("healthcheck", "disable"):                         toBoolean,
	servicePath("mem_limit"):                                      toUnitBytes,
	servicePath("mem_reservation"):                                toUnitBytes,
	servicePath("memswap_limit"):                                  toUnitBytes,
	servicePath("mem_swappiness"):                                 toUnitBytes,
	servicePath("oom_kill_disable"):                               toBoolean,
	servicePath("oom_score_adj"):                                  toInt64,
	servicePath("pids_limit"):                                     toInt64,
	servicePath("ports", interp.PathMatchList, "target"):          toInt,
	servicePath("ports", interp.PathMatchList, "published"):       toInt,
	servicePath("privileged"):                                     toBoolean,
	servicePath("read_only"):                                      toBoolean,
	servicePath("scale"):                                          toInt,
	servicePath("secrets", interp.PathMatchList, "mode"):          toInt,
	servicePath("shm_size"):                                       toUnitBytes,
	servicePath("stdin_open"):                                     toBoolean,
	servicePath("stop_grace_period"):                              toDuration,
	servicePath("tty"):                                            toBoolean,
	servicePath("ulimits", interp.PathMatchAll):                   toInt,
	servicePath("ulimits", interp.PathMatchAll, "hard"):           toInt,
	servicePath("ulimits", interp.PathMatchAll, "soft"):           toInt,
	servicePath("privileged"):                                     toBoolean,
	servicePath("read_only"):                                      toBoolean,
	servicePath("stdin_open"):                                     toBoolean,
	servicePath("tty"):                                            toBoolean,
	servicePath("volumes", interp.PathMatchList, "read_only"):     toBoolean,
	servicePath("volumes", interp.PathMatchList, "volume", "nocopy"): toBoolean,
	servicePath("volumes", interp.PathMatchList, "tmpfs", "size"):    toUnitBytes,
	iPath("networks", interp.PathMatchAll, "external"):            toBoolean,
	iPath("networks", interp.PathMatchAll, "internal"):            toBoolean,
	iPath("networks", interp.PathMatchAll, "attachable"):          toBoolean,
	iPath("networks", interp.PathMatchAll, "enable_ipv6"):         toBoolean,
	iPath("volumes", interp.PathMatchAll, "external"):             toBoolean,
	iPath("secrets", interp.PathMatchAll, "external"):             toBoolean,
	iPath("configs", interp.PathMatchAll, "external"):             toBoolean,
@@ -67,10 +87,30 @@ func toInt(value string) (interface{}, error) {
	return strconv.Atoi(value)
}

func toInt64(value string) (interface{}, error) {
	return strconv.ParseInt(value, 10, 64)
}

func toUnitBytes(value string) (interface{}, error) {
	return transformSize(value)
}

func toDuration(value string) (interface{}, error) {
	return transformStringToDuration(value)
}

func toFloat(value string) (interface{}, error) {
	return strconv.ParseFloat(value, 64)
}

func toFloat32(value string) (interface{}, error) {
	f, err := strconv.ParseFloat(value, 32)
	if err != nil {
		return nil, err
	}
	return float32(f), nil
}

// should match http://yaml.org/type/bool.html
func toBoolean(value string) (interface{}, error) {
	switch strings.ToLower(value) {
335 vendor/github.com/compose-spec/compose-go/loader/loader.go generated vendored
@@ -23,22 +23,24 @@ import (
|
||||
"path"
|
||||
"path/filepath"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/compose-spec/compose-go/consts"
|
||||
"github.com/compose-spec/compose-go/dotenv"
|
||||
interp "github.com/compose-spec/compose-go/interpolation"
|
||||
"github.com/compose-spec/compose-go/schema"
|
||||
"github.com/compose-spec/compose-go/template"
|
||||
"github.com/compose-spec/compose-go/types"
|
||||
units "github.com/docker/go-units"
|
||||
"github.com/imdario/mergo"
|
||||
"github.com/joho/godotenv"
|
||||
shellwords "github.com/mattn/go-shellwords"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/mattn/go-shellwords"
|
||||
"github.com/mitchellh/mapstructure"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
yaml "gopkg.in/yaml.v2"
|
||||
"gopkg.in/yaml.v2"
|
||||
)
|
||||
|
||||
// Options supported by Load
|
||||
@@ -49,6 +51,8 @@ type Options struct {
|
||||
SkipInterpolation bool
|
||||
// Skip normalization
|
||||
SkipNormalization bool
|
||||
// Resolve paths
|
||||
ResolvePaths bool
|
||||
// Skip consistency check
|
||||
SkipConsistencyCheck bool
|
||||
// Skip extends
|
||||
@@ -57,8 +61,19 @@ type Options struct {
|
||||
Interpolate *interp.Options
|
||||
// Discard 'env_file' entries after resolving to 'environment' section
|
||||
discardEnvFiles bool
|
||||
// Set project name
|
||||
Name string
|
||||
// Set project projectName
|
||||
projectName string
|
||||
// Indicates when the projectName was imperatively set or guessed from path
|
||||
projectNameImperativelySet bool
|
||||
}
|
||||
|
||||
func (o *Options) SetProjectName(name string, imperativelySet bool) {
|
||||
o.projectName = normalizeProjectName(name)
|
||||
o.projectNameImperativelySet = imperativelySet
|
||||
}
|
||||
|
||||
func (o Options) GetProjectName() (string, bool) {
|
||||
return o.projectName, o.projectNameImperativelySet
|
||||
}
|
||||
|
||||
// serviceRef identifies a reference to a service. It's used to detect cyclic
|
||||
@@ -103,6 +118,11 @@ func WithDiscardEnvFiles(opts *Options) {
|
||||
opts.discardEnvFiles = true
|
||||
}
|
||||
|
||||
// WithSkipValidation sets the Options to skip validation when loading sections
|
||||
func WithSkipValidation(opts *Options) {
|
||||
opts.SkipValidation = true
|
||||
}
|
||||
|
||||
// ParseYAML reads the bytes from a file, parses the bytes into a mapping
|
||||
// structure, and returns it.
|
||||
func ParseYAML(source []byte) (map[string]interface{}, error) {
|
||||
@@ -139,7 +159,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
||||
op(opts)
|
||||
}
|
||||
|
||||
configs := []*types.Config{}
|
||||
var configs []*types.Config
|
||||
for i, file := range configDetails.ConfigFiles {
|
||||
configDict := file.Config
|
||||
if configDict == nil {
|
||||
@@ -186,8 +206,17 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
||||
s.EnvFile = newEnvFiles
|
||||
}
|
||||
|
||||
projectName, projectNameImperativelySet := opts.GetProjectName()
|
||||
model.Name = normalizeProjectName(model.Name)
|
||||
if !projectNameImperativelySet && model.Name != "" {
|
||||
projectName = model.Name
|
||||
}
|
||||
|
||||
if projectName != "" {
|
||||
configDetails.Environment[consts.ComposeProjectName] = projectName
|
||||
}
|
||||
project := &types.Project{
|
||||
Name: opts.Name,
|
||||
Name: projectName,
|
||||
WorkingDir: configDetails.WorkingDir,
|
||||
Services: model.Services,
|
||||
Networks: model.Networks,
|
||||
@@ -199,7 +228,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
||||
}
|
||||
|
||||
if !opts.SkipNormalization {
|
||||
err = normalize(project)
|
||||
err = normalize(project, opts.ResolvePaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -215,35 +244,22 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
||||
return project, nil
|
||||
}
|
||||
|
||||
func parseConfig(b []byte, opts *Options) (map[string]interface{}, error) {
|
||||
if !opts.SkipInterpolation {
|
||||
withoutComments, err := removeYamlComments(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
substituted, err := opts.Interpolate.Substitute(string(withoutComments), template.Mapping(opts.Interpolate.LookupValue))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b = []byte(substituted)
|
||||
}
|
||||
|
||||
return ParseYAML(b)
|
||||
func normalizeProjectName(s string) string {
|
||||
r := regexp.MustCompile("[a-z0-9_-]")
|
||||
s = strings.ToLower(s)
|
||||
s = strings.Join(r.FindAllString(s, -1), "")
|
||||
return strings.TrimLeft(s, "_-")
|
||||
}
|
||||
|
||||
// removeYamlComments drop all comments from the yaml file, so we don't try to apply string substitutions on irrelevant places
|
||||
func removeYamlComments(b []byte) ([]byte, error) {
|
||||
var cfg interface{}
|
||||
err := yaml.Unmarshal(b, &cfg)
|
||||
func parseConfig(b []byte, opts *Options) (map[string]interface{}, error) {
|
||||
yml, err := ParseYAML(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
b, err = yaml.Marshal(cfg)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if !opts.SkipInterpolation {
|
||||
return interp.Interpolate(yml, *opts.Interpolate)
|
||||
}
|
||||
return b, nil
|
||||
return yml, err
|
||||
}
|
||||
|
||||
func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} {
|
||||
@@ -268,13 +284,20 @@ func loadSections(filename string, config map[string]interface{}, configDetails
|
||||
cfg := types.Config{
|
||||
Filename: filename,
|
||||
}
|
||||
|
||||
name := ""
|
||||
if n, ok := config["name"]; ok {
|
||||
name, ok = n.(string)
|
||||
if !ok {
|
||||
return nil, errors.New("project name must be a string")
|
||||
}
|
||||
}
|
||||
cfg.Name = name
|
||||
cfg.Services, err = LoadServices(filename, getSection(config, "services"), configDetails.WorkingDir, configDetails.LookupEnv, opts)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
cfg.Networks, err = LoadNetworks(getSection(config, "networks"), configDetails.Version)
|
||||
cfg.Networks, err = LoadNetworks(getSection(config, "networks"))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -282,11 +305,11 @@ func loadSections(filename string, config map[string]interface{}, configDetails
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg.Secrets, err = LoadSecrets(getSection(config, "secrets"), configDetails)
|
||||
cfg.Secrets, err = LoadSecrets(getSection(config, "secrets"), configDetails, opts.ResolvePaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
cfg.Configs, err = LoadConfigObjs(getSection(config, "configs"), configDetails)
|
||||
cfg.Configs, err = LoadConfigObjs(getSection(config, "configs"), configDetails, opts.ResolvePaths)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@@ -294,10 +317,6 @@ func loadSections(filename string, config map[string]interface{}, configDetails
|
||||
if len(extensions) > 0 {
|
||||
cfg.Extensions = extensions
|
||||
}
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &cfg, nil
|
||||
}
|
||||
|
||||
@@ -356,8 +375,8 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec
 		reflect.TypeOf(types.UlimitsConfig{}):             transformUlimits,
 		reflect.TypeOf(types.UnitBytes(0)):                transformSize,
 		reflect.TypeOf([]types.ServicePortConfig{}):       transformServicePort,
-		reflect.TypeOf(types.ServiceSecretConfig{}):       transformStringSourceMap,
-		reflect.TypeOf(types.ServiceConfigObjConfig{}):    transformStringSourceMap,
+		reflect.TypeOf(types.ServiceSecretConfig{}):       transformFileReferenceConfig,
+		reflect.TypeOf(types.ServiceConfigObjConfig{}):    transformFileReferenceConfig,
 		reflect.TypeOf(types.StringOrNumberList{}):        transformStringOrNumberList,
 		reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}): transformServiceNetworkMap,
 		reflect.TypeOf(types.Mapping{}):                   transformMappingOrListFunc("=", false),
@@ -371,6 +390,7 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec
 		reflect.TypeOf(types.DependsOnConfig{}):           transformDependsOnConfig,
 		reflect.TypeOf(types.ExtendsConfig{}):             transformExtendsConfig,
 		reflect.TypeOf(types.DeviceRequest{}):             transformServiceDeviceRequest,
+		reflect.TypeOf(types.SSHConfig{}):                 transformSSHConfig,
 	}

 	for _, transformer := range additionalTransformers {
@@ -386,7 +406,7 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec
 	}
 }

-// keys needs to be converted to strings for jsonschema
+// keys need to be converted to strings for jsonschema
 func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
 	if mapping, ok := value.(map[interface{}]interface{}); ok {
 		dict := make(map[string]interface{})
@@ -410,7 +430,7 @@ func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interfac
 		return dict, nil
 	}
 	if list, ok := value.([]interface{}); ok {
-		convertedList := []interface{}{}
+		var convertedList []interface{}
 		for index, entry := range list {
 			newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
 			convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
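For context, yaml.v2 decodes nested mappings as map[interface{}]interface{}, while the jsonschema validator wants map[string]interface{} throughout. A sketch of the recursive conversion (illustrative only; toStringKeys is a stand-in for convertToStringKeysRecursive above):

package main

import "fmt"

// toStringKeys converts interface-keyed maps to string-keyed maps,
// recursing into values; keyPrefix builds a path like "services.app"
// for error messages.
func toStringKeys(value interface{}, keyPrefix string) (interface{}, error) {
	switch v := value.(type) {
	case map[interface{}]interface{}:
		dict := make(map[string]interface{}, len(v))
		for key, entry := range v {
			str, ok := key.(string)
			if !ok {
				return nil, fmt.Errorf("non-string key at %s: %v", keyPrefix, key)
			}
			converted, err := toStringKeys(entry, keyPrefix+"."+str)
			if err != nil {
				return nil, err
			}
			dict[str] = converted
		}
		return dict, nil
	case []interface{}:
		for i, entry := range v {
			converted, err := toStringKeys(entry, fmt.Sprintf("%s[%d]", keyPrefix, i))
			if err != nil {
				return nil, err
			}
			v[i] = converted
		}
		return v, nil
	default:
		return value, nil
	}
}

func main() {
	in := map[interface{}]interface{}{"services": map[interface{}]interface{}{"app": nil}}
	out, err := toStringKeys(in, "")
	fmt.Println(out, err) // map[services:map[app:<nil>]] <nil>
}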
@@ -439,6 +459,14 @@ func formatInvalidKeyError(keyPrefix string, key interface{}) error {
 func LoadServices(filename string, servicesDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, opts *Options) ([]types.ServiceConfig, error) {
 	var services []types.ServiceConfig

+	x, ok := servicesDict["extensions"]
+	if ok {
+		// as a top-level attribute, "services" doesn't support extensions, and a service can be named `x-foo`
+		for k, v := range x.(map[string]interface{}) {
+			servicesDict[k] = v
+		}
+	}
+
 	for name := range servicesDict {
 		serviceConfig, err := loadServiceWithExtends(filename, name, servicesDict, workingDir, lookupEnv, opts, &cycleTracker{})
 		if err != nil {
@@ -456,7 +484,12 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
 		return nil, err
 	}

-	serviceConfig, err := LoadService(name, servicesDict[name].(map[string]interface{}), workingDir, lookupEnv)
+	target, ok := servicesDict[name]
+	if !ok {
+		return nil, fmt.Errorf("cannot extend service %q in %s: service not found", name, filename)
+	}
+
+	serviceConfig, err := LoadService(name, target.(map[string]interface{}), workingDir, lookupEnv, opts.ResolvePaths)
 	if err != nil {
 		return nil, err
 	}
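The comma-ok guard matters because indexing a map with a missing key yields the zero value, a nil interface, and the old code's direct type assertion would then panic instead of returning a useful error. A hypothetical repro:

package main

import "fmt"

func main() {
	services := map[string]interface{}{}

	// Comma-ok lookup: ok reports whether the service exists.
	target, ok := services["missing"]
	fmt.Println(target, ok) // <nil> false

	// Without the guard, the old code's direct assertion would panic:
	//   _ = services["missing"].(map[string]interface{})
	// panic: interface conversion: interface {} is nil, not map[string]interface {}
}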
@@ -478,15 +511,7 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
 			return nil, err
 		}

-		if !opts.SkipInterpolation {
-			substitute, err := opts.Interpolate.Substitute(string(bytes), template.Mapping(opts.Interpolate.LookupValue))
-			if err != nil {
-				return nil, err
-			}
-			bytes = []byte(substitute)
-		}
-
-		baseFile, err := ParseYAML(bytes)
+		baseFile, err := parseConfig(bytes, opts)
 		if err != nil {
 			return nil, err
 		}
@@ -516,10 +541,10 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter
 			}
 		}

-		if err := mergo.Merge(baseService, serviceConfig, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(serviceSpecials)); err != nil {
-			return nil, errors.Wrapf(err, "cannot merge service %s", name)
+		serviceConfig, err = _merge(baseService, serviceConfig)
+		if err != nil {
+			return nil, err
 		}
-		serviceConfig = baseService
 	}

 	return serviceConfig, nil
@@ -527,8 +552,10 @@ func loadServiceWithExtends(filename, name string, servicesDict map[string]inter

 // LoadService produces a single ServiceConfig from a compose file Dict
 // the serviceDict is not validated if directly used. Use Load() to enable validation
-func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping) (*types.ServiceConfig, error) {
-	serviceConfig := &types.ServiceConfig{}
+func LoadService(name string, serviceDict map[string]interface{}, workingDir string, lookupEnv template.Mapping, resolvePaths bool) (*types.ServiceConfig, error) {
+	serviceConfig := &types.ServiceConfig{
+		Scale: 1,
+	}
 	if err := Transform(serviceDict, serviceConfig); err != nil {
 		return nil, err
 	}
@@ -538,8 +565,18 @@ func LoadService(name string, serviceDict map[string]interface{}, workingDir str
 		return nil, err
 	}

-	if err := resolveVolumePaths(serviceConfig.Volumes, workingDir, lookupEnv); err != nil {
-		return nil, err
+	for i, volume := range serviceConfig.Volumes {
+		if volume.Type != types.VolumeTypeBind {
+			continue
+		}
+
+		if volume.Source == "" {
+			return nil, errors.New(`invalid mount config for type "bind": field Source must not be empty`)
+		}
+
+		if resolvePaths {
+			serviceConfig.Volumes[i] = resolveVolumePath(volume, workingDir, lookupEnv)
+		}
 	}

 	return serviceConfig, nil
@@ -549,14 +586,14 @@ func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, l
 	environment := types.MappingWithEquals{}

 	if len(serviceConfig.EnvFile) > 0 {
-		for _, file := range serviceConfig.EnvFile {
-			filePath := absPath(workingDir, file)
+		for _, envFile := range serviceConfig.EnvFile {
+			filePath := absPath(workingDir, envFile)
 			file, err := os.Open(filePath)
 			if err != nil {
 				return err
 			}
 			defer file.Close()
-			fileVars, err := godotenv.Parse(file)
+			fileVars, err := dotenv.ParseWithLookup(file, dotenv.LookupFn(lookupEnv))
 			if err != nil {
 				return err
 			}
@@ -574,30 +611,19 @@ func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, l
 	return nil
 }

-func resolveVolumePaths(volumes []types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) error {
-	for i, volume := range volumes {
-		if volume.Type != "bind" {
-			continue
-		}
-
-		if volume.Source == "" {
-			return errors.New(`invalid mount config for type "bind": field Source must not be empty`)
-		}
-
-		filePath := expandUser(volume.Source, lookupEnv)
-		// Check if source is an absolute path (either Unix or Windows), to
-		// handle a Windows client with a Unix daemon or vice-versa.
-		//
-		// Note that this is not required for Docker for Windows when specifying
-		// a local Windows path, because Docker for Windows translates the Windows
-		// path into a valid path within the VM.
-		if !path.IsAbs(filePath) && !isAbs(filePath) {
-			filePath = absPath(workingDir, filePath)
-		}
-		volume.Source = filePath
-		volumes[i] = volume
+func resolveVolumePath(volume types.ServiceVolumeConfig, workingDir string, lookupEnv template.Mapping) types.ServiceVolumeConfig {
+	filePath := expandUser(volume.Source, lookupEnv)
+	// Check if source is an absolute path (either Unix or Windows), to
+	// handle a Windows client with a Unix daemon or vice-versa.
+	//
+	// Note that this is not required for Docker for Windows when specifying
+	// a local Windows path, because Docker for Windows translates the Windows
+	// path into a valid path within the VM.
+	if !path.IsAbs(filePath) && !isAbs(filePath) {
+		filePath = absPath(workingDir, filePath)
 	}
-	return nil
+	volume.Source = filePath
+	return volume
 }

 // TODO: make this more robust
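The comment above is the heart of resolveVolumePath: a Unix-only check would mangle Windows-style sources sent to a Unix daemon. A self-contained sketch of the dual check, assuming the loader's unexported isAbs helper detects drive-letter paths (isWindowsAbs below is my stand-in):

package main

import (
	"fmt"
	"path"
	"path/filepath"
)

// isWindowsAbs detects drive-letter paths like `C:\data` or `C:/data`.
func isWindowsAbs(p string) bool {
	return len(p) >= 3 && p[1] == ':' && (p[2] == '\\' || p[2] == '/')
}

func resolveSource(workingDir, source string) string {
	if path.IsAbs(source) || isWindowsAbs(source) {
		return source // already absolute for some platform: leave untouched
	}
	return filepath.Join(workingDir, source)
}

func main() {
	fmt.Println(resolveSource("/proj", "./data"))  // /proj/data (on a Unix host)
	fmt.Println(resolveSource("/proj", `C:\data`)) // C:\data, left as-is
}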
@@ -633,7 +659,7 @@ func transformUlimits(data interface{}) (interface{}, error) {

 // LoadNetworks produces a NetworkConfig map from a compose file Dict
 // the source Dict is not validated if directly used. Use Load() to enable validation
-func LoadNetworks(source map[string]interface{}, version string) (map[string]types.NetworkConfig, error) {
+func LoadNetworks(source map[string]interface{}) (map[string]types.NetworkConfig, error) {
 	networks := make(map[string]types.NetworkConfig)
 	err := Transform(source, &networks)
 	if err != nil {
@@ -701,13 +727,13 @@ func LoadVolumes(source map[string]interface{}) (map[string]types.VolumeConfig,

 // LoadSecrets produces a SecretConfig map from a compose file Dict
 // the source Dict is not validated if directly used. Use Load() to enable validation
-func LoadSecrets(source map[string]interface{}, details types.ConfigDetails) (map[string]types.SecretConfig, error) {
+func LoadSecrets(source map[string]interface{}, details types.ConfigDetails, resolvePaths bool) (map[string]types.SecretConfig, error) {
 	secrets := make(map[string]types.SecretConfig)
 	if err := Transform(source, &secrets); err != nil {
 		return secrets, err
 	}
 	for name, secret := range secrets {
-		obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details)
+		obj, err := loadFileObjectConfig(name, "secret", types.FileObjectConfig(secret), details, resolvePaths)
 		if err != nil {
 			return nil, err
 		}
@@ -719,13 +745,13 @@ func LoadSecrets(source map[string]interface{}, details types.ConfigDetails) (ma

 // LoadConfigObjs produces a ConfigObjConfig map from a compose file Dict
 // the source Dict is not validated if directly used. Use Load() to enable validation
-func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails) (map[string]types.ConfigObjConfig, error) {
+func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails, resolvePaths bool) (map[string]types.ConfigObjConfig, error) {
 	configs := make(map[string]types.ConfigObjConfig)
 	if err := Transform(source, &configs); err != nil {
 		return configs, err
 	}
 	for name, config := range configs {
-		obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details)
+		obj, err := loadFileObjectConfig(name, "config", types.FileObjectConfig(config), details, resolvePaths)
 		if err != nil {
 			return nil, err
 		}
@@ -735,7 +761,7 @@ func LoadConfigObjs(source map[string]interface{}, details types.ConfigDetails)
 	return configs, nil
 }

-func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails) (types.FileObjectConfig, error) {
+func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfig, details types.ConfigDetails, resolvePaths bool) (types.FileObjectConfig, error) {
 	// if "external: true"
 	switch {
 	case obj.External.External:
@@ -758,7 +784,9 @@ func loadFileObjectConfig(name string, objType string, obj types.FileObjectConfi
 			return obj, errors.Errorf("%[1]s %[2]s: %[1]s.driver and %[1]s.file conflict; only use %[1]s.driver", objType, name)
 		}
 	default:
-		obj.File = absPath(details.WorkingDir, obj.File)
+		if resolvePaths {
+			obj.File = absPath(details.WorkingDir, obj.File)
+		}
 	}

 	return obj, nil
@@ -803,7 +831,7 @@ var transformServicePort TransformerFunc = func(data interface{}) (interface{},
 		// We process the list instead of individual items here.
 		// The reason is that one entry might be mapped to multiple ServicePortConfig.
 		// Therefore we take an input of a list and return an output of a list.
-		ports := []interface{}{}
+		var ports []interface{}
 		for _, entry := range entries {
 			switch value := entry.(type) {
 			case int:
@@ -823,6 +851,10 @@ var transformServicePort TransformerFunc = func(data interface{}) (interface{},
 					ports = append(ports, v)
 				}
 			case map[string]interface{}:
+				published := value["published"]
+				if v, ok := published.(int); ok {
+					value["published"] = strconv.Itoa(v)
+				}
 				ports = append(ports, groupXFieldsIntoExtensions(value))
 			default:
 				return data, errors.Errorf("invalid type %T for port", value)
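The list-in/list-out shape called out in the comments above exists because a single compose entry can expand to several port configs, e.g. a range. An illustrative sketch of that expansion (compose-go actually delegates the parsing to docker's nat package; portConfig and expandRange here are invented for the example):

package main

import (
	"fmt"
	"strconv"
	"strings"
)

type portConfig struct {
	Target   int
	Protocol string
}

// expandRange turns "3000-3002" into one entry per port; a single "3000"
// yields one entry. Error handling is deliberately minimal.
func expandRange(spec string) ([]portConfig, error) {
	parts := strings.SplitN(spec, "-", 2)
	start, err := strconv.Atoi(parts[0])
	if err != nil {
		return nil, err
	}
	end := start
	if len(parts) == 2 {
		if end, err = strconv.Atoi(parts[1]); err != nil {
			return nil, err
		}
	}
	var out []portConfig
	for p := start; p <= end; p++ {
		out = append(out, portConfig{Target: p, Protocol: "tcp"})
	}
	return out, nil
}

func main() {
	ports, _ := expandRange("3000-3002")
	fmt.Println(ports) // [{3000 tcp} {3001 tcp} {3002 tcp}]
}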
@@ -858,17 +890,27 @@ var transformServiceDeviceRequest TransformerFunc = func(data interface{}) (inte
 	}
 }

-var transformStringSourceMap TransformerFunc = func(data interface{}) (interface{}, error) {
+var transformFileReferenceConfig TransformerFunc = func(data interface{}) (interface{}, error) {
 	switch value := data.(type) {
 	case string:
 		return map[string]interface{}{"source": value}, nil
 	case map[string]interface{}:
-		return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil
+		if target, ok := value["target"]; ok {
+			value["target"] = cleanTarget(target.(string))
+		}
+		return groupXFieldsIntoExtensions(value), nil
 	default:
 		return data, errors.Errorf("invalid type %T for secret", value)
 	}
 }

+func cleanTarget(target string) string {
+	if target == "" {
+		return ""
+	}
+	return path.Clean(target)
+}
+
 var transformBuildConfig TransformerFunc = func(data interface{}) (interface{}, error) {
 	switch value := data.(type) {
 	case string:
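cleanTarget exists so that volume and secret targets compare equal when later used as merge keys; path.Clean normalizes redundant separators and dot segments. For example:

package main

import (
	"fmt"
	"path"
)

func main() {
	fmt.Println(path.Clean("/run/secrets//token")) // /run/secrets/token
	fmt.Println(path.Clean("/app/./config/"))      // /app/config
}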
@@ -887,7 +929,7 @@ var transformDependsOnConfig TransformerFunc = func(data interface{}) (interface
 		for _, serviceIntf := range value {
 			service, ok := serviceIntf.(string)
 			if !ok {
-				return data, errors.Errorf("invalid type %T for service depends_on element. Expected string.", value)
+				return data, errors.Errorf("invalid type %T for service depends_on elementn, expected string", value)
 			}
 			transformed[service] = map[string]interface{}{"condition": types.ServiceConditionStarted}
 		}
@@ -912,9 +954,15 @@ var transformExtendsConfig TransformerFunc = func(data interface{}) (interface{}
 var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) {
 	switch value := data.(type) {
 	case string:
-		return ParseVolume(value)
+		volume, err := ParseVolume(value)
+		volume.Target = cleanTarget(volume.Target)
+		return volume, err
 	case map[string]interface{}:
-		return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil
+		data := groupXFieldsIntoExtensions(data.(map[string]interface{}))
+		if target, ok := data["target"]; ok {
+			data["target"] = cleanTarget(target.(string))
+		}
+		return data, nil
 	default:
 		return data, errors.Errorf("invalid type %T for service volume", value)
 	}
@@ -931,6 +979,35 @@ var transformServiceNetworkMap TransformerFunc = func(value interface{}) (interf
 	return value, nil
 }

+var transformSSHConfig TransformerFunc = func(data interface{}) (interface{}, error) {
+	switch value := data.(type) {
+	case map[string]interface{}:
+		var result []types.SSHKey
+		for key, val := range value {
+			if val == nil {
+				val = ""
+			}
+			result = append(result, types.SSHKey{ID: key, Path: val.(string)})
+		}
+		return result, nil
+	case []interface{}:
+		var result []types.SSHKey
+		for _, v := range value {
+			key, val := transformValueToMapEntry(v.(string), "=", false)
+			result = append(result, types.SSHKey{ID: key, Path: val.(string)})
+		}
+		return result, nil
+	case string:
+		if value == "" {
+			value = "default"
+		}
+		key, val := transformValueToMapEntry(value, "=", false)
+		result := []types.SSHKey{{ID: key, Path: val.(string)}}
+		return result, nil
+	}
+	return nil, errors.Errorf("expected a sting, map or a list, got %T: %#v", data, data)
+}
+
 var transformStringOrNumberList TransformerFunc = func(value interface{}) (interface{}, error) {
 	list := value.([]interface{})
 	result := make([]string, len(list))
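transformSSHConfig accepts three YAML shapes for the build-time ssh field: a map of id to path, a list of "id=path" strings, or a single string (empty meaning "default"). A simplified, self-contained re-implementation to make the shapes concrete (sshKey, parseSSH, and splitEq are stand-ins, not compose-go types):

package main

import (
	"fmt"
	"strings"
)

type sshKey struct{ ID, Path string }

// splitEq splits "id=path" into its two halves; a bare "id" yields an
// empty path (meaning "use the default agent").
func splitEq(s string) (string, string) {
	if i := strings.IndexByte(s, '='); i >= 0 {
		return s[:i], s[i+1:]
	}
	return s, ""
}

func parseSSH(v interface{}) []sshKey {
	switch val := v.(type) {
	case map[string]interface{}:
		var out []sshKey
		for id, p := range val {
			s, _ := p.(string) // a nil value means "use the default agent"
			out = append(out, sshKey{ID: id, Path: s})
		}
		return out
	case []interface{}:
		var out []sshKey
		for _, e := range val {
			id, path := splitEq(e.(string))
			out = append(out, sshKey{ID: id, Path: path})
		}
		return out
	case string:
		if val == "" {
			val = "default"
		}
		id, path := splitEq(val)
		return []sshKey{{ID: id, Path: path}}
	}
	return nil
}

func main() {
	fmt.Println(parseSSH("default"))                            // [{default }]
	fmt.Println(parseSSH([]interface{}{"mykey=~/.ssh/id_rsa"})) // [{mykey ~/.ssh/id_rsa}]
}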
@@ -953,47 +1030,52 @@ var transformStringList TransformerFunc = func(data interface{}) (interface{}, e

 func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc {
 	return func(data interface{}) (interface{}, error) {
-		return transformMappingOrList(data, sep, allowNil), nil
+		return transformMappingOrList(data, sep, allowNil)
 	}
 }

 func transformListOrMappingFunc(sep string, allowNil bool) TransformerFunc {
 	return func(data interface{}) (interface{}, error) {
-		return transformListOrMapping(data, sep, allowNil), nil
+		return transformListOrMapping(data, sep, allowNil)
 	}
 }

-func transformListOrMapping(listOrMapping interface{}, sep string, allowNil bool) interface{} {
+func transformListOrMapping(listOrMapping interface{}, sep string, allowNil bool) (interface{}, error) {
 	switch value := listOrMapping.(type) {
 	case map[string]interface{}:
-		return toStringList(value, sep, allowNil)
+		return toStringList(value, sep, allowNil), nil
 	case []interface{}:
-		return listOrMapping
+		return listOrMapping, nil
 	}
-	panic(errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping))
+	return nil, errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping)
 }

-func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) interface{} {
+func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) (interface{}, error) {
 	switch value := mappingOrList.(type) {
 	case map[string]interface{}:
-		return toMapStringString(value, allowNil)
-	case ([]interface{}):
+		return toMapStringString(value, allowNil), nil
+	case []interface{}:
 		result := make(map[string]interface{})
 		for _, value := range value {
-			parts := strings.SplitN(value.(string), sep, 2)
-			key := parts[0]
-			switch {
-			case len(parts) == 1 && allowNil:
-				result[key] = nil
-			case len(parts) == 1 && !allowNil:
-				result[key] = ""
-			default:
-				result[key] = parts[1]
-			}
+			key, val := transformValueToMapEntry(value.(string), sep, allowNil)
+			result[key] = val
 		}
-		return result
+		return result, nil
 	}
-	panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList))
+	return nil, errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList)
+}
+
+func transformValueToMapEntry(value string, separator string, allowNil bool) (string, interface{}) {
+	parts := strings.SplitN(value, separator, 2)
+	key := parts[0]
+	switch {
+	case len(parts) == 1 && allowNil:
+		return key, nil
+	case len(parts) == 1 && !allowNil:
+		return key, ""
+	default:
+		return key, parts[1]
+	}
 }

 var transformShellCommand TransformerFunc = func(value interface{}) (interface{}, error) {
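The allowNil flag in transformValueToMapEntry preserves an important compose distinction: in mappings that allow it, a bare `FOO` means "inherit FOO from the environment" (nil), while elsewhere a bare key means the empty string. A small demonstration of the split:

package main

import (
	"fmt"
	"strings"
)

func toMapEntry(value, sep string, allowNil bool) (string, interface{}) {
	parts := strings.SplitN(value, sep, 2)
	if len(parts) == 1 {
		if allowNil {
			return parts[0], nil // bare key: inherit from the environment
		}
		return parts[0], "" // bare key: empty value
	}
	return parts[0], parts[1]
}

func main() {
	k, v := toMapEntry("FOO", "=", true)
	fmt.Println(k, v) // FOO <nil>
	k, v = toMapEntry("FOO=bar", "=", true)
	fmt.Println(k, v) // FOO bar
}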
@@ -1018,10 +1100,13 @@ var transformSize TransformerFunc = func(value interface{}) (interface{}, error)
 	switch value := value.(type) {
 	case int:
 		return int64(value), nil
+	case int64, types.UnitBytes:
+		return value, nil
 	case string:
 		return units.RAMInBytes(value)
+	default:
+		return value, errors.Errorf("invalid type for size %T", value)
 	}
-	panic(errors.Errorf("invalid type for size %T", value))
 }

 var transformStringToDuration TransformerFunc = func(value interface{}) (interface{}, error) {
@@ -1032,6 +1117,8 @@ var transformStringToDuration TransformerFunc = func(value interface{}) (interfa
 			return value, err
 		}
 		return types.Duration(d), nil
+	case types.Duration:
+		return value, nil
 	default:
 		return value, errors.Errorf("invalid type %T for duration", value)
 	}
@@ -1057,7 +1144,7 @@ func toString(value interface{}, allowNil bool) interface{} {
 }

 func toStringList(value map[string]interface{}, separator string, allowNil bool) []string {
-	output := []string{}
+	var output []string
 	for key, value := range value {
 		if value == nil && !allowNil {
 			continue

109 vendor/github.com/compose-spec/compose-go/loader/merge.go (generated, vendored)
@@ -33,6 +33,7 @@ var serviceSpecials = &specials{
 	m: map[reflect.Type]func(dst, src reflect.Value) error{
 		reflect.TypeOf(&types.LoggingConfig{}):           safelyMerge(mergeLoggingConfig),
 		reflect.TypeOf(&types.UlimitsConfig{}):           safelyMerge(mergeUlimitsConfig),
+		reflect.TypeOf([]types.ServiceVolumeConfig{}):    mergeSlice(toServiceVolumeConfigsMap, toServiceVolumeConfigsSlice),
 		reflect.TypeOf([]types.ServicePortConfig{}):      mergeSlice(toServicePortConfigsMap, toServicePortConfigsSlice),
 		reflect.TypeOf([]types.ServiceSecretConfig{}):    mergeSlice(toServiceSecretConfigsMap, toServiceSecretConfigsSlice),
 		reflect.TypeOf([]types.ServiceConfigObjConfig{}): mergeSlice(toServiceConfigObjConfigsMap, toSServiceConfigObjConfigsSlice),
@@ -52,6 +53,7 @@ func merge(configs []*types.Config) (*types.Config, error) {
 	base := configs[0]
 	for _, override := range configs[1:] {
 		var err error
+		base.Name = mergeNames(base.Name, override.Name)
 		base.Services, err = mergeServices(base.Services, override.Services)
 		if err != nil {
 			return base, errors.Wrapf(err, "cannot merge services from %s", override.Filename)
@@ -80,19 +82,24 @@ func merge(configs []*types.Config) (*types.Config, error) {
 	return base, nil
 }

+func mergeNames(base, override string) string {
+	if override != "" {
+		return override
+	}
+	return base
+}
+
 func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, error) {
 	baseServices := mapByName(base)
 	overrideServices := mapByName(override)
 	for name, overrideService := range overrideServices {
+		overrideService := overrideService
 		if baseService, ok := baseServices[name]; ok {
-			if err := mergo.Merge(&baseService, &overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(serviceSpecials)); err != nil {
-				return base, errors.Wrapf(err, "cannot merge service %s", name)
+			merged, err := _merge(&baseService, &overrideService)
+			if err != nil {
+				return nil, errors.Wrapf(err, "cannot merge service %s", name)
 			}
-			if len(overrideService.Command) > 0 {
-				baseService.Command = overrideService.Command
-			}
-			baseServices[name] = baseService
+			baseServices[name] = *merged
 			continue
 		}
 		baseServices[name] = overrideService
@@ -105,6 +112,24 @@ func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig,
 	return services, nil
 }

+func _merge(baseService *types.ServiceConfig, overrideService *types.ServiceConfig) (*types.ServiceConfig, error) {
+	if err := mergo.Merge(baseService, overrideService, mergo.WithAppendSlice, mergo.WithOverride, mergo.WithTransformers(serviceSpecials)); err != nil {
+		return nil, err
+	}
+	if overrideService.Command != nil {
+		baseService.Command = overrideService.Command
+	}
+	if overrideService.Entrypoint != nil {
+		baseService.Entrypoint = overrideService.Entrypoint
+	}
+	if baseService.Environment != nil {
+		baseService.Environment.OverrideBy(overrideService.Environment)
+	} else {
+		baseService.Environment = overrideService.Environment
+	}
+	return baseService, nil
+}
+
 func toServiceSecretConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
 	secrets, ok := s.([]types.ServiceSecretConfig)
 	if !ok {
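_merge special-cases Command and Entrypoint because the merge is configured to append slices: an override of `command: []` would otherwise be appended to (and thus never reset) the base command. The nil/empty-slice distinction carries the meaning; a hypothetical sketch:

package main

import "fmt"

// overrideCommand mimics the nil check in _merge for a plain string slice.
func overrideCommand(base, override []string) []string {
	if override != nil { // an empty-but-non-nil slice means "reset to no command"
		return override
	}
	return base // nil means "not specified in the override file, keep base"
}

func main() {
	base := []string{"server", "--debug"}
	fmt.Println(overrideCommand(base, nil))        // [server --debug]
	fmt.Println(overrideCommand(base, []string{})) // []
}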
@@ -135,14 +160,39 @@ func toServicePortConfigsMap(s interface{}) (map[interface{}]interface{}, error)
 		return nil, errors.Errorf("not a servicePortConfig slice: %v", s)
 	}
 	m := map[interface{}]interface{}{}
+	type port struct {
+		target    uint32
+		published string
+		ip        string
+		protocol  string
+	}
+
 	for _, p := range ports {
-		m[p.Published] = p
+		mergeKey := port{
+			target:    p.Target,
+			published: p.Published,
+			ip:        p.HostIP,
+			protocol:  p.Protocol,
+		}
+		m[mergeKey] = p
 	}
 	return m, nil
 }

+func toServiceVolumeConfigsMap(s interface{}) (map[interface{}]interface{}, error) {
+	volumes, ok := s.([]types.ServiceVolumeConfig)
+	if !ok {
+		return nil, errors.Errorf("not a ServiceVolumeConfig slice: %v", s)
+	}
+	m := map[interface{}]interface{}{}
+	for _, v := range volumes {
+		m[v.Target] = v
+	}
+	return m, nil
+}
+
 func toServiceSecretConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
-	s := []types.ServiceSecretConfig{}
+	var s []types.ServiceSecretConfig
 	for _, v := range m {
 		s = append(s, v.(types.ServiceSecretConfig))
 	}
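The composite key fixes a merge bug: keying ports by Published alone collapsed entries that differ in target, host IP, or protocol. Any comparable struct works as a Go map key, so the four fields together become the port's identity:

package main

import "fmt"

// portKey mirrors the unexported `port` struct above: comparable, so it
// can serve directly as a map key.
type portKey struct {
	target    uint32
	published string
	ip        string
	protocol  string
}

func main() {
	m := map[portKey]string{}
	m[portKey{80, "8080", "127.0.0.1", "tcp"}] = "first"
	m[portKey{80, "8080", "0.0.0.0", "tcp"}] = "second" // distinct key, no clash
	fmt.Println(len(m)) // 2; the old Published-only key would have collapsed these
}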
@@ -152,7 +202,7 @@ func toServiceSecretConfigsSlice(dst reflect.Value, m map[interface{}]interface{
 }

 func toSServiceConfigObjConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
-	s := []types.ServiceConfigObjConfig{}
+	var s []types.ServiceConfigObjConfig
 	for _, v := range m {
 		s = append(s, v.(types.ServiceConfigObjConfig))
 	}
@@ -162,16 +212,37 @@ func toSServiceConfigObjConfigsSlice(dst reflect.Value, m map[interface{}]interf
 }

 func toServicePortConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
-	s := []types.ServicePortConfig{}
+	var s []types.ServicePortConfig
 	for _, v := range m {
 		s = append(s, v.(types.ServicePortConfig))
 	}
-	sort.Slice(s, func(i, j int) bool { return s[i].Published < s[j].Published })
+	sort.Slice(s, func(i, j int) bool {
+		if s[i].Target != s[j].Target {
+			return s[i].Target < s[j].Target
+		}
+		if s[i].Published != s[j].Published {
+			return s[i].Published < s[j].Published
+		}
+		if s[i].HostIP != s[j].HostIP {
+			return s[i].HostIP < s[j].HostIP
+		}
+		return s[i].Protocol < s[j].Protocol
+	})
 	dst.Set(reflect.ValueOf(s))
 	return nil
 }

-type tomapFn func(s interface{}) (map[interface{}]interface{}, error)
+func toServiceVolumeConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
+	var s []types.ServiceVolumeConfig
+	for _, v := range m {
+		s = append(s, v.(types.ServiceVolumeConfig))
+	}
+	sort.Slice(s, func(i, j int) bool { return s[i].Target < s[j].Target })
+	dst.Set(reflect.ValueOf(s))
+	return nil
+}
+
+type toMapFn func(s interface{}) (map[interface{}]interface{}, error)
 type writeValueFromMapFn func(reflect.Value, map[interface{}]interface{}) error

 func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src reflect.Value) error {
@@ -187,13 +258,13 @@ func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src refle
 	}
 }

-func mergeSlice(tomap tomapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error {
+func mergeSlice(toMap toMapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error {
 	return func(dst, src reflect.Value) error {
-		dstMap, err := sliceToMap(tomap, dst)
+		dstMap, err := sliceToMap(toMap, dst)
 		if err != nil {
 			return err
 		}
-		srcMap, err := sliceToMap(tomap, src)
+		srcMap, err := sliceToMap(toMap, src)
 		if err != nil {
 			return err
 		}
@@ -204,12 +275,12 @@ func mergeSlice(tomap tomapFn, writeValue writeValueFromMapFn) func(dst, src ref
 	}
 }

-func sliceToMap(tomap tomapFn, v reflect.Value) (map[interface{}]interface{}, error) {
+func sliceToMap(toMap toMapFn, v reflect.Value) (map[interface{}]interface{}, error) {
 	// check if valid
 	if !v.IsValid() {
 		return nil, errors.Errorf("invalid value : %+v", v)
 	}
-	return tomap(v.Interface())
+	return toMap(v.Interface())
 }

 func mergeLoggingConfig(dst, src reflect.Value) error {
@@ -228,7 +299,7 @@ func mergeLoggingConfig(dst, src reflect.Value) error {
 	return nil
 }

-//nolint: unparam
+// nolint: unparam
 func mergeUlimitsConfig(dst, src reflect.Value) error {
 	if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
 		dst.Elem().Set(src.Elem())
@@ -236,7 +307,7 @@ func mergeUlimitsConfig(dst, src reflect.Value) error {
 	return nil
 }

-//nolint: unparam
+// nolint: unparam
 func mergeServiceNetworkConfig(dst, src reflect.Value) error {
 	if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
 		dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases"))

45 vendor/github.com/compose-spec/compose-go/loader/normalize.go (generated, vendored)
@@ -28,7 +28,7 @@ import (
 )

 // normalize compose project by moving deprecated attributes to their canonical position and injecting implicit defaults
-func normalize(project *types.Project) error {
+func normalize(project *types.Project, resolvePaths bool) error {
 	absWorkingDir, err := filepath.Abs(project.WorkingDir)
 	if err != nil {
 		return err
@@ -41,6 +41,10 @@ func normalize(project *types.Project) error {
 	}
 	project.ComposeFiles = absComposeFiles

+	if project.Networks == nil {
+		project.Networks = make(map[string]types.NetworkConfig)
+	}
+
 	// If not declared explicitly, Compose model involves an implicit "default" network
 	if _, ok := project.Networks["default"]; !ok {
 		project.Networks["default"] = types.NetworkConfig{}
@@ -72,9 +76,10 @@ func normalize(project *types.Project) error {
 			}
 			localContext := absPath(project.WorkingDir, s.Build.Context)
 			if _, err := os.Stat(localContext); err == nil {
-				s.Build.Context = localContext
-				s.Build.Dockerfile = absPath(localContext, s.Build.Dockerfile)
-			} else {
+				if resolvePaths {
+					s.Build.Context = localContext
+				}
+				// } else {
 				// might be a remote http/git context. Unfortunately supported "remote" syntax is highly ambiguous
 				// in moby/moby and not defined by compose-spec, so let's assume runtime will check
 			}
@@ -82,17 +87,22 @@ func normalize(project *types.Project) error {
 		}
 		s.Environment = s.Environment.Resolve(fn)

-		err := relocateLogDriver(s)
+		err := relocateLogDriver(&s)
 		if err != nil {
 			return err
 		}

-		err = relocateLogOpt(s)
+		err = relocateLogOpt(&s)
 		if err != nil {
 			return err
 		}

-		err = relocateDockerfile(s)
+		err = relocateDockerfile(&s)
 		if err != nil {
 			return err
 		}
+
+		err = relocateScale(&s)
+		if err != nil {
+			return err
+		}
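The relocate* signature changes here and below are a straightforward mutation bug fix: passing types.ServiceConfig by value means each helper edits a copy and the relocation is silently lost. A minimal repro:

package main

import "fmt"

type service struct{ Dockerfile string }

func relocateByValue(s service)    { s.Dockerfile = "" } // edits a copy
func relocateByPointer(s *service) { s.Dockerfile = "" } // edits the caller's value

func main() {
	s := service{Dockerfile: "Dockerfile.v1"}
	relocateByValue(s)
	fmt.Println(s.Dockerfile) // "Dockerfile.v1" — the relocation was lost
	relocateByPointer(&s)
	fmt.Println(s.Dockerfile) // "" — the relocation stuck
}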
@@ -105,6 +115,21 @@ func normalize(project *types.Project) error {
 	return nil
 }

+func relocateScale(s *types.ServiceConfig) error {
+	scale := uint64(s.Scale)
+	if scale != 1 {
+		logrus.Warn("`scale` is deprecated. Use the `deploy.replicas` element")
+		if s.Deploy == nil {
+			s.Deploy = &types.DeployConfig{}
+		}
+		if s.Deploy.Replicas != nil && *s.Deploy.Replicas != scale {
+			return errors.Wrap(errdefs.ErrInvalid, "can't use both 'scale' (deprecated) and 'deploy.replicas'")
+		}
+		s.Deploy.Replicas = &scale
+	}
+	return nil
+}
+
 func absComposeFiles(composeFiles []string) ([]string, error) {
 	absComposeFiles := make([]string, len(composeFiles))
 	for i, composeFile := range composeFiles {
@@ -191,7 +216,7 @@ func relocateExternalName(project *types.Project) error {
 	return nil
 }

-func relocateLogOpt(s types.ServiceConfig) error {
+func relocateLogOpt(s *types.ServiceConfig) error {
 	if len(s.LogOpt) != 0 {
 		logrus.Warn("`log_opts` is deprecated. Use the `logging` element")
 		if s.Logging == nil {
@@ -208,7 +233,7 @@ func relocateLogOpt(s types.ServiceConfig) error {
 	return nil
 }

-func relocateLogDriver(s types.ServiceConfig) error {
+func relocateLogDriver(s *types.ServiceConfig) error {
 	if s.LogDriver != "" {
 		logrus.Warn("`log_driver` is deprecated. Use the `logging` element")
 		if s.Logging == nil {
@@ -223,7 +248,7 @@ func relocateLogDriver(s types.ServiceConfig) error {
 	return nil
 }

-func relocateDockerfile(s types.ServiceConfig) error {
+func relocateDockerfile(s *types.ServiceConfig) error {
 	if s.Dockerfile != "" {
 		logrus.Warn("`dockerfile` is deprecated. Use the `build` element")
 		if s.Build == nil {
Some files were not shown because too many files have changed in this diff.