Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-15 08:15:55 +08:00)

Compare commits

6 commits
| Author | SHA1 | Date |
|---|---|---|
| | 45a3a79246 | |
| | 72af779e8a | |
| | 05846896d1 | |
| | 906948782e | |
| | 1e1cc940df | |
| | e89ed1bcb6 | |
26 .github/workflows/build.yml (vendored)
```diff
@@ -15,6 +15,7 @@ on:

env:
  REPO_SLUG: "docker/buildx-bin"
  REPO_SLUG_ORIGIN: "moby/buildkit:master"
  RELEASE_OUT: "./release-out"

jobs:
@@ -30,6 +31,8 @@ jobs:
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          driver-opts: image=${{ env.REPO_SLUG_ORIGIN }}
      -
        name: Test
        run: |
@@ -87,26 +90,3 @@ jobs:
        with:
          draft: true
          files: ${{ env.RELEASE_OUT }}/*

  buildkit-edge:
    runs-on: ubuntu-latest
    continue-on-error: true
    steps:
      -
        name: Checkout
        uses: actions/checkout@v2
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
        with:
          driver-opts: image=moby/buildkit:master
          buildkitd-flags: --debug
      -
        # Just run a bake target to check eveything runs fine
        name: Build
        uses: docker/bake-action@v1
        with:
          targets: binaries-cross
```
1 .github/workflows/e2e.yml (vendored)
```diff
@@ -31,7 +31,6 @@ jobs:
          - mnode-false
          - mnode-true
        platforms:
          - linux/amd64
          - linux/amd64,linux/arm64
        include:
          - driver: kubernetes
```
Dockerfile

```diff
@@ -6,7 +6,7 @@ ARG DOCKERD_VERSION=20.10.8
FROM docker:$DOCKERD_VERSION AS dockerd-release

# xx is a helper for cross-compilation
FROM --platform=$BUILDPLATFORM tonistiigi/xx:1.0.0 AS xx
FROM --platform=$BUILDPLATFORM tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04 AS xx

FROM --platform=$BUILDPLATFORM golang:${GO_VERSION}-alpine AS golatest
```
104 README.md
````diff
@@ -1,10 +1,9 @@
# buildx

[](https://github.com/docker/buildx/releases/latest)
[](https://pkg.go.dev/github.com/docker/buildx)
[](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
[](https://goreportcard.com/report/github.com/docker/buildx)
[](https://codecov.io/gh/docker/buildx)
[](https://pkg.go.dev/github.com/docker/buildx)
[](https://github.com/docker/buildx/actions?query=workflow%3Abuild)
[](https://goreportcard.com/report/github.com/docker/buildx)
[](https://codecov.io/gh/docker/buildx)

`buildx` is a Docker CLI plugin for extended build capabilities with
[BuildKit](https://github.com/moby/buildkit).
@@ -22,10 +21,9 @@ Key features:
# Table of Contents

- [Installing](#installing)
  - [Windows and macOS](#windows-and-macos)
  - [Linux packages](#linux-packages)
  - [Manual download](#manual-download)
  - [Dockerfile](#dockerfile)
  - [Docker](#docker)
  - [Binary release](#binary-release)
  - [From `Dockerfile`](#from-dockerfile)
- [Set buildx as the default builder](#set-buildx-as-the-default-builder)
- [Building](#building)
  - [Getting started](#getting-started)
@@ -33,25 +31,18 @@ Key features:
  - [Working with builder instances](#working-with-builder-instances)
  - [Building multi-platform images](#building-multi-platform-images)
  - [High-level build options](#high-level-build-options)
- [Guides](docs/guides)
  - [CI/CD](docs/guides/cicd.md)
  - [CNI networking](docs/guides/cni-networking.md)
  - [Registry mirror](docs/guides/registry-mirror.md)
  - [Resource limiting](docs/guides/resource-limiting.md)
  - [Using a custom network](docs/guides/custom-network.md)
  - [Using a custom registry configuration](docs/guides/custom-registry-config.md)
- [Reference](docs/reference/buildx.md)
  - [`buildx bake`](docs/reference/buildx_bake.md)
- [Documentation](docs/reference/buildx.md)
  - [`buildx bake`](docs/reference/buildx_bake.md)
  - [`buildx build`](docs/reference/buildx_build.md)
  - [`buildx create`](docs/reference/buildx_create.md)
  - [`buildx du`](docs/reference/buildx_du.md)
  - [`buildx imagetools`](docs/reference/buildx_imagetools.md)
    - [`buildx imagetools create`](docs/reference/buildx_imagetools_create.md)
    - [`buildx imagetools inspect`](docs/reference/buildx_imagetools_inspect.md)
  - [`buildx imagetools inspect`](docs/reference/buildx_imagetools_inspect.md)
  - [`buildx inspect`](docs/reference/buildx_inspect.md)
  - [`buildx install`](docs/reference/buildx_install.md)
  - [`buildx ls`](docs/reference/buildx_ls.md)
  - [`buildx prune`](docs/reference/buildx_prune.md)
  - [`buildx ls`](docs/reference/buildx_ls.md)
  - [`buildx prune`](docs/reference/buildx_prune.md)
  - [`buildx rm`](docs/reference/buildx_rm.md)
  - [`buildx stop`](docs/reference/buildx_stop.md)
  - [`buildx uninstall`](docs/reference/buildx_uninstall.md)
@@ -65,60 +56,27 @@ Using `buildx` as a docker CLI plugin requires using Docker 19.03 or newer.
A limited set of functionality works with older versions of Docker when
invoking the binary directly.

## Windows and macOS
## Docker

Docker Buildx is included in [Docker Desktop](https://docs.docker.com/desktop/)
for Windows and macOS.
`buildx` comes bundled with Docker Desktop and in latest
[Docker CE packages](https://docs.docker.com/engine/install/), but may not be
included in third-party software components (in which case follow the
[binary release](#binary-release) instructions).

## Linux packages
## Binary release

Docker Linux packages also include Docker Buildx when installed using the
[DEB or RPM packages](https://docs.docker.com/engine/install/).
You can also download the latest `buildx` binary from the
[GitHub releases](https://github.com/docker/buildx/releases/latest) page, copy
it to `~/.docker/cli-plugins` folder with name `docker-buildx` and change the
permission to execute:

## Manual download
```console
$ chmod a+x ~/.docker/cli-plugins/docker-buildx
```

> **Important**
>
> This section is for unattended installation of the buildx component. These
> instructions are mostly suitable for testing purposes. We do not recommend
> installing buildx using manual download in production environments as they
> will not be updated automatically with security updates.
>
> On Windows and macOS, we recommend that you install [Docker Desktop](https://docs.docker.com/desktop/)
> instead. For Linux, we recommend that you follow the [instructions specific for your distribution](#linux-packages).
## From `Dockerfile`

You can also download the latest binary from the [GitHub releases page](https://github.com/docker/buildx/releases/latest).

Rename the relevant binary and copy it to the destination matching your OS:

| OS      | Binary name         | Destination folder                  |
| ------- | ------------------- | ----------------------------------- |
| Linux   | `docker-buildx`     | `$HOME/.docker/cli-plugins`         |
| macOS   | `docker-buildx`     | `$HOME/.docker/cli-plugins`         |
| Windows | `docker-buildx.exe` | `%USERPROFILE%\.docker\cli-plugins` |

Or copy it into one of these folders for installing it system-wide.

On Unix environments:

* `/usr/local/lib/docker/cli-plugins` OR `/usr/local/libexec/docker/cli-plugins`
* `/usr/lib/docker/cli-plugins` OR `/usr/libexec/docker/cli-plugins`

On Windows:

* `C:\ProgramData\Docker\cli-plugins`
* `C:\Program Files\Docker\cli-plugins`

> **Note**
>
> On Unix environments, it may also be necessary to make it executable with `chmod +x`:
> ```shell
> $ chmod +x ~/.docker/cli-plugins/docker-buildx
> ```

## Dockerfile

Here is how to install and use Buildx inside a Dockerfile through the
Here is how to use buildx inside a Dockerfile through the
[`docker/buildx-bin`](https://hub.docker.com/r/docker/buildx-bin) image:

```Dockerfile
@@ -139,17 +97,17 @@ To remove this alias, run [`docker buildx uninstall`](docs/reference/buildx_unin
```console
# Buildx 0.6+
$ docker buildx bake "https://github.com/docker/buildx.git"
$ docker buildx bake "git://github.com/docker/buildx"
$ mkdir -p ~/.docker/cli-plugins
$ mv ./bin/buildx ~/.docker/cli-plugins/docker-buildx

# Docker 19.03+
$ DOCKER_BUILDKIT=1 docker build --platform=local -o . "https://github.com/docker/buildx.git"
$ DOCKER_BUILDKIT=1 docker build --platform=local -o . "git://github.com/docker/buildx"
$ mkdir -p ~/.docker/cli-plugins
$ mv buildx ~/.docker/cli-plugins/docker-buildx

# Local
$ git clone https://github.com/docker/buildx.git && cd buildx
$ git clone git://github.com/docker/buildx && cd buildx
$ make install
```
@@ -204,7 +162,7 @@ with `--output`.
## Working with builder instances

By default, buildx will initially use the `docker` driver if it is supported,
providing a very similar user experience to the native `docker build`. Note that
providing a very similar user experience to the native `docker build`. Note tha
you must use a local shared daemon to build your applications.

Buildx allows you to create new instances of isolated builders. This can be
````
|
297
bake/bake.go
297
bake/bake.go
```diff
@@ -7,9 +7,7 @@ import (
    "io/ioutil"
    "os"
    "path"
    "path/filepath"
    "regexp"
    "sort"
    "strconv"
    "strings"

@@ -24,13 +22,8 @@ import (
    "github.com/pkg/errors"
)

var (
    httpPrefix                   = regexp.MustCompile(`^https?://`)
    gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)

    validTargetNameChars = `[a-zA-Z0-9_-]+`
    targetNamePattern    = regexp.MustCompile(`^` + validTargetNameChars + `$`)
)
var httpPrefix = regexp.MustCompile(`^https?://`)
var gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)

type File struct {
    Name string
@@ -62,21 +55,12 @@ func ReadLocalFiles(names []string) ([]File, error) {
    out := make([]File, 0, len(names))

    for _, n := range names {
        var dt []byte
        var err error
        if n == "-" {
            dt, err = ioutil.ReadAll(os.Stdin)
            if err != nil {
                return nil, err
            }
        } else {
            dt, err = ioutil.ReadFile(n)
            if err != nil {
                if isDefault && errors.Is(err, os.ErrNotExist) {
                    continue
                }
                return nil, err
        dt, err := ioutil.ReadFile(n)
        if err != nil {
            if isDefault && errors.Is(err, os.ErrNotExist) {
                continue
            }
            return nil, err
        }
        out = append(out, File{Name: n, Data: dt})
    }
```
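In `ReadLocalFiles` above, the name `-` selects standard input. A minimal standalone sketch of the same convention (the `readOne` helper is hypothetical, not buildx API):

```go
package main

import (
	"fmt"
	"io"
	"os"
)

// readOne reads a named file, treating "-" as standard input,
// mirroring the convention used by ReadLocalFiles above.
func readOne(name string) ([]byte, error) {
	if name == "-" {
		return io.ReadAll(os.Stdin)
	}
	return os.ReadFile(name)
}

func main() {
	dt, err := readOne("-") // echo hello | go run main.go
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Printf("read %d bytes\n", len(dt))
}
```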
```diff
@@ -115,47 +99,12 @@ func ReadTargets(ctx context.Context, files []File, targets, overrides []string,
            g = []*Group{{Targets: group.Targets}}
        }
    } else {
        var gt []string
        for _, target := range targets {
            isGroup := false
            for _, group := range c.Groups {
                if target == group.Name {
                    gt = append(gt, group.Targets...)
                    isGroup = true
                    break
                }
            }
            if !isGroup {
                gt = append(gt, target)
            }
        }
        g = []*Group{{Targets: dedupString(gt)}}
    }

    for name, t := range m {
        if err := c.loadLinks(name, t, m, o, nil); err != nil {
            return nil, nil, err
        }
        g = []*Group{{Targets: targets}}
    }

    return m, g, nil
}

func dedupString(s []string) []string {
    if len(s) == 0 {
        return s
    }
    var res []string
    seen := make(map[string]struct{})
    for _, val := range s {
        if _, ok := seen[val]; !ok {
            res = append(res, val)
            seen[val] = struct{}{}
        }
    }
    return res
}
```
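`dedupString` drops repeats while preserving first-occurrence order, which is what lets `ReadTargets` accept the same group or target twice on the command line without building it twice. A quick usage sketch (reproducing the helper verbatim so the snippet runs on its own):

```go
package main

import "fmt"

// dedupString, as defined in bake.go above: order-preserving de-duplication.
func dedupString(s []string) []string {
	if len(s) == 0 {
		return s
	}
	var res []string
	seen := make(map[string]struct{})
	for _, val := range s {
		if _, ok := seen[val]; !ok {
			res = append(res, val)
			seen[val] = struct{}{}
		}
	}
	return res
}

func main() {
	// Requesting the same targets twice must not duplicate work.
	fmt.Println(dedupString([]string{"app", "db", "app", "db"})) // [app db]
}
```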
```diff
func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error) {
    defer func() {
        err = formatHCLError(err, files)
@@ -189,9 +138,8 @@ func ParseFiles(files []File, defaults map[string]string) (_ *Config, err error)

    if len(fs) > 0 {
        if err := hclparser.Parse(hcl.MergeFiles(fs), hclparser.Opt{
            LookupVar:     os.LookupEnv,
            Vars:          defaults,
            ValidateLabel: validateTargetName,
            LookupVar: os.LookupEnv,
            Vars:      defaults,
        }, &c); err.HasErrors() {
            return nil, err
        }
@@ -311,45 +259,10 @@ func (c Config) expandTargets(pattern string) ([]string, error) {
    return names, nil
}

func (c Config) loadLinks(name string, t *Target, m map[string]*Target, o map[string]map[string]Override, visited []string) error {
    visited = append(visited, name)
    for _, v := range t.Contexts {
        if strings.HasPrefix(v, "target:") {
            target := strings.TrimPrefix(v, "target:")
            if target == t.Name {
                return errors.Errorf("target %s cannot link to itself", target)
            }
            for _, v := range visited {
                if v == target {
                    return errors.Errorf("infinite loop from %s to %s", name, target)
                }
            }
            t2, ok := m[target]
            if !ok {
                var err error
                t2, err = c.ResolveTarget(target, o)
                if err != nil {
                    return err
                }
                t2.Outputs = nil
                m[target] = t2
            }
            if err := c.loadLinks(target, t2, m, o, visited); err != nil {
                return err
            }
            if len(t.Platforms) > 1 && len(t2.Platforms) > 1 {
                if !sliceEqual(t.Platforms, t2.Platforms) {
                    return errors.Errorf("target %s can't be used by %s because it is defined for different platforms %v and %v", target, name, t2.Platforms, t.Platforms)
                }
            }
        }
    }
    return nil
}
```
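`loadLinks` guards against cyclic `target:` contexts by threading a `visited` slice through the recursion. The same pattern in isolation (the `graph` map and `visit` function are illustrative, not buildx API):

```go
package main

import "fmt"

// visit walks a dependency graph, failing on cycles the same way
// loadLinks does: by checking the chain of names already visited.
func visit(graph map[string][]string, name string, visited []string) error {
	for _, v := range visited {
		if v == name {
			return fmt.Errorf("infinite loop from %s to %s", visited[len(visited)-1], name)
		}
	}
	visited = append(visited, name)
	for _, dep := range graph[name] {
		if err := visit(graph, dep, visited); err != nil {
			return err
		}
	}
	return nil
}

func main() {
	graph := map[string][]string{"app": {"mid"}, "mid": {"app"}} // app -> mid -> app
	fmt.Println(visit(graph, "app", nil))                        // reports the loop
}
```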
```diff
func (c Config) newOverrides(v []string) (map[string]map[string]Override, error) {
    m := map[string]map[string]Override{}
    for _, v := range v {

        parts := strings.SplitN(v, "=", 2)
        keys := strings.SplitN(parts[0], ".", 3)
        if len(keys) < 2 {
@@ -394,11 +307,6 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
                o.Value = v
            }
            fallthrough
        case "contexts":
            if len(keys) != 3 {
                return nil, errors.Errorf("invalid key %s, contexts requires name", parts[0])
            }
            fallthrough
        default:
            if len(parts) == 2 {
                o.Value = parts[1]
@@ -432,22 +340,16 @@ func (c Config) group(name string, visited map[string]struct{}) []string {
    visited[name] = struct{}{}
    targets := make([]string, 0, len(g.Targets))
    for _, t := range g.Targets {
        tgroup := c.group(t, visited)
        if len(tgroup) > 0 {
            targets = append(targets, tgroup...)
        } else {
            targets = append(targets, t)
        }
        targets = append(targets, c.group(t, visited)...)
    }
    return targets
}

func (c Config) ResolveTarget(name string, overrides map[string]map[string]Override) (*Target, error) {
    t, err := c.target(name, map[string]*Target{}, overrides)
    t, err := c.target(name, map[string]struct{}{}, overrides)
    if err != nil {
        return nil, err
    }
    t.Inherits = nil
    if t.Context == nil {
        s := "."
        t.Context = &s
@@ -459,11 +361,11 @@
    return t, nil
}

func (c Config) target(name string, visited map[string]*Target, overrides map[string]map[string]Override) (*Target, error) {
    if t, ok := visited[name]; ok {
        return t, nil
func (c Config) target(name string, visited map[string]struct{}, overrides map[string]map[string]Override) (*Target, error) {
    if _, ok := visited[name]; ok {
        return nil, nil
    }
    visited[name] = nil
    visited[name] = struct{}{}
    var t *Target
    for _, target := range c.Targets {
        if target.Name == name {
```
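The two `visited` signatures above differ in intent: `map[string]*Target` doubles as a memoization cache, while `map[string]struct{}` is a plain seen-set. The zero-byte set idiom on its own:

```go
package main

import "fmt"

func main() {
	// map[string]struct{} is the conventional zero-byte set in Go:
	// membership is the only question it can answer, and values cost nothing.
	visited := map[string]struct{}{}
	visited["app"] = struct{}{}

	if _, ok := visited["app"]; ok {
		fmt.Println("app already visited")
	}
}
```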
```diff
@@ -484,6 +386,7 @@ func (c Config) target(name string, visited map[st
            tt.Merge(t)
        }
    }
    t.Inherits = nil
    m := defaultTarget()
    m.Merge(tt)
    m.Merge(t)
@@ -491,8 +394,8 @@ func (c Config) target(name string, visited map[st
    if err := tt.AddOverrides(overrides[name]); err != nil {
        return nil, err
    }

    tt.normalize()
    visited[name] = tt
    return tt, nil
}

@@ -509,7 +412,6 @@ type Target struct {
    Inherits []string `json:"inherits,omitempty" hcl:"inherits,optional"`

    Context          *string           `json:"context,omitempty" hcl:"context,optional"`
    Contexts         map[string]string `json:"contexts,omitempty" hcl:"contexts,optional"`
    Dockerfile       *string           `json:"dockerfile,omitempty" hcl:"dockerfile,optional"`
    DockerfileInline *string           `json:"dockerfile-inline,omitempty" hcl:"dockerfile-inline,optional"`
    Args             map[string]string `json:"args,omitempty" hcl:"args,optional"`
@@ -524,8 +426,7 @@ type Target struct {
    Outputs       []string `json:"output,omitempty" hcl:"output,optional"`
    Pull          *bool    `json:"pull,omitempty" hcl:"pull,optional"`
    NoCache       *bool    `json:"no-cache,omitempty" hcl:"no-cache,optional"`
    NetworkMode   *string  `json:"-" hcl:"-"`
    NoCacheFilter []string `json:"no-cache-filter,omitempty" hcl:"no-cache-filter,optional"`

    // IMPORTANT: if you add more fields here, do not forget to update newOverrides and README.
}

@@ -537,16 +438,6 @@ func (t *Target) normalize() {
    t.CacheFrom = removeDupes(t.CacheFrom)
    t.CacheTo = removeDupes(t.CacheTo)
    t.Outputs = removeDupes(t.Outputs)
    t.NoCacheFilter = removeDupes(t.NoCacheFilter)

    for k, v := range t.Contexts {
        if v == "" {
            delete(t.Contexts, k)
        }
    }
    if len(t.Contexts) == 0 {
        t.Contexts = nil
    }
}

func (t *Target) Merge(t2 *Target) {
@@ -565,12 +456,6 @@ func (t *Target) Merge(t2 *Target) {
        }
        t.Args[k] = v
    }
    for k, v := range t2.Contexts {
        if t.Contexts == nil {
            t.Contexts = map[string]string{}
        }
        t.Contexts[k] = v
    }
    for k, v := range t2.Labels {
        if t.Labels == nil {
            t.Labels = map[string]string{}
@@ -607,12 +492,6 @@ func (t *Target) Merge(t2 *Target) {
    if t2.NoCache != nil {
        t.NoCache = t2.NoCache
    }
    if t2.NetworkMode != nil {
        t.NetworkMode = t2.NetworkMode
    }
    if t2.NoCacheFilter != nil { // merge
        t.NoCacheFilter = append(t.NoCacheFilter, t2.NoCacheFilter...)
    }
    t.Inherits = append(t.Inherits, t2.Inherits...)
}
```
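`Merge` layers one target onto another: map fields override key by key, slice fields concatenate. A reduced sketch of those semantics (the two-field `target` struct is a stand-in, not bake's real type):

```go
package main

import "fmt"

// target is a reduced stand-in for bake's Target to show Merge semantics:
// maps override per key, slices concatenate.
type target struct {
	Args map[string]string
	Tags []string
}

func (t *target) merge(t2 *target) {
	for k, v := range t2.Args {
		if t.Args == nil {
			t.Args = map[string]string{}
		}
		t.Args[k] = v
	}
	t.Tags = append(t.Tags, t2.Tags...)
}

func main() {
	base := &target{Args: map[string]string{"foo": "123", "bar": "234"}, Tags: []string{"base"}}
	child := &target{Args: map[string]string{"bar": "567"}, Tags: []string{"child"}}
	base.merge(child)
	fmt.Println(base.Args, base.Tags) // map[bar:567 foo:123] [base child]
}
```

This is the behavior exercised by `TestNestedInherits` further down, where target `b` inherits `foo=123` from `a` but its own `bar=567` wins.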
```diff
@@ -633,14 +512,7 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
            t.Args = map[string]string{}
        }
        t.Args[keys[1]] = value
    case "contexts":
        if len(keys) != 2 {
            return errors.Errorf("contexts require name")
        }
        if t.Contexts == nil {
            t.Contexts = map[string]string{}
        }
        t.Contexts[keys[1]] = value

    case "labels":
        if len(keys) != 2 {
            return errors.Errorf("labels require name")
@@ -671,8 +543,6 @@ func (t *Target) AddOverrides(overrides map[string]Override) error {
            return errors.Errorf("invalid value %s for boolean key no-cache", value)
        }
        t.NoCache = &noCache
    case "no-cache-filter":
        t.NoCacheFilter = o.ArrValue
    case "pull":
        pull, err := strconv.ParseBool(value)
        if err != nil {
@@ -716,21 +586,6 @@ func updateContext(t *build.Inputs, inp *Input) {
    if inp == nil || inp.State == nil {
        return
    }

    for k, v := range t.NamedContexts {
        if v.Path == "." {
            t.NamedContexts[k] = build.NamedContext{Path: inp.URL}
        }
        if strings.HasPrefix(v.Path, "cwd://") || strings.HasPrefix(v.Path, "target:") || strings.HasPrefix(v.Path, "docker-image:") {
            continue
        }
        if IsRemoteURL(v.Path) {
            continue
        }
        st := llb.Scratch().File(llb.Copy(*inp.State, v.Path, "/"), llb.WithCustomNamef("set context %s to %s", k, v.Path))
        t.NamedContexts[k] = build.NamedContext{State: &st}
    }

    if t.ContextPath == "." {
        t.ContextPath = inp.URL
        return
```
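`updateContext` dispatches on a context path's scheme-like prefix; anything unrecognized is treated as a directory inside the remote input. A reduced sketch of that dispatch (the `classify` helper is illustrative, and the remote-URL check is simplified to http(s) where buildx uses `IsRemoteURL`):

```go
package main

import (
	"fmt"
	"strings"
)

// classify mirrors the prefix checks in updateContext above
// (cwd://, target:, docker-image:, then remote URLs, then local paths).
func classify(path string) string {
	switch {
	case strings.HasPrefix(path, "cwd://"):
		return "local path relative to the caller's working directory"
	case strings.HasPrefix(path, "target:"):
		return "output of another bake target"
	case strings.HasPrefix(path, "docker-image:"):
		return "existing image"
	case strings.HasPrefix(path, "https://") || strings.HasPrefix(path, "http://"):
		return "remote URL"
	default:
		return "directory inside the remote build context"
	}
}

func main() {
	for _, p := range []string{"cwd://app", "target:base", "docker-image:alpine:3.16", "./src"} {
		fmt.Printf("%-28s -> %s\n", p, classify(p))
	}
}
```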
```diff
@@ -745,59 +600,6 @@ func updateContext(t *build.Inputs, inp *Input) {
    t.ContextState = &st
}

// validateContextsEntitlements is a basic check to ensure contexts do not
// escape local directories when loaded from remote sources. This is to be
// replaced with proper entitlements support in the future.
func validateContextsEntitlements(t build.Inputs, inp *Input) error {
    if inp == nil || inp.State == nil {
        return nil
    }
    if v, ok := os.LookupEnv("BAKE_ALLOW_REMOTE_FS_ACCESS"); ok {
        if vv, _ := strconv.ParseBool(v); vv {
            return nil
        }
    }
    if t.ContextState == nil {
        if err := checkPath(t.ContextPath); err != nil {
            return err
        }
    }
    for _, v := range t.NamedContexts {
        if v.State != nil {
            continue
        }
        if err := checkPath(v.Path); err != nil {
            return err
        }
    }
    return nil
}

func checkPath(p string) error {
    if IsRemoteURL(p) || strings.HasPrefix(p, "target:") || strings.HasPrefix(p, "docker-image:") {
        return nil
    }
    p, err := filepath.EvalSymlinks(p)
    if err != nil {
        if os.IsNotExist(err) {
            return nil
        }
        return err
    }
    wd, err := os.Getwd()
    if err != nil {
        return err
    }
    rel, err := filepath.Rel(wd, p)
    if err != nil {
        return err
    }
    if strings.HasPrefix(rel, ".."+string(os.PathSeparator)) {
        return errors.Errorf("path %s is outside of the working directory, please set BAKE_ALLOW_REMOTE_FS_ACCESS=1", p)
    }
    return nil
}
```
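The core of `checkPath` is the `filepath.Rel` trick: if the path relative to the working directory starts with `..`, the path escapes. A standalone sketch of that test (the `insideWorkdir` helper is hypothetical and skips the symlink resolution the real code does):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

// insideWorkdir reports whether p stays under the current working
// directory, using the same relative-path check as checkPath above.
func insideWorkdir(p string) (bool, error) {
	wd, err := os.Getwd()
	if err != nil {
		return false, err
	}
	abs, err := filepath.Abs(p)
	if err != nil {
		return false, err
	}
	rel, err := filepath.Rel(wd, abs)
	if err != nil {
		return false, err
	}
	return !strings.HasPrefix(rel, ".."+string(os.PathSeparator)) && rel != "..", nil
}

func main() {
	for _, p := range []string{"./subdir", "../escape"} {
		ok, _ := insideWorkdir(p)
		fmt.Printf("%-10s inside=%v\n", p, ok)
	}
}
```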
```diff
func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
    if v := t.Context; v != nil && *v == "-" {
        return nil, errors.Errorf("context from stdin not allowed in bake")
@@ -830,15 +632,10 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
    if t.Pull != nil {
        pull = *t.Pull
    }
    networkMode := ""
    if t.NetworkMode != nil {
        networkMode = *t.NetworkMode
    }

    bi := build.Inputs{
        ContextPath:    contextPath,
        DockerfilePath: dockerfilePath,
        NamedContexts:  toNamedContexts(t.Contexts),
    }
    if t.DockerfileInline != nil {
        bi.DockerfileInline = *t.DockerfileInline
@@ -847,27 +644,16 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
    if strings.HasPrefix(bi.ContextPath, "cwd://") {
        bi.ContextPath = path.Clean(strings.TrimPrefix(bi.ContextPath, "cwd://"))
    }
    for k, v := range bi.NamedContexts {
        if strings.HasPrefix(v.Path, "cwd://") {
            bi.NamedContexts[k] = build.NamedContext{Path: path.Clean(strings.TrimPrefix(v.Path, "cwd://"))}
        }
    }

    if err := validateContextsEntitlements(bi, inp); err != nil {
        return nil, err
    }

    t.Context = &bi.ContextPath

    bo := &build.Options{
        Inputs:        bi,
        Tags:          t.Tags,
        BuildArgs:     t.Args,
        Labels:        t.Labels,
        NoCache:       noCache,
        NoCacheFilter: t.NoCacheFilter,
        Pull:          pull,
        NetworkMode:   networkMode,
        Inputs:    bi,
        Tags:      t.Tags,
        BuildArgs: t.Args,
        Labels:    t.Labels,
        NoCache:   noCache,
        Pull:      pull,
    }

    platforms, err := platformutil.Parse(t.Platforms)
@@ -960,32 +746,3 @@ func parseOutputType(str string) string {
    }
    return ""
}

func validateTargetName(name string) error {
    if !targetNamePattern.MatchString(name) {
        return errors.Errorf("only %q are allowed", validTargetNameChars)
    }
    return nil
}

func sliceEqual(s1, s2 []string) bool {
    if len(s1) != len(s2) {
        return false
    }
    sort.Strings(s1)
    sort.Strings(s2)
    for i := range s1 {
        if s1[i] != s2[i] {
            return false
        }
    }
    return true
}

func toNamedContexts(m map[string]string) map[string]build.NamedContext {
    m2 := make(map[string]build.NamedContext, len(m))
    for k, v := range m {
        m2[k] = build.NamedContext{Path: v}
    }
    return m2
}
```
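`validateTargetName` anchors the `[a-zA-Z0-9_-]+` pattern, so a single dot, slash, or space rejects the whole name. A quick check of which names pass (note also that `sliceEqual` above sorts both arguments in place, so callers' slices are reordered as a side effect):

```go
package main

import (
	"fmt"
	"regexp"
)

// Same pattern as in bake.go: the entire name must consist of these characters.
var targetNamePattern = regexp.MustCompile(`^[a-zA-Z0-9_-]+$`)

func main() {
	for _, name := range []string{"app", "AbC-0123", "a.b", "a/b"} {
		fmt.Printf("%-10s valid=%v\n", name, targetNamePattern.MatchString(name))
	}
	// app and AbC-0123 pass; a.b and a/b fail, matching TestTargetName below.
}
```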
bake/bake_test.go

```diff
@@ -203,7 +203,7 @@ func TestPushOverride(t *testing.T) {
    t.Parallel()

    fp := File{
        Name: "docker-bake.hcl",
        Name: "docker-bake.hc",
        Data: []byte(
            `target "app" {
                output = ["type=image,compression=zstd"]
@@ -217,7 +217,7 @@ func TestPushOverride(t *testing.T) {
    require.Equal(t, "type=image,compression=zstd,push=true", m["app"].Outputs[0])

    fp = File{
        Name: "docker-bake.hcl",
        Name: "docker-bake.hc",
        Data: []byte(
            `target "app" {
                output = ["type=image,compression=zstd"]
@@ -231,7 +231,7 @@ func TestPushOverride(t *testing.T) {
    require.Equal(t, "type=image,compression=zstd,push=false", m["app"].Outputs[0])

    fp = File{
        Name: "docker-bake.hcl",
        Name: "docker-bake.hc",
        Data: []byte(
            `target "app" {
            }`),
@@ -353,329 +353,50 @@ func TestOverrideMerge(t *testing.T) {
    require.Equal(t, "type=registry", m["app"].Outputs[0])
}

func TestReadContexts(t *testing.T) {
    fp := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
target "base" {
    contexts = {
        foo: "bar"
        abc: "def"
    }
}
target "app" {
    inherits = ["base"]
    contexts = {
        foo: "baz"
    }
}
`),
    }

    ctx := context.TODO()
    m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
    require.NoError(t, err)

    require.Equal(t, 1, len(m))
    _, ok := m["app"]
    require.True(t, ok)

    bo, err := TargetsToBuildOpt(m, &Input{})
    require.NoError(t, err)

    ctxs := bo["app"].Inputs.NamedContexts
    require.Equal(t, 2, len(ctxs))

    require.Equal(t, "baz", ctxs["foo"].Path)
    require.Equal(t, "def", ctxs["abc"].Path)

    m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"app.contexts.foo=bay", "base.contexts.ghi=jkl"}, nil)
    require.NoError(t, err)

    require.Equal(t, 1, len(m))
    _, ok = m["app"]
    require.True(t, ok)

    bo, err = TargetsToBuildOpt(m, &Input{})
    require.NoError(t, err)

    ctxs = bo["app"].Inputs.NamedContexts
    require.Equal(t, 3, len(ctxs))

    require.Equal(t, "bay", ctxs["foo"].Path)
    require.Equal(t, "def", ctxs["abc"].Path)
    require.Equal(t, "jkl", ctxs["ghi"].Path)

    // test resetting base values
    m, _, err = ReadTargets(ctx, []File{fp}, []string{"app"}, []string{"app.contexts.foo="}, nil)
    require.NoError(t, err)

    require.Equal(t, 1, len(m))
    _, ok = m["app"]
    require.True(t, ok)

    bo, err = TargetsToBuildOpt(m, &Input{})
    require.NoError(t, err)

    ctxs = bo["app"].Inputs.NamedContexts
    require.Equal(t, 1, len(ctxs))
    require.Equal(t, "def", ctxs["abc"].Path)
}

func TestReadContextFromTargetUnknown(t *testing.T) {
    fp := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
target "base" {
    contexts = {
        foo: "bar"
        abc: "def"
    }
}
target "app" {
    contexts = {
        foo: "baz"
        bar: "target:bar"
    }
}
`),
    }

    ctx := context.TODO()
    _, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
    require.Error(t, err)
    require.Contains(t, err.Error(), "failed to find target bar")
}

func TestReadContextFromTargetChain(t *testing.T) {
    ctx := context.TODO()
    fp := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
target "base" {
}
target "mid" {
    output = ["foo"]
    contexts = {
        parent: "target:base"
    }
}
target "app" {
    contexts = {
        foo: "baz"
        bar: "target:mid"
    }
}
target "unused" {}
`),
    }

    m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
    require.NoError(t, err)

    require.Equal(t, 3, len(m))
    app, ok := m["app"]
    require.True(t, ok)

    require.Equal(t, 2, len(app.Contexts))

    mid, ok := m["mid"]
    require.True(t, ok)
    require.Equal(t, 0, len(mid.Outputs))
    require.Equal(t, 1, len(mid.Contexts))

    base, ok := m["base"]
    require.True(t, ok)
    require.Equal(t, 0, len(base.Contexts))
}

func TestReadContextFromTargetInfiniteLoop(t *testing.T) {
    ctx := context.TODO()
    fp := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
target "mid" {
    output = ["foo"]
    contexts = {
        parent: "target:app"
    }
}
target "app" {
    contexts = {
        foo: "baz"
        bar: "target:mid"
    }
}
`),
    }
    _, _, err := ReadTargets(ctx, []File{fp}, []string{"app", "mid"}, []string{}, nil)
    require.Error(t, err)
    require.Contains(t, err.Error(), "infinite loop from")
}

func TestReadContextFromTargetMultiPlatform(t *testing.T) {
    ctx := context.TODO()
    fp := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
target "mid" {
    output = ["foo"]
    platforms = ["linux/amd64", "linux/arm64"]
}
target "app" {
    contexts = {
        bar: "target:mid"
    }
    platforms = ["linux/amd64", "linux/arm64"]
}
`),
    }
    _, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
    require.NoError(t, err)
}

func TestReadContextFromTargetInvalidPlatforms(t *testing.T) {
    ctx := context.TODO()
    fp := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
target "mid" {
    output = ["foo"]
    platforms = ["linux/amd64", "linux/riscv64"]
}
target "app" {
    contexts = {
        bar: "target:mid"
    }
    platforms = ["linux/amd64", "linux/arm64"]
}
`),
    }
    _, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, []string{}, nil)
    require.Error(t, err)
    require.Contains(t, err.Error(), "defined for different platforms")
}

func TestReadTargetsDefault(t *testing.T) {
func TestReadTargetsMixed(t *testing.T) {
    t.Parallel()
    ctx := context.TODO()

    f := File{
        Name: "docker-bake.hcl",
    fTargetDefault := File{
        Name: "docker-bake2.hcl",
        Data: []byte(`
target "default" {
    dockerfile = "test"
}`)}

    m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 0, len(g))
    require.Equal(t, 1, len(m))
    require.Equal(t, "test", *m["default"].Dockerfile)
}

func TestReadTargetsSpecified(t *testing.T) {
    t.Parallel()
    ctx := context.TODO()

    f := File{
        Name: "docker-bake.hcl",
    fTargetImage := File{
        Name: "docker-bake3.hcl",
        Data: []byte(`
target "image" {
    dockerfile = "test"
}`)}

    _, _, err := ReadTargets(ctx, []File{f}, []string{"default"}, nil, nil)
    require.Error(t, err)

    m, g, err := ReadTargets(ctx, []File{f}, []string{"image"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, "test", *m["image"].Dockerfile)
}

func TestReadTargetsGroup(t *testing.T) {
    t.Parallel()
    ctx := context.TODO()

    f := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
group "foo" {
    targets = ["image"]
}
target "image" {
    dockerfile = "test"
}`)}

    m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, "test", *m["image"].Dockerfile)
}

func TestReadTargetsGroupAndTarget(t *testing.T) {
    t.Parallel()
    ctx := context.TODO()

    f := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
group "foo" {
    targets = ["image"]
}
target "foo" {
    dockerfile = "bar"
}
target "image" {
    dockerfile = "test"
}`)}

    m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, "test", *m["image"].Dockerfile)

    m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "foo"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, "test", *m["image"].Dockerfile)
}

func TestReadTargetsMixed(t *testing.T) {
    t.Parallel()
    ctx := context.TODO()

    fhcl := File{
    fpHCL := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
group "default" {
    targets = ["image"]
}

target "nocache" {
    no-cache = true
}

group "release" {
    targets = ["image-release"]
}

target "image" {
    inherits = ["nocache"]
    output = ["type=docker"]
}

target "image-release" {
    inherits = ["image"]
    output = ["type=image,push=true"]
    tags = ["user/app:latest"]
}`)}

    fyml := File{
    fpYML := File{
        Name: "docker-compose.yml",
        Data: []byte(`
services:
@@ -691,6 +412,7 @@ services:
      - NODE_ENV=test
      - AWS_ACCESS_KEY_ID=dummy
      - AWS_SECRET_ACCESS_KEY=dummy

  aws:
    build:
      dockerfile: ./aws.Dockerfile
@@ -699,7 +421,7 @@ services:
        CT_TAG: bar
    image: ct-fake-aws:bar`)}

    fjson := File{
    fpJSON := File{
        Name: "docker-bake.json",
        Data: []byte(`{
    "group": {
@@ -720,15 +442,32 @@ services:
    }
}`)}

    m, g, err := ReadTargets(ctx, []File{fhcl}, []string{"default"}, nil, nil)
    ctx := context.TODO()

    m, g, err := ReadTargets(ctx, []File{fTargetDefault}, []string{"default"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 0, len(g))
    require.Equal(t, 1, len(m))
    require.Equal(t, "test", *m["default"].Dockerfile)

    _, _, err = ReadTargets(ctx, []File{fTargetImage}, []string{"default"}, nil, nil)
    require.Error(t, err)

    m, g, err = ReadTargets(ctx, []File{fTargetImage}, []string{"image"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, 1, len(m["image"].Outputs))
    require.Equal(t, "type=docker", m["image"].Outputs[0])
    require.Equal(t, "test", *m["image"].Dockerfile)

    m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image-release"}, nil, nil)
    m, g, err = ReadTargets(ctx, []File{fTargetImage}, []string{"image"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, "test", *m["image"].Dockerfile)

    m, g, err = ReadTargets(ctx, []File{fpHCL}, []string{"image-release"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image-release"}, g[0].Targets)
@@ -736,7 +475,7 @@ services:
    require.Equal(t, 1, len(m["image-release"].Outputs))
    require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])

    m, g, err = ReadTargets(ctx, []File{fhcl}, []string{"image", "image-release"}, nil, nil)
    m, g, err = ReadTargets(ctx, []File{fpHCL}, []string{"image", "image-release"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image", "image-release"}, g[0].Targets)
@@ -745,21 +484,21 @@ services:
    require.Equal(t, 1, len(m["image-release"].Outputs))
    require.Equal(t, "type=image,push=true", m["image-release"].Outputs[0])

    m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"default"}, nil, nil)
    m, g, err = ReadTargets(ctx, []File{fpYML, fpHCL}, []string{"default"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, ".", *m["image"].Context)

    m, g, err = ReadTargets(ctx, []File{fjson}, []string{"default"}, nil, nil)
    m, g, err = ReadTargets(ctx, []File{fpJSON}, []string{"default"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"image"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, ".", *m["image"].Context)

    m, g, err = ReadTargets(ctx, []File{fyml}, []string{"default"}, nil, nil)
    m, g, err = ReadTargets(ctx, []File{fpYML}, []string{"default"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    sort.Strings(g[0].Targets)
@@ -768,7 +507,7 @@ services:
    require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
    require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)

    m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"addon", "aws"}, nil, nil)
    m, g, err = ReadTargets(ctx, []File{fpYML, fpHCL}, []string{"addon", "aws"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    sort.Strings(g[0].Targets)
@@ -777,7 +516,7 @@ services:
    require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
    require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)

    m, g, err = ReadTargets(ctx, []File{fyml, fhcl}, []string{"addon", "aws", "image"}, nil, nil)
    m, g, err = ReadTargets(ctx, []File{fpYML, fpHCL}, []string{"addon", "aws", "image"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    sort.Strings(g[0].Targets)
@@ -787,261 +526,3 @@ services:
    require.Equal(t, "./Dockerfile", *m["addon"].Dockerfile)
    require.Equal(t, "./aws.Dockerfile", *m["aws"].Dockerfile)
}

func TestReadTargetsSameGroupTarget(t *testing.T) {
    t.Parallel()
    ctx := context.TODO()

    f := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
group "foo" {
    targets = ["foo"]
}
target "foo" {
    dockerfile = "bar"
}
target "image" {
    output = ["type=docker"]
}`)}

    m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"foo"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, "bar", *m["foo"].Dockerfile)

    m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "foo"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"foo"}, g[0].Targets)
    require.Equal(t, 1, len(m))
    require.Equal(t, "bar", *m["foo"].Dockerfile)
}

func TestReadTargetsSameGroupTargetMulti(t *testing.T) {
    t.Parallel()
    ctx := context.TODO()

    f := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
group "foo" {
    targets = ["foo", "image"]
}
target "foo" {
    dockerfile = "bar"
}
target "image" {
    output = ["type=docker"]
}`)}

    m, g, err := ReadTargets(ctx, []File{f}, []string{"foo"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"foo", "image"}, g[0].Targets)
    require.Equal(t, 2, len(m))
    require.Equal(t, "bar", *m["foo"].Dockerfile)
    require.Equal(t, "type=docker", m["image"].Outputs[0])

    m, g, err = ReadTargets(ctx, []File{f}, []string{"foo", "image"}, nil, nil)
    require.NoError(t, err)
    require.Equal(t, 1, len(g))
    require.Equal(t, []string{"foo", "image"}, g[0].Targets)
    require.Equal(t, 2, len(m))
    require.Equal(t, "bar", *m["foo"].Dockerfile)
    require.Equal(t, "type=docker", m["image"].Outputs[0])
}

func TestNestedInherits(t *testing.T) {
    ctx := context.TODO()

    f := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
target "a" {
    args = {
        foo = "123"
        bar = "234"
    }
}
target "b" {
    inherits = ["a"]
    args = {
        bar = "567"
    }
}
target "c" {
    inherits = ["a"]
    args = {
        baz = "890"
    }
}
target "d" {
    inherits = ["b", "c"]
}`)}

    cases := []struct {
        name      string
        overrides []string
        want      map[string]string
    }{
        {
            name:      "nested simple",
            overrides: nil,
            want:      map[string]string{"bar": "234", "baz": "890", "foo": "123"},
        },
        {
            name:      "nested with overrides first",
            overrides: []string{"a.args.foo=321", "b.args.bar=432"},
            want:      map[string]string{"bar": "234", "baz": "890", "foo": "321"},
        },
        {
            name:      "nested with overrides last",
            overrides: []string{"a.args.foo=321", "c.args.bar=432"},
            want:      map[string]string{"bar": "432", "baz": "890", "foo": "321"},
        },
    }
    for _, tt := range cases {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            m, g, err := ReadTargets(ctx, []File{f}, []string{"d"}, tt.overrides, nil)
            require.NoError(t, err)
            require.Equal(t, 1, len(g))
            require.Equal(t, []string{"d"}, g[0].Targets)
            require.Equal(t, 1, len(m))
            require.Equal(t, tt.want, m["d"].Args)
        })
    }
}

func TestNestedInheritsWithGroup(t *testing.T) {
    ctx := context.TODO()

    f := File{
        Name: "docker-bake.hcl",
        Data: []byte(`
target "grandparent" {
    output = ["type=docker"]
    args = {
        BAR = "fuu"
    }
}
target "parent" {
    inherits = ["grandparent"]
    args = {
        FOO = "bar"
    }
}
target "child1" {
    inherits = ["parent"]
}
target "child2" {
    inherits = ["parent"]
    args = {
        FOO2 = "bar2"
    }
}
group "default" {
    targets = [
        "child1",
        "child2"
    ]
}`)}

    cases := []struct {
        name      string
        overrides []string
        wantch1   map[string]string
        wantch2   map[string]string
    }{
        {
            name:      "nested simple",
            overrides: nil,
            wantch1:   map[string]string{"BAR": "fuu", "FOO": "bar"},
            wantch2:   map[string]string{"BAR": "fuu", "FOO": "bar", "FOO2": "bar2"},
        },
        {
            name:      "nested with overrides first",
            overrides: []string{"grandparent.args.BAR=fii", "child1.args.FOO=baaar"},
            wantch1:   map[string]string{"BAR": "fii", "FOO": "baaar"},
            wantch2:   map[string]string{"BAR": "fii", "FOO": "bar", "FOO2": "bar2"},
        },
        {
            name:      "nested with overrides last",
            overrides: []string{"grandparent.args.BAR=fii", "child2.args.FOO=baaar"},
            wantch1:   map[string]string{"BAR": "fii", "FOO": "bar"},
            wantch2:   map[string]string{"BAR": "fii", "FOO": "baaar", "FOO2": "bar2"},
        },
    }
    for _, tt := range cases {
        tt := tt
        t.Run(tt.name, func(t *testing.T) {
            m, g, err := ReadTargets(ctx, []File{f}, []string{"default"}, tt.overrides, nil)
            require.NoError(t, err)
            require.Equal(t, 1, len(g))
            require.Equal(t, []string{"child1", "child2"}, g[0].Targets)
            require.Equal(t, 2, len(m))
            require.Equal(t, tt.wantch1, m["child1"].Args)
            require.Equal(t, []string{"type=docker"}, m["child1"].Outputs)
            require.Equal(t, tt.wantch2, m["child2"].Args)
            require.Equal(t, []string{"type=docker"}, m["child2"].Outputs)
        })
    }
}

func TestTargetName(t *testing.T) {
    ctx := context.TODO()
    cases := []struct {
        target  string
        wantErr bool
    }{
        {target: "a", wantErr: false},
        {target: "abc", wantErr: false},
        {target: "a/b", wantErr: true},
        {target: "a.b", wantErr: true},
        {target: "_a", wantErr: false},
        {target: "a_b", wantErr: false},
        {target: "AbC", wantErr: false},
        {target: "AbC-0123", wantErr: false},
    }
    for _, tt := range cases {
        tt := tt
        t.Run(tt.target, func(t *testing.T) {
            _, _, err := ReadTargets(ctx, []File{{
                Name: "docker-bake.hcl",
                Data: []byte(`target "` + tt.target + `" {}`),
            }}, []string{tt.target}, nil, nil)
            if tt.wantErr {
                require.Error(t, err)
            } else {
                require.NoError(t, err)
            }
        })
    }
}
```
bake/compose.go

```diff
@@ -8,7 +8,6 @@ import (
    "github.com/compose-spec/compose-go/loader"
    compose "github.com/compose-spec/compose-go/types"
    "github.com/pkg/errors"
)

func parseCompose(dt []byte) (*compose.Project, error) {
@@ -60,10 +59,6 @@ func ParseCompose(dt []byte) (*Config, error) {
            continue
        }

        if err = validateTargetName(s.Name); err != nil {
            return nil, errors.Wrapf(err, "invalid service name %q", s.Name)
        }

        var contextPathP *string
        if s.Build.Context != "" {
            contextPath := s.Build.Context
@@ -81,14 +76,10 @@ func ParseCompose(dt []byte) (*Config, error) {
            Dockerfile: dockerfilePathP,
            Labels:     s.Build.Labels,
            Args: flatten(s.Build.Args.Resolve(func(val string) (string, bool) {
                if val, ok := s.Environment[val]; ok && val != nil {
                    return *val, true
                }
                val, ok := cfg.Environment[val]
                return val, ok
            })),
            CacheFrom:   s.Build.CacheFrom,
            NetworkMode: &s.Build.Network,
            CacheFrom: s.Build.CacheFrom,
        }
        if err = t.composeExtTarget(s.Build.Extensions); err != nil {
            return nil, err
@@ -194,14 +185,6 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
        if res, ok := val.(bool); ok {
            t.NoCache = &res
        }
    case "no-cache-filter":
        if res, k := val.(string); k {
            t.NoCacheFilter = append(t.NoCacheFilter, res)
        } else {
            for _, res := range val.([]interface{}) {
                t.NoCacheFilter = append(t.NoCacheFilter, res.(string))
            }
        }
    default:
        return fmt.Errorf("compose file invalid: unkwown %s field for x-bake", key)
    }
```
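The `Resolve` closure above gives a service's own `environment` entries precedence over project-level variables when resolving build args. The same precedence in isolation (the `resolve` helper and both maps are illustrative, loosely mirroring compose-go's types):

```go
package main

import "fmt"

// resolve mimics the precedence in ParseCompose above: a value set on the
// service wins over one from the project-level environment.
func resolve(service map[string]*string, project map[string]string, key string) (string, bool) {
	if v, ok := service[key]; ok && v != nil {
		return *v, true
	}
	v, ok := project[key]
	return v, ok
}

func main() {
	nodeEnv := "test"
	service := map[string]*string{"NODE_ENV": &nodeEnv}
	project := map[string]string{"NODE_ENV": "production", "FOO": "bar"}

	v, _ := resolve(service, project, "NODE_ENV")
	fmt.Println(v) // test: the service-level value wins
	v, _ = resolve(service, project, "FOO")
	fmt.Println(v) // bar: falls back to the project environment
}
```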
bake/compose_test.go

```diff
@@ -19,8 +19,6 @@ services:
    build:
      context: ./dir
      dockerfile: Dockerfile-alternate
      network:
        none
      args:
        buildno: 123
`)
@@ -45,7 +43,6 @@ services:
    require.Equal(t, "Dockerfile-alternate", *c.Targets[1].Dockerfile)
    require.Equal(t, 1, len(c.Targets[1].Args))
    require.Equal(t, "123", c.Targets[1].Args["buildno"])
    require.Equal(t, "none", *c.Targets[1].NetworkMode)
}

func TestNoBuildOutOfTreeService(t *testing.T) {
@@ -280,89 +277,7 @@ services:
    require.Equal(t, c.Targets[1].NoCache, newBool(true))
}

func TestEnv(t *testing.T) {
    envf, err := os.CreateTemp("", "env")
    require.NoError(t, err)
    defer os.Remove(envf.Name())

    _, err = envf.WriteString("FOO=bsdf -csdf\n")
    require.NoError(t, err)

    var dt = []byte(`
services:
  scratch:
    build:
      context: .
      args:
        CT_ECR: foo
        FOO:
        NODE_ENV:
    environment:
      - NODE_ENV=test
      - AWS_ACCESS_KEY_ID=dummy
      - AWS_SECRET_ACCESS_KEY=dummy
    env_file:
      - ` + envf.Name() + `
`)

    c, err := ParseCompose(dt)
    require.NoError(t, err)
    require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"})
}

func newBool(val bool) *bool {
    b := val
    return &b
}

func TestServiceName(t *testing.T) {
    cases := []struct {
        svc     string
        wantErr bool
    }{
        {svc: "a", wantErr: false},
        {svc: "abc", wantErr: false},
        {svc: "a.b", wantErr: true},
        {svc: "_a", wantErr: false},
        {svc: "a_b", wantErr: false},
        {svc: "AbC", wantErr: false},
        {svc: "AbC-0123", wantErr: false},
    }
    for _, tt := range cases {
        tt := tt
        t.Run(tt.svc, func(t *testing.T) {
            _, err := ParseCompose([]byte(`
services:
  ` + tt.svc + `:
    build:
      context: .
`))
            if tt.wantErr {
                require.Error(t, err)
            } else {
                require.NoError(t, err)
            }
        })
    }
}
```
```diff
@@ -3,7 +3,7 @@ package bake

import (
    "strings"

    "github.com/hashicorp/hcl/v2"
    hcl "github.com/hashicorp/hcl/v2"
    "github.com/hashicorp/hcl/v2/hclparse"
    "github.com/moby/buildkit/solver/errdefs"
    "github.com/moby/buildkit/solver/pb"
@@ -16,9 +16,8 @@ import (
)

type Opt struct {
    LookupVar     func(string) (string, bool)
    Vars          map[string]string
    ValidateLabel func(string) error
    LookupVar func(string) (string, bool)
    Vars      map[string]string
}

type variable struct {
@@ -263,12 +262,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
        }
    }

    if opt.ValidateLabel == nil {
        opt.ValidateLabel = func(string) error {
            return nil
        }
    }

    p := &parser{
        opt: opt,

@@ -453,17 +446,6 @@ func Parse(b hcl.Body, opt Opt, val interface{}) hcl.Diagnostics {
            continue
        }

        if err := opt.ValidateLabel(b.Labels[0]); err != nil {
            return hcl.Diagnostics{
                &hcl.Diagnostic{
                    Severity: hcl.DiagError,
                    Summary:  "Invalid name",
                    Detail:   err.Error(),
                    Subject:  &b.LabelRanges[0],
                },
            }
        }

        lblIndex := setLabel(vv, b.Labels[0])

        oldValue, exists := t.values[b.Labels[0]]
```
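Defaulting a nil hook to a no-op, as `Parse` does with `ValidateLabel`, keeps every later call site unconditional. The pattern in isolation (the `opt`/`run` names are illustrative, not the hclparser API):

```go
package main

import "fmt"

type opt struct {
	// validateLabel may be left nil by callers; Parse-style code
	// substitutes a no-op so call sites never need a nil check.
	validateLabel func(string) error
}

func run(o opt, label string) error {
	if o.validateLabel == nil {
		o.validateLabel = func(string) error { return nil }
	}
	return o.validateLabel(label)
}

func main() {
	fmt.Println(run(opt{}, "anything")) // <nil>: a missing hook accepts all labels
	fmt.Println(run(opt{validateLabel: func(s string) error {
		return fmt.Errorf("only %q are allowed", "[a-zA-Z0-9_-]+")
	}}, "a.b")) // the injected validator rejects
}
```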
319 build/build.go
```diff
@@ -14,7 +14,6 @@ import (
    "strconv"
    "strings"
    "sync"
    "syscall"
    "time"

    "github.com/containerd/containerd/images"
@@ -23,7 +22,6 @@ import (
    "github.com/docker/buildx/util/imagetools"
    "github.com/docker/buildx/util/progress"
    "github.com/docker/buildx/util/resolver"
    "github.com/docker/buildx/util/waitmap"
    "github.com/docker/cli/opts"
    "github.com/docker/distribution/reference"
    "github.com/docker/docker/api/types"
@@ -32,11 +30,9 @@ import (
    "github.com/docker/docker/pkg/urlutil"
    "github.com/moby/buildkit/client"
    "github.com/moby/buildkit/client/llb"
    "github.com/moby/buildkit/exporter/containerimage/exptypes"
    gateway "github.com/moby/buildkit/frontend/gateway/client"
    "github.com/moby/buildkit/session"
    "github.com/moby/buildkit/session/upload/uploadprovider"
    "github.com/moby/buildkit/solver/pb"
    "github.com/moby/buildkit/util/apicaps"
    "github.com/moby/buildkit/util/entitlements"
    "github.com/moby/buildkit/util/progress/progresswriter"
@@ -57,25 +53,24 @@ var (
type Options struct {
    Inputs Inputs

    Allow         []entitlements.Entitlement
    BuildArgs     map[string]string
    CacheFrom     []client.CacheOptionsEntry
    CacheTo       []client.CacheOptionsEntry
    CgroupParent  string
    Exports       []client.ExportEntry
    ExtraHosts    []string
    ImageIDFile   string
    Labels        map[string]string
    NetworkMode   string
    NoCache       bool
    NoCacheFilter []string
    Platforms     []specs.Platform
    Pull          bool
    Session       []session.Attachable
    ShmSize       opts.MemBytes
    Tags          []string
    Target        string
    Ulimits       *opts.UlimitOpt
    Allow        []entitlements.Entitlement
    BuildArgs    map[string]string
    CacheFrom    []client.CacheOptionsEntry
    CacheTo      []client.CacheOptionsEntry
    CgroupParent string
    Exports      []client.ExportEntry
    ExtraHosts   []string
    ImageIDFile  string
    Labels       map[string]string
    NetworkMode  string
    NoCache      bool
    Platforms    []specs.Platform
    Pull         bool
    Session      []session.Attachable
    ShmSize      opts.MemBytes
    Tags         []string
    Target       string
    Ulimits      *opts.UlimitOpt
}

type Inputs struct {
@@ -84,21 +79,14 @@ type Inputs struct {
    InStream         io.Reader
    ContextState     *llb.State
    DockerfileInline string
    NamedContexts    map[string]NamedContext
}

type NamedContext struct {
    Path  string
    State *llb.State
}

type DriverInfo struct {
    Driver      driver.Driver
    Name        string
    Platform    []specs.Platform
    Err         error
    ImageOpt    imagetools.Opt
    ProxyConfig map[string]string
    Driver   driver.Driver
    Name     string
    Platform []specs.Platform
    Err      error
    ImageOpt imagetools.Opt
}

type DockerAPI interface {
@@ -189,10 +177,6 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
        pp = append(pp, p)
        mm[idx] = pp
    }
    // if no platform is specified, use first driver
    if len(mm) == 0 {
        mm[0] = nil
    }
    dps := make([]driverPair, 0, 2)
    for idx, pp := range mm {
        dps = append(dps, driverPair{driverIndex: idx, platforms: pp})
@@ -349,8 +333,7 @@ func toRepoOnly(in string) (string, error) {
    return strings.Join(out, ","), nil
}

func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
    d := di.Driver
func toSolveOpt(ctx context.Context, d driver.Driver, multiDriver bool, opt Options, bopts gateway.BuildOpts, configDir string, pw progress.Writer, dl dockerLoadCallback) (solveOpt *client.SolveOpt, release func(), err error) {
    defers := make([]func(), 0, 2)
    releaseF := func() {
        for _, f := range defers {
@@ -516,12 +499,6 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
            }
        }
    }
    if e.Type == "docker" || e.Type == "image" || e.Type == "oci" {
        // inline buildinfo attrs from build arg
        if v, ok := opt.BuildArgs["BUILDKIT_INLINE_BUILDINFO_ATTRS"]; ok {
            e.Attrs["buildinfo-attrs"] = v
        }
    }
    }

    so.Exports = opt.Exports
@@ -546,9 +523,6 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
    if opt.Target != "" {
        so.FrontendAttrs["target"] = opt.Target
    }
    if len(opt.NoCacheFilter) > 0 {
        so.FrontendAttrs["no-cache"] = strings.Join(opt.NoCacheFilter, ",")
    }
    if opt.NoCache {
        so.FrontendAttrs["no-cache"] = ""
    }
@@ -559,12 +533,6 @@ func toSolveOpt(ctx context.Context, di DriverInfo, multiDriver bool, opt Option
        so.FrontendAttrs["label:"+k] = v
    }

    for k, v := range di.ProxyConfig {
        if _, ok := opt.BuildArgs[k]; !ok {
            so.FrontendAttrs["build-arg:"+k] = v
        }
    }

    // set platforms
    if len(opt.Platforms) != 0 {
        pp := make([]string, len(opt.Platforms))
@@ -659,12 +627,12 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
    multiDriver := len(m[k]) > 1
    hasMobyDriver := false
    for i, dp := range m[k] {
        di := drivers[dp.driverIndex]
        if di.Driver.IsMobyDriver() {
        d := drivers[dp.driverIndex].Driver
        if d.IsMobyDriver() {
            hasMobyDriver = true
        }
        opt.Platforms = dp.platforms
        so, release, err := toSolveOpt(ctx, di, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
        so, release, err := toSolveOpt(ctx, d, multiDriver, opt, dp.bopts, configDir, w, func(name string) (io.WriteCloser, func(), error) {
            return newDockerLoader(ctx, docker, name, w)
        })
        if err != nil {
@@ -697,35 +665,8 @@
        }
    }

    // validate that all links between targets use same drivers
    for name := range opt {
        dps := m[name]
        for _, dp := range dps {
            for k, v := range dp.so.FrontendAttrs {
                if strings.HasPrefix(k, "context:") && strings.HasPrefix(v, "target:") {
                    k2 := strings.TrimPrefix(v, "target:")
                    dps2, ok := m[k2]
                    if !ok {
                        return nil, errors.Errorf("failed to find target %s for context %s", k2, strings.TrimPrefix(k, "context:")) // should be validated before already
                    }
                    var found bool
                    for _, dp2 := range dps2 {
                        if dp2.driverIndex == dp.driverIndex {
                            found = true
```
break
|
||||
}
|
||||
}
|
||||
if !found {
|
||||
return nil, errors.Errorf("failed to use %s as context %s for %s because targets build with different drivers", k2, strings.TrimPrefix(k, "context:"), name)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
resp = map[string]*client.SolveResponse{}
|
||||
var respMu sync.Mutex
|
||||
results := waitmap.New()
|
||||
|
||||
multiTarget := len(opt) > 1
|
||||
|
||||
@@ -766,12 +707,9 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
||||
resp[k] = res[0]
|
||||
respMu.Unlock()
|
||||
if len(res) == 1 {
|
||||
dgst := res[0].ExporterResponse[exptypes.ExporterImageDigestKey]
|
||||
if v, ok := res[0].ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok {
|
||||
dgst = v
|
||||
}
|
||||
digest := res[0].ExporterResponse["containerimage.digest"]
|
||||
if opt.ImageIDFile != "" {
|
||||
return ioutil.WriteFile(opt.ImageIDFile, []byte(dgst), 0644)
|
||||
return ioutil.WriteFile(opt.ImageIDFile, []byte(digest), 0644)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -781,7 +719,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
||||
descs := make([]specs.Descriptor, 0, len(res))
|
||||
|
||||
for _, r := range res {
|
||||
s, ok := r.ExporterResponse[exptypes.ExporterImageDigestKey]
|
||||
s, ok := r.ExporterResponse["containerimage.digest"]
|
||||
if ok {
|
||||
descs = append(descs, specs.Descriptor{
|
||||
Digest: digest.Digest(s),
|
||||
@@ -853,6 +791,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
||||
|
||||
for i, dp := range dps {
|
||||
so := *dp.so
|
||||
|
||||
if multiDriver {
|
||||
for i, e := range so.Exports {
|
||||
switch e.Type {
|
||||
@@ -885,43 +824,14 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
||||
pw := progress.WithPrefix(w, k, multiTarget)
|
||||
|
||||
c := clients[dp.driverIndex]
|
||||
|
||||
pw = progress.ResetTime(pw)
|
||||
|
||||
eg.Go(func() error {
|
||||
pw = progress.ResetTime(pw)
|
||||
defer wg.Done()
|
||||
|
||||
if err := waitContextDeps(ctx, dp.driverIndex, results, &so); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
frontendInputs := make(map[string]*pb.Definition)
|
||||
for key, st := range so.FrontendInputs {
|
||||
def, err := st.Marshal(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
frontendInputs[key] = def.ToPB()
|
||||
}
|
||||
|
||||
req := gateway.SolveRequest{
|
||||
Frontend: so.Frontend,
|
||||
FrontendOpt: so.FrontendAttrs,
|
||||
FrontendInputs: frontendInputs,
|
||||
}
|
||||
so.Frontend = ""
|
||||
so.FrontendAttrs = nil
|
||||
so.FrontendInputs = nil
|
||||
|
||||
ch, done := progress.NewChannel(pw)
|
||||
defer func() { <-done }()
|
||||
|
||||
rr, err := c.Build(ctx, so, "buildx", func(ctx context.Context, c gateway.Client) (*gateway.Result, error) {
|
||||
res, err := c.Solve(ctx, req)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
results.Set(resultKey(dp.driverIndex, k), res)
|
||||
return res, nil
|
||||
}, ch)
|
||||
rr, err := c.Solve(ctx, nil, so, ch)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -937,27 +847,13 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
|
||||
return errors.Errorf("tag is needed when pushing to registry")
|
||||
}
|
||||
pw := progress.ResetTime(pw)
|
||||
pushList := strings.Split(pushNames, ",")
|
||||
for _, name := range pushList {
|
||||
for _, name := range strings.Split(pushNames, ",") {
|
||||
if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
|
||||
return pushWithMoby(ctx, d, name, l)
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
remoteDigest, err := remoteDigestWithMoby(ctx, d, pushList[0])
|
||||
if err == nil && remoteDigest != "" {
|
||||
// old daemons might not have containerimage.config.digest set
|
||||
// in response so use containerimage.digest value for it if available
|
||||
if _, ok := rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; !ok {
|
||||
if v, ok := rr.ExporterResponse[exptypes.ExporterImageDigestKey]; ok {
|
||||
rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey] = v
|
||||
}
|
||||
}
|
||||
rr.ExporterResponse[exptypes.ExporterImageDigestKey] = remoteDigest
|
||||
} else if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -1062,29 +958,6 @@ func pushWithMoby(ctx context.Context, d driver.Driver, name string, l progress.
|
||||
return nil
|
||||
}
|
||||
|
||||
func remoteDigestWithMoby(ctx context.Context, d driver.Driver, name string) (string, error) {
|
||||
api := d.Config().DockerAPI
|
||||
if api == nil {
|
||||
return "", errors.Errorf("invalid empty Docker API reference") // should never happen
|
||||
}
|
||||
creds, err := imagetools.RegistryAuthForRef(name, d.Config().Auth)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
image, _, err := api.ImageInspectWithRaw(ctx, name)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
if len(image.RepoDigests) == 0 {
|
||||
return "", nil
|
||||
}
|
||||
remoteImage, err := api.DistributionInspect(ctx, name, creds)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
return remoteImage.Descriptor.Digest.String(), nil
|
||||
}
|
||||
|
||||
func createTempDockerfile(r io.Reader) (string, error) {
|
||||
dir, err := ioutil.TempDir("", "dockerfile")
|
||||
if err != nil {
|
||||
@@ -1207,36 +1080,6 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr
|
||||
|
||||
target.FrontendAttrs["filename"] = dockerfileName
|
||||
|
||||
for k, v := range inp.NamedContexts {
|
||||
target.FrontendAttrs["frontend.caps"] = "moby.buildkit.frontend.contexts+forward"
|
||||
if v.State != nil {
|
||||
target.FrontendAttrs["context:"+k] = "input:" + k
|
||||
if target.FrontendInputs == nil {
|
||||
target.FrontendInputs = make(map[string]llb.State)
|
||||
}
|
||||
target.FrontendInputs[k] = *v.State
|
||||
continue
|
||||
}
|
||||
|
||||
if urlutil.IsGitURL(v.Path) || urlutil.IsURL(v.Path) || strings.HasPrefix(v.Path, "docker-image://") || strings.HasPrefix(v.Path, "target:") {
|
||||
target.FrontendAttrs["context:"+k] = v.Path
|
||||
continue
|
||||
}
|
||||
st, err := os.Stat(v.Path)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "failed to get build context %v", k)
|
||||
}
|
||||
if !st.IsDir() {
|
||||
return nil, errors.Wrapf(syscall.ENOTDIR, "failed to get build context path %v", v)
|
||||
}
|
||||
localName := k
|
||||
if k == "context" || k == "dockerfile" {
|
||||
localName = "_" + k // underscore to avoid collisions
|
||||
}
|
||||
target.LocalDirs[localName] = v.Path
|
||||
target.FrontendAttrs["context:"+k] = "local:" + localName
|
||||
}
|
||||
|
||||
release := func() {
|
||||
for _, dir := range toRemove {
|
||||
os.RemoveAll(dir)
|
||||
@@ -1245,96 +1088,6 @@ func LoadInputs(ctx context.Context, d driver.Driver, inp Inputs, pw progress.Wr
|
||||
return release, nil
|
||||
}
|
||||
|
||||
func resultKey(index int, name string) string {
|
||||
return fmt.Sprintf("%d-%s", index, name)
|
||||
}
|
||||
|
||||
func waitContextDeps(ctx context.Context, index int, results *waitmap.Map, so *client.SolveOpt) error {
|
||||
m := map[string]string{}
|
||||
for k, v := range so.FrontendAttrs {
|
||||
if strings.HasPrefix(k, "context:") && strings.HasPrefix(v, "target:") {
|
||||
target := resultKey(index, strings.TrimPrefix(v, "target:"))
|
||||
m[target] = k
|
||||
}
|
||||
}
|
||||
if len(m) == 0 {
|
||||
return nil
|
||||
}
|
||||
keys := make([]string, 0, len(m))
|
||||
for k := range m {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
res, err := results.Get(ctx, keys...)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for k, v := range m {
|
||||
r, ok := res[k]
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
rr, ok := r.(*gateway.Result)
|
||||
if !ok {
|
||||
return errors.Errorf("invalid result type %T", rr)
|
||||
}
|
||||
if so.FrontendAttrs == nil {
|
||||
so.FrontendAttrs = map[string]string{}
|
||||
}
|
||||
if so.FrontendInputs == nil {
|
||||
so.FrontendInputs = map[string]llb.State{}
|
||||
}
|
||||
if len(rr.Refs) > 0 {
|
||||
for platform, r := range rr.Refs {
|
||||
st, err := r.ToState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
so.FrontendInputs[k+"::"+platform] = st
|
||||
so.FrontendAttrs[v+"::"+platform] = "input:" + k + "::" + platform
|
||||
metadata := make(map[string][]byte)
|
||||
if dt, ok := rr.Metadata[exptypes.ExporterImageConfigKey+"/"+platform]; ok {
|
||||
metadata[exptypes.ExporterImageConfigKey] = dt
|
||||
}
|
||||
if dt, ok := rr.Metadata[exptypes.ExporterBuildInfo+"/"+platform]; ok {
|
||||
metadata[exptypes.ExporterBuildInfo] = dt
|
||||
}
|
||||
if len(metadata) > 0 {
|
||||
dt, err := json.Marshal(metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
so.FrontendAttrs["input-metadata:"+k+"::"+platform] = string(dt)
|
||||
}
|
||||
}
|
||||
delete(so.FrontendAttrs, v)
|
||||
}
|
||||
if rr.Ref != nil {
|
||||
st, err := rr.Ref.ToState()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
so.FrontendInputs[k] = st
|
||||
so.FrontendAttrs[v] = "input:" + k
|
||||
metadata := make(map[string][]byte)
|
||||
if dt, ok := rr.Metadata[exptypes.ExporterImageConfigKey]; ok {
|
||||
metadata[exptypes.ExporterImageConfigKey] = dt
|
||||
}
|
||||
if dt, ok := rr.Metadata[exptypes.ExporterBuildInfo]; ok {
|
||||
metadata[exptypes.ExporterBuildInfo] = dt
|
||||
}
|
||||
if len(metadata) > 0 {
|
||||
dt, err := json.Marshal(metadata)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
so.FrontendAttrs["input-metadata:"+k] = string(dt)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func notSupported(d driver.Driver, f driver.Feature) error {
|
||||
return errors.Errorf("%s feature is currently not supported for %s driver. Please switch to a different driver (eg. \"docker buildx create --use\")", f, d.Factory().Name())
|
||||
}
|
||||
|
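A side note on the digest plumbing in the hunks above: one side of the diff reads the image digest through the `exptypes.ExporterImageDigestKey` constant while the other uses the literal `"containerimage.digest"` key; both name the same entry in the exporter response. A minimal sketch of the `--iidfile` pattern, assuming a `*client.SolveResponse` as returned by the solve (the helper name and error text are ours, not buildx API):

```go
package build

import (
	"fmt"
	"io/ioutil"

	"github.com/moby/buildkit/client"
)

// writeImageID persists the digest reported by the image exporter, mirroring
// the --iidfile handling above. "containerimage.digest" is the string value
// behind exptypes.ExporterImageDigestKey in BuildKit.
func writeImageID(resp *client.SolveResponse, path string) error {
	dgst, ok := resp.ExporterResponse["containerimage.digest"]
	if !ok {
		return fmt.Errorf("exporter response contains no image digest")
	}
	return ioutil.WriteFile(path, []byte(dgst), 0644)
}
```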
@@ -43,19 +43,21 @@ func init() {
}

func main() {
if plugin.RunningStandalone() {
dockerCli, err := command.NewDockerCli()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
if os.Getenv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND") == "" {
if len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName {
dockerCli, err := command.NewDockerCli()
if err != nil {
fmt.Fprintln(os.Stderr, err)
os.Exit(1)
}
opts := cliflags.NewClientOptions()
dockerCli.Initialize(opts)
rootCmd := commands.NewRootCmd(os.Args[0], false, dockerCli)
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}
os.Exit(0)
}
opts := cliflags.NewClientOptions()
dockerCli.Initialize(opts)
rootCmd := commands.NewRootCmd(os.Args[0], false, dockerCli)
if err := rootCmd.Execute(); err != nil {
os.Exit(1)
}
os.Exit(0)
}

dockerCli, err := command.NewDockerCli()
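For context on the `main` hunk above: when the docker CLI invokes a plugin it sets `DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND` in the environment, so an empty value is taken to mean buildx was executed directly. A hedged sketch of that check (the helper name is ours; the newer code in the diff additionally special-cases the plugin metadata subcommand):

```go
package main

import "os"

// runningStandalone reports whether the binary was executed directly rather
// than through `docker buildx`. The docker CLI sets this variable when it
// dispatches to a plugin.
func runningStandalone() bool {
	return os.Getenv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND") == ""
}
```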
@@ -13,6 +13,7 @@ import (
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/tracing"
"github.com/docker/cli/cli/command"
"github.com/docker/docker/pkg/ioutils"
"github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors"
"github.com/spf13/cobra"
@@ -75,7 +76,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error

ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()
printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, in.progress)
printer := progress.NewPrinter(ctx2, os.Stderr, in.progress)

defer func() {
if printer != nil {
@@ -147,15 +148,19 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error

resp, err := build.Build(ctx, dis, bo, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
if err != nil {
return wrapBuildError(err, true)
return err
}

if len(in.metadataFile) > 0 {
dt := make(map[string]interface{})
for t, r := range resp {
dt[t] = decodeExporterResponse(r.ExporterResponse)
if len(in.metadataFile) > 0 && resp != nil {
mdata := map[string]map[string]string{}
for k, r := range resp {
mdata[k] = r.ExporterResponse
}
if err := writeMetadataFile(in.metadataFile, dt); err != nil {
mdatab, err := json.MarshalIndent(mdata, "", "  ")
if err != nil {
return err
}
if err := ioutils.AtomicWriteFile(in.metadataFile, mdatab, 0644); err != nil {
return err
}
}
@@ -186,10 +191,10 @@ func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags := cmd.Flags()

flags.StringArrayVarP(&options.files, "file", "f", []string{}, "Build definition file")
flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--set=*.output=type=docker"`)
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--set=*.output=type=docker`")
flags.BoolVar(&options.printOnly, "print", false, "Print the options without building")
flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--set=*.output=type=registry"`)
flags.StringArrayVar(&options.overrides, "set", nil, `Override target value (e.g., "targetpattern.key=value")`)
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--set=*.output=type=registry`")
flags.StringArrayVar(&options.overrides, "set", nil, "Override target value (e.g., `targetpattern.key=value`)")

commonBuildFlags(&options.commonOptions, flags)

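The `--metadata-file` change above boils down to: collect each bake target's exporter response, serialize it as indented JSON, and write it atomically. A small sketch under those assumptions (the function name is ours):

```go
package commands

import (
	"encoding/json"

	"github.com/docker/docker/pkg/ioutils"
)

// writeBakeMetadata writes one JSON object per bake target, keyed by target
// name. AtomicWriteFile renames a temp file into place, so readers never
// observe a partially written metadata file.
func writeBakeMetadata(path string, resp map[string]map[string]string) error {
	b, err := json.MarshalIndent(resp, "", "  ")
	if err != nil {
		return err
	}
	return ioutils.AtomicWriteFile(path, b, 0644)
}
```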
@@ -1,12 +1,9 @@
package commands

import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"os"
"path/filepath"
"strings"
@@ -17,24 +14,18 @@ import (
"github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/tracing"
"github.com/docker/cli-docs-tool/annotation"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
dockeropts "github.com/docker/cli/opts"
"github.com/docker/distribution/reference"
"github.com/docker/docker/pkg/ioutils"
"github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/session/auth/authprovider"
"github.com/moby/buildkit/solver/errdefs"
"github.com/moby/buildkit/util/appcontext"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/morikuni/aec"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"google.golang.org/grpc/codes"
)

const defaultTargetName = "default"
@@ -43,26 +34,24 @@ type buildOptions struct {
contextPath string
dockerfileName string

allow []string
buildArgs []string
cacheFrom []string
cacheTo []string
cgroupParent string
contexts []string
extraHosts []string
imageIDFile string
labels []string
networkMode string
noCacheFilter []string
outputs []string
platforms []string
quiet bool
secrets []string
shmSize dockeropts.MemBytes
ssh []string
tags []string
target string
ulimits *dockeropts.UlimitOpt
allow []string
buildArgs []string
cacheFrom []string
cacheTo []string
cgroupParent string
extraHosts []string
imageIDFile string
labels []string
networkMode string
outputs []string
platforms []string
quiet bool
secrets []string
shmSize dockeropts.MemBytes
ssh []string
tags []string
target string
ulimits *dockeropts.UlimitOpt
commonOptions
}

@@ -100,40 +89,29 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
pull = *in.pull
}

if noCache && len(in.noCacheFilter) > 0 {
return errors.Errorf("--no-cache and --no-cache-filter cannot currently be used together")
}

if in.quiet && in.progress != "auto" && in.progress != "quiet" {
return errors.Errorf("progress=%s and quiet cannot be used together", in.progress)
} else if in.quiet {
in.progress = "quiet"
}

contexts, err := parseContextNames(in.contexts)
if err != nil {
return err
}

opts := build.Options{
Inputs: build.Inputs{
ContextPath: in.contextPath,
DockerfilePath: in.dockerfileName,
InStream: os.Stdin,
NamedContexts: contexts,
},
BuildArgs: listToMap(in.buildArgs, true),
ExtraHosts: in.extraHosts,
ImageIDFile: in.imageIDFile,
Labels: listToMap(in.labels, false),
NetworkMode: in.networkMode,
NoCache: noCache,
NoCacheFilter: in.noCacheFilter,
Pull: pull,
ShmSize: in.shmSize,
Tags: in.tags,
Target: in.target,
Ulimits: in.ulimits,
BuildArgs: listToMap(in.buildArgs, true),
ExtraHosts: in.extraHosts,
ImageIDFile: in.imageIDFile,
Labels: listToMap(in.labels, false),
NetworkMode: in.networkMode,
NoCache: noCache,
Pull: pull,
ShmSize: in.shmSize,
Tags: in.tags,
Target: in.target,
Ulimits: in.ulimits,
}

platforms, err := platformutil.Parse(in.platforms)
@@ -226,7 +204,6 @@ func runBuild(dockerCli command.Cli, in buildOptions) (err error) {
}

imageID, err := buildTargets(ctx, dockerCli, map[string]build.Options{defaultTargetName: opts}, in.progress, contextPathHash, in.builder, in.metadataFile)
err = wrapBuildError(err, false)
if err != nil {
return err
}
@@ -246,7 +223,7 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]bu
ctx2, cancel := context.WithCancel(context.TODO())
defer cancel()

printer := progress.NewPrinter(ctx2, os.Stderr, os.Stderr, progressMode)
printer := progress.NewPrinter(ctx2, os.Stderr, progressMode)

resp, err := build.Build(ctx, dis, opts, dockerAPI(dockerCli), confutil.ConfigDir(dockerCli), printer)
err1 := printer.Wait()
@@ -258,56 +235,18 @@ func buildTargets(ctx context.Context, dockerCli command.Cli, opts map[string]bu
}

if len(metadataFile) > 0 && resp != nil {
if err := writeMetadataFile(metadataFile, decodeExporterResponse(resp[defaultTargetName].ExporterResponse)); err != nil {
mdatab, err := json.MarshalIndent(resp[defaultTargetName].ExporterResponse, "", "  ")
if err != nil {
return "", err
}
if err := ioutils.AtomicWriteFile(metadataFile, mdatab, 0644); err != nil {
return "", err
}
}

printWarnings(os.Stderr, printer.Warnings(), progressMode)

return resp[defaultTargetName].ExporterResponse["containerimage.digest"], err
}

func printWarnings(w io.Writer, warnings []client.VertexWarning, mode string) {
if len(warnings) == 0 || mode == progress.PrinterModeQuiet {
return
}
fmt.Fprintf(w, "\n ")
sb := &bytes.Buffer{}
if len(warnings) == 1 {
fmt.Fprintf(sb, "1 warning found")
} else {
fmt.Fprintf(sb, "%d warnings found", len(warnings))
}
if logrus.GetLevel() < logrus.DebugLevel {
fmt.Fprintf(sb, " (use --debug to expand)")
}
fmt.Fprintf(sb, ":\n")
fmt.Fprint(w, aec.Apply(sb.String(), aec.YellowF))

for _, warn := range warnings {
fmt.Fprintf(w, " - %s\n", warn.Short)
if logrus.GetLevel() < logrus.DebugLevel {
continue
}
for _, d := range warn.Detail {
fmt.Fprintf(w, "%s\n", d)
}
if warn.URL != "" {
fmt.Fprintf(w, "More info: %s\n", warn.URL)
}
if warn.SourceInfo != nil && warn.Range != nil {
src := errdefs.Source{
Info: warn.SourceInfo,
Ranges: warn.Range,
}
src.Print(w)
}
fmt.Fprintf(w, "\n")

}
}

func newBuildOptions() buildOptions {
ulimits := make(map[string]*units.Ulimit)
return buildOptions{
@@ -338,54 +277,51 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {

flags := cmd.Flags()

flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)
flags.SetAnnotation("add-host", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})
flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, "Add a custom host-to-IP mapping (format: `host:ip`)")
flags.SetAnnotation("add-host", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host"})

flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
flags.StringSliceVar(&options.allow, "allow", []string{}, "Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`)")

flags.StringArrayVar(&options.buildArgs, "build-arg", []string{}, "Set build-time variables")
flags.SetAnnotation("build-arg", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg"})

flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, `External cache sources (e.g., "user/app:cache", "type=local,src=path/to/dir")`)
flags.StringArrayVar(&options.cacheFrom, "cache-from", []string{}, "External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`)")

flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, `Cache export destinations (e.g., "user/app:cache", "type=local,dest=path/to/dir")`)
flags.StringArrayVar(&options.cacheTo, "cache-to", []string{}, "Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`)")

flags.StringVar(&options.cgroupParent, "cgroup-parent", "", "Optional parent cgroup for the container")
flags.SetAnnotation("cgroup-parent", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})
flags.SetAnnotation("cgroup-parent", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent"})

flags.StringArrayVar(&options.contexts, "build-context", []string{}, "Additional build contexts (e.g., name=path)")

flags.StringVarP(&options.dockerfileName, "file", "f", "", `Name of the Dockerfile (default: "PATH/Dockerfile")`)
flags.SetAnnotation("file", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})
flags.StringVarP(&options.dockerfileName, "file", "f", "", "Name of the Dockerfile (default: `PATH/Dockerfile`)")
flags.SetAnnotation("file", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f"})

flags.StringVar(&options.imageIDFile, "iidfile", "", "Write the image ID to the file")

flags.StringArrayVar(&options.labels, "label", []string{}, "Set metadata for an image")

flags.BoolVar(&options.exportLoad, "load", false, `Shorthand for "--output=type=docker"`)
flags.BoolVar(&options.exportLoad, "load", false, "Shorthand for `--output=type=docker`")

flags.StringVar(&options.networkMode, "network", "default", `Set the networking mode for the "RUN" instructions during build`)
flags.StringVar(&options.networkMode, "network", "default", "Set the networking mode for the RUN instructions during build")

flags.StringArrayVar(&options.noCacheFilter, "no-cache-filter", []string{}, "Do not cache specified stages")

flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, `Output destination (format: "type=local,dest=path")`)
flags.StringArrayVarP(&options.outputs, "output", "o", []string{}, "Output destination (format: `type=local,dest=path`)")

flags.StringArrayVar(&options.platforms, "platform", platformsDefault, "Set target platform for build")

flags.BoolVar(&options.exportPush, "push", false, `Shorthand for "--output=type=registry"`)
flags.BoolVar(&options.exportPush, "push", false, "Shorthand for `--output=type=registry`")

flags.BoolVarP(&options.quiet, "quiet", "q", false, "Suppress the build output and print image ID on success")

flags.StringArrayVar(&options.secrets, "secret", []string{}, `Secret to expose to the build (format: "id=mysecret[,src=/local/secret]")`)
flags.StringArrayVar(&options.secrets, "secret", []string{}, "Secret file to expose to the build (format: `id=mysecret,src=/local/secret`)")

flags.Var(&options.shmSize, "shm-size", `Size of "/dev/shm"`)
flags.Var(&options.shmSize, "shm-size", "Size of `/dev/shm`")

flags.StringArrayVar(&options.ssh, "ssh", []string{}, `SSH agent socket or keys to expose to the build (format: "default|<id>[=<socket>|<key>[,<key>]]")`)
flags.StringArrayVar(&options.ssh, "ssh", []string{}, "SSH agent socket or keys to expose to the build (format: `default|<id>[=<socket>|<key>[,<key>]]`)")

flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, `Name and optionally a tag (format: "name:tag")`)
flags.SetAnnotation("tag", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})
flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Name and optionally a tag (format: `name:tag`)")
flags.SetAnnotation("tag", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t"})

flags.StringVar(&options.target, "target", "", "Set the target build stage to build")
flags.SetAnnotation("target", annotation.ExternalURL, []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})
flags.StringVar(&options.target, "target", "", "Set the target build stage to build.")
flags.SetAnnotation("target", "docs.external.url", []string{"https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target"})

flags.Var(options.ulimits, "ulimit", "Ulimit options")

@@ -413,7 +349,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags.StringVarP(&ignore, "memory", "m", "", "Memory limit")
flags.MarkHidden("memory")

flags.StringVar(&ignore, "memory-swap", "", `Swap limit equal to memory plus swap: "-1" to enable unlimited swap`)
flags.StringVar(&ignore, "memory-swap", "", "Swap limit equal to memory plus swap: `-1` to enable unlimited swap")
flags.MarkHidden("memory-swap")

flags.Int64VarP(&ignoreInt, "cpu-shares", "c", 0, "CPU shares (relative weight)")
@@ -425,10 +361,10 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
flags.Int64Var(&ignoreInt, "cpu-quota", 0, "Limit the CPU CFS (Completely Fair Scheduler) quota")
flags.MarkHidden("cpu-quota")

flags.StringVar(&ignore, "cpuset-cpus", "", `CPUs in which to allow execution ("0-3", "0,1")`)
flags.StringVar(&ignore, "cpuset-cpus", "", "CPUs in which to allow execution (`0-3`, `0,1`)")
flags.MarkHidden("cpuset-cpus")

flags.StringVar(&ignore, "cpuset-mems", "", `MEMs in which to allow execution ("0-3", "0,1")`)
flags.StringVar(&ignore, "cpuset-mems", "", "MEMs in which to allow execution (`0-3`, `0,1`)")
flags.MarkHidden("cpuset-mems")

flags.BoolVar(&ignoreBool, "rm", true, "Remove intermediate containers after a successful build")
@@ -443,8 +379,8 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {

func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
flags.StringVar(&options.progress, "progress", "auto", "Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output")
options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
}

@@ -456,6 +392,7 @@ func checkWarnedFlags(f *pflag.Flag) {
switch t {
case "flag-warn":
logrus.Warn(m[0])
break
}
}
}
@@ -479,80 +416,3 @@ func listToMap(values []string, defaultEnv bool) map[string]string {
}
return result
}

func parseContextNames(values []string) (map[string]build.NamedContext, error) {
if len(values) == 0 {
return nil, nil
}
result := make(map[string]build.NamedContext, len(values))
for _, value := range values {
kv := strings.SplitN(value, "=", 2)
if len(kv) != 2 {
return nil, errors.Errorf("invalid context value: %s, expected key=value", value)
}
named, err := reference.ParseNormalizedNamed(kv[0])
if err != nil {
return nil, errors.Wrapf(err, "invalid context name %s", kv[0])
}
name := strings.TrimSuffix(reference.FamiliarString(named), ":latest")
result[name] = build.NamedContext{Path: kv[1]}
}
return result, nil
}

func writeMetadataFile(filename string, dt interface{}) error {
b, err := json.MarshalIndent(dt, "", "  ")
if err != nil {
return err
}
return ioutils.AtomicWriteFile(filename, b, 0644)
}

func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
out := make(map[string]interface{})
for k, v := range exporterResponse {
dt, err := base64.StdEncoding.DecodeString(v)
if err != nil {
out[k] = v
continue
}
var raw map[string]interface{}
if err = json.Unmarshal(dt, &raw); err != nil || len(raw) == 0 {
out[k] = v
continue
}
out[k] = json.RawMessage(dt)
}
return out
}

func wrapBuildError(err error, bake bool) error {
if err == nil {
return nil
}
st, ok := grpcerrors.AsGRPCStatus(err)
if ok {
if st.Code() == codes.Unimplemented && strings.Contains(st.Message(), "unsupported frontend capability moby.buildkit.frontend.contexts") {
msg := "current frontend does not support --build-context."
if bake {
msg = "current frontend does not support defining additional contexts for targets."
}
msg += " Named contexts are supported since Dockerfile v1.4. Use #syntax directive in Dockerfile or update to latest BuildKit."
return &wrapped{err, msg}
}
}
return err
}

type wrapped struct {
err error
msg string
}

func (w *wrapped) Error() string {
return w.msg
}

func (w *wrapped) Unwrap() error {
return w.err
}
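The `wrapped` type above swaps in a friendlier message while keeping the cause reachable: because it implements `Unwrap`, the standard `errors.Is`/`errors.As` helpers still see the underlying gRPC error. A small same-package illustration (the sentinel error is hypothetical):

```go
package commands

import (
	"errors"
	"fmt"
)

var errFrontend = errors.New("unsupported frontend capability")

func wrappedErrorExample() {
	err := error(&wrapped{err: errFrontend, msg: "current frontend does not support --build-context."})
	fmt.Println(err.Error())                 // prints the friendly message
	fmt.Println(errors.Is(err, errFrontend)) // true: errors.Is follows Unwrap
}
```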
@@ -212,7 +212,7 @@ func createCmd(dockerCli command.Cli) *cobra.Command {
if len(drivers.String()) > 0 {
drivers.WriteString(", ")
}
drivers.WriteString(fmt.Sprintf(`"%s"`, d.Name()))
drivers.WriteString(fmt.Sprintf("`%s`", d.Name()))
}

cmd := &cobra.Command{
@@ -4,18 +4,16 @@ import (
"fmt"
"io"
"os"
"strings"
"text/tabwriter"
"time"

"github.com/docker/buildx/build"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
"github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/appcontext"
"github.com/spf13/cobra"
"github.com/tonistiigi/units"
"golang.org/x/sync/errgroup"
)

@@ -127,20 +125,20 @@ func printKV(w io.Writer, k string, v interface{}) {
func printVerbose(tw *tabwriter.Writer, du []*client.UsageInfo) {
for _, di := range du {
printKV(tw, "ID", di.ID)
if len(di.Parents) != 0 {
printKV(tw, "Parent", strings.Join(di.Parents, ","))
if di.Parent != "" {
printKV(tw, "Parent", di.Parent)
}
printKV(tw, "Created at", di.CreatedAt)
printKV(tw, "Mutable", di.Mutable)
printKV(tw, "Reclaimable", !di.InUse)
printKV(tw, "Shared", di.Shared)
printKV(tw, "Size", units.HumanSize(float64(di.Size)))
printKV(tw, "Size", fmt.Sprintf("%.2f", units.Bytes(di.Size)))
if di.Description != "" {
printKV(tw, "Description", di.Description)
}
printKV(tw, "Usage count", di.UsageCount)
if di.LastUsedAt != nil {
printKV(tw, "Last used", units.HumanDuration(time.Since(*di.LastUsedAt))+" ago")
printKV(tw, "Last used", di.LastUsedAt)
}
if di.RecordType != "" {
printKV(tw, "Type", di.RecordType)
@@ -161,15 +159,11 @@ func printTableRow(tw *tabwriter.Writer, di *client.UsageInfo) {
if di.Mutable {
id += "*"
}
size := units.HumanSize(float64(di.Size))
size := fmt.Sprintf("%.2f", units.Bytes(di.Size))
if di.Shared {
size += "*"
}
lastAccessed := ""
if di.LastUsedAt != nil {
lastAccessed = units.HumanDuration(time.Since(*di.LastUsedAt)) + " ago"
}
fmt.Fprintf(tw, "%-40s\t%-5v\t%-10s\t%s\n", id, !di.InUse, size, lastAccessed)
fmt.Fprintf(tw, "%-71s\t%-11v\t%s\t\n", id, !di.InUse, size)
}

func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
@@ -192,11 +186,11 @@ func printSummary(tw *tabwriter.Writer, dus [][]*client.UsageInfo) {
}

if shared > 0 {
fmt.Fprintf(tw, "Shared:\t%s\n", units.HumanSize(float64(shared)))
fmt.Fprintf(tw, "Private:\t%s\n", units.HumanSize(float64(total-shared)))
fmt.Fprintf(tw, "Shared:\t%.2f\n", units.Bytes(shared))
fmt.Fprintf(tw, "Private:\t%.2f\n", units.Bytes(total-shared))
}

fmt.Fprintf(tw, "Reclaimable:\t%s\n", units.HumanSize(float64(reclaimable)))
fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
fmt.Fprintf(tw, "Reclaimable:\t%.2f\n", units.Bytes(reclaimable))
fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
tw.Flush()
}
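The du hunks above swap between two size formatters: `HumanSize` from `github.com/docker/go-units` (decimal units, takes a float64) and the `%.2f`-formatted `Bytes` type from `github.com/tonistiigi/units`. A side-by-side sketch; the rendered strings in the comments are indicative, not verbatim:

```go
package commands

import (
	"fmt"

	gounits "github.com/docker/go-units"
	tsiunits "github.com/tonistiigi/units"
)

// formatSizes returns the same byte count rendered by both libraries used
// on either side of the diff above.
func formatSizes(n int64) (string, string) {
	a := gounits.HumanSize(float64(n))          // decimal units, e.g. "1.074GB" for 1<<30
	b := fmt.Sprintf("%.2f", tsiunits.Bytes(n)) // binary-unit rendering via fmt.Formatter
	return a, b
}
```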
@@ -1,30 +1,28 @@
package commands

import (
"fmt"
"os"

"github.com/containerd/containerd/images"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/imagetools"
"github.com/docker/cli-docs-tool/annotation"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/spf13/cobra"
)

type inspectOptions struct {
builder string
format string
raw bool
builder string
}

func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
ctx := appcontext.Context()

if in.format != "" && in.raw {
return errors.Errorf("format and raw cannot be used together")
}

txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
@@ -49,13 +47,28 @@ func runInspect(dockerCli command.Cli, in inspectOptions, name string) error {
if err != nil {
return err
}
r := imagetools.New(imageopt)

p, err := imagetools.NewPrinter(ctx, imageopt, name, in.format)
dt, desc, err := r.Get(ctx, name)
if err != nil {
return err
}

return p.Print(in.raw, dockerCli.Out())
if in.raw {
fmt.Printf("%s", dt) // avoid newline to keep digest
return nil
}

switch desc.MediaType {
// case images.MediaTypeDockerSchema2Manifest, specs.MediaTypeImageManifest:
// TODO: handle distribution manifest and schema1
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
return imagetools.PrintManifestList(dt, desc, name, os.Stdout)
default:
fmt.Printf("%s\n", dt)
}

return nil
}

func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
@@ -63,7 +76,7 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {

cmd := &cobra.Command{
Use: "inspect [OPTIONS] NAME",
Short: "Show details of an image in the registry",
Short: "Show details of image in the registry",
Args: cli.ExactArgs(1),
RunE: func(cmd *cobra.Command, args []string) error {
options.builder = rootOpts.Builder
@@ -72,11 +85,7 @@ func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
}

flags := cmd.Flags()

flags.StringVar(&options.format, "format", "", "Format the output using the given Go template")
flags.SetAnnotation("format", annotation.DefaultValue, []string{`"{{.Manifest}}"`})

flags.BoolVar(&options.raw, "raw", false, "Show original, unformatted JSON manifest")
flags.BoolVar(&options.raw, "raw", false, "Show original JSON manifest")

return cmd
}

@@ -16,8 +16,8 @@ func RootCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
}

cmd.AddCommand(
createCmd(dockerCli, opts),
inspectCmd(dockerCli, opts),
createCmd(dockerCli, opts),
)

return cmd
@@ -12,11 +12,11 @@ import (
"github.com/docker/cli/cli/command"
"github.com/docker/cli/opts"
"github.com/docker/docker/api/types/filters"
"github.com/docker/go-units"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"github.com/tonistiigi/units"
"golang.org/x/sync/errgroup"
)

@@ -119,7 +119,7 @@ func runPrune(dockerCli command.Cli, opts pruneOptions) error {
<-printed

tw = tabwriter.NewWriter(os.Stdout, 1, 8, 1, '\t', 0)
fmt.Fprintf(tw, "Total:\t%s\n", units.HumanSize(float64(total)))
fmt.Fprintf(tw, "Total:\t%.2f\n", units.Bytes(total))
tw.Flush()
return nil
}
@@ -139,7 +139,7 @@ func pruneCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {

flags := cmd.Flags()
flags.BoolVarP(&options.all, "all", "a", false, "Remove all unused images, not just dangling ones")
flags.Var(&options.filter, "filter", `Provide filter values (e.g., "until=24h")`)
flags.Var(&options.filter, "filter", "Provide filter values (e.g., `until=24h`)")
flags.Var(&options.keepStorage, "keep-storage", "Amount of disk space to keep for cache")
flags.BoolVar(&options.verbose, "verbose", false, "Provide a more verbose output")
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")
@@ -2,53 +2,35 @@ package commands

import (
"context"
"time"

"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/cli/cli"
"github.com/docker/cli/cli/command"
"github.com/moby/buildkit/util/appcontext"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"golang.org/x/sync/errgroup"
)

type rmOptions struct {
builder string
keepState bool
keepDaemon bool
allInactive bool
force bool
builder string
keepState bool
}

const (
rmInactiveWarning = `WARNING! This will remove all builders that are not in running state. Are you sure you want to continue?`
)

func runRm(dockerCli command.Cli, in rmOptions) error {
ctx := appcontext.Context()

if in.allInactive && !in.force && !command.PromptForConfirmation(dockerCli.In(), dockerCli.Out(), rmInactiveWarning) {
return nil
}

txn, release, err := storeutil.GetStore(dockerCli)
if err != nil {
return err
}
defer release()

if in.allInactive {
return rmAllInactive(ctx, txn, dockerCli, in)
}

if in.builder != "" {
ng, err := storeutil.GetNodeGroup(txn, dockerCli, in.builder)
if err != nil {
return err
}
err1 := rm(ctx, dockerCli, in, ng)
err1 := rm(ctx, dockerCli, ng, in.keepState)
if err := txn.Remove(ng.Name); err != nil {
return err
}
@@ -60,7 +42,7 @@ func runRm(dockerCli command.Cli, in rmOptions) error {
return err
}
if ng != nil {
err1 := rm(ctx, dockerCli, in, ng)
err1 := rm(ctx, dockerCli, ng, in.keepState)
if err := txn.Remove(ng.Name); err != nil {
return err
}
@@ -80,9 +62,6 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
RunE: func(cmd *cobra.Command, args []string) error {
options.builder = rootOpts.builder
if len(args) > 0 {
if options.allInactive {
return errors.New("cannot specify builder name when --all-inactive is set")
}
options.builder = args[0]
}
return runRm(dockerCli, options)
@@ -91,30 +70,23 @@ func rmCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {

flags := cmd.Flags()
flags.BoolVar(&options.keepState, "keep-state", false, "Keep BuildKit state")
flags.BoolVar(&options.keepDaemon, "keep-daemon", false, "Keep the buildkitd daemon running")
flags.BoolVar(&options.allInactive, "all-inactive", false, "Remove all inactive builders")
flags.BoolVarP(&options.force, "force", "f", false, "Do not prompt for confirmation")

return cmd
}

func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.NodeGroup) error {
func rm(ctx context.Context, dockerCli command.Cli, ng *store.NodeGroup, keepState bool) error {
dis, err := driversForNodeGroup(ctx, dockerCli, ng, "")
if err != nil {
return err
}
for _, di := range dis {
if di.Driver == nil {
continue
}
// Do not stop the buildkitd daemon when --keep-daemon is provided
if !in.keepDaemon {
if di.Driver != nil {
if err := di.Driver.Stop(ctx, true); err != nil {
return err
}
}
if err := di.Driver.Rm(ctx, true, !in.keepState, !in.keepDaemon); err != nil {
return err
if err := di.Driver.Rm(ctx, true, !keepState); err != nil {
return err
}
}
if di.Err != nil {
err = di.Err
@@ -122,42 +94,3 @@ func rm(ctx context.Context, dockerCli command.Cli, in rmOptions, ng *store.Node
}
return err
}

func rmAllInactive(ctx context.Context, txn *store.Txn, dockerCli command.Cli, in rmOptions) error {
ctx, cancel := context.WithTimeout(ctx, 20*time.Second)
defer cancel()

ll, err := txn.List()
if err != nil {
return err
}

builders := make([]*nginfo, len(ll))
for i, ng := range ll {
builders[i] = &nginfo{ng: ng}
}

eg, _ := errgroup.WithContext(ctx)
for _, b := range builders {
func(b *nginfo) {
eg.Go(func() error {
if err := loadNodeGroupData(ctx, dockerCli, b); err != nil {
return errors.Wrapf(err, "cannot load %s", b.ng.Name)
}
if b.ng.Dynamic {
return nil
}
if b.inactive() {
rmerr := rm(ctx, dockerCli, in, b.ng)
if err := txn.Remove(b.ng.Name); err != nil {
return err
}
return rmerr
}
return nil
})
}(b)
}

return eg.Wait()
}
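One detail worth calling out in `rmAllInactive` above: each `eg.Go` call is wrapped in an immediately invoked `func(b *nginfo)` closure. Before Go 1.22 the loop variable was shared across iterations, so every goroutine had to capture a per-iteration copy. A generic sketch of the same guard, using the more common shadowing form:

```go
package commands

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

// processAll runs one goroutine per item; the shadowing line is the guard.
func processAll(items []string) error {
	eg, _ := errgroup.WithContext(context.Background())
	for _, it := range items {
		it := it // copy the loop variable so each goroutine sees its own value
		eg.Go(func() error {
			fmt.Println(it)
			return nil
		})
	}
	return eg.Wait()
}
```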
@@ -4,11 +4,8 @@ import (
"os"

imagetoolscmd "github.com/docker/buildx/commands/imagetools"
"github.com/docker/buildx/util/logutil"
"github.com/docker/cli-docs-tool/annotation"
"github.com/docker/cli/cli-plugins/plugin"
"github.com/docker/cli/cli/command"
"github.com/sirupsen/logrus"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
)
@@ -18,9 +15,6 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
Short: "Docker Buildx",
Long: `Extended build capabilities with BuildKit`,
Use: name,
Annotations: map[string]string{
annotation.CodeDelimiter: `"`,
},
}
if isPlugin {
cmd.PersistentPreRunE = func(cmd *cobra.Command, args []string) error {
@@ -28,26 +22,6 @@ func NewRootCmd(name string, isPlugin bool, dockerCli command.Cli) *cobra.Comman
}
}

logrus.SetFormatter(&logutil.Formatter{})

logrus.AddHook(logutil.NewFilter([]logrus.Level{
logrus.DebugLevel,
},
"serving grpc connection",
"stopping session",
"using default config store",
))

// filter out useless commandConn.CloseWrite warning message that can occur
// when listing builder instances with "buildx ls" for those that are
// unreachable: "commandConn.CloseWrite: commandconn: failed to wait: signal: killed"
// https://github.com/docker/cli/blob/3fb4fb83dfb5db0c0753a8316f21aea54dab32c5/cli/connhelper/commandconn/commandconn.go#L203-L214
logrus.AddHook(logutil.NewFilter([]logrus.Level{
logrus.WarnLevel,
},
"commandConn.CloseWrite:",
))

addCommands(cmd, dockerCli)
return cmd
}
@@ -8,13 +8,13 @@ import (

"github.com/docker/buildx/build"
"github.com/docker/buildx/driver"
ctxkube "github.com/docker/buildx/driver/kubernetes/context"
"github.com/docker/buildx/store"
"github.com/docker/buildx/store/storeutil"
"github.com/docker/buildx/util/platformutil"
"github.com/docker/buildx/util/progress"
"github.com/docker/cli/cli/command"
"github.com/docker/cli/cli/context/docker"
"github.com/docker/cli/cli/context/kubernetes"
ctxstore "github.com/docker/cli/cli/context/store"
dopts "github.com/docker/cli/opts"
dockerclient "github.com/docker/docker/client"
@@ -73,9 +73,8 @@ func driversForNodeGroup(ctx context.Context, dockerCli command.Cli, ng *store.N
func(i int, n store.Node) {
eg.Go(func() error {
di := build.DriverInfo{
Name: n.Name,
Platform: n.Platforms,
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
Name: n.Name,
Platform: n.Platforms,
}
defer func() {
dis[i] = di
@@ -150,7 +149,7 @@ func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.Client
}
return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
}
return ctxkube.ConfigFromContext(endpointName, s)
return kubernetes.ConfigFromContext(endpointName, s)
}

// clientForEndpoint returns a docker client for an endpoint
@@ -265,10 +264,9 @@ func getDefaultDrivers(ctx context.Context, dockerCli command.Cli, defaultOnly b
}
return []build.DriverInfo{
{
Name: "default",
Driver: d,
ImageOpt: imageopt,
ProxyConfig: storeutil.GetProxyConfig(dockerCli),
Name: "default",
Driver: d,
ImageOpt: imageopt,
},
}, nil
}
@@ -292,7 +290,9 @@ func loadInfoData(ctx context.Context, d *dinfo) error {
return errors.Wrap(err, "listing workers")
}
for _, w := range workers {
d.platforms = append(d.platforms, w.Platforms...)
for _, p := range w.Platforms {
d.platforms = append(d.platforms, p)
}
}
d.platforms = platformutil.Dedupe(d.platforms)
}
@@ -391,17 +391,6 @@ type nginfo struct {
err error
}

// inactive checks if all nodes are inactive for this builder
func (n *nginfo) inactive() bool {
for idx := range n.ng.Nodes {
d := n.drivers[idx]
if d.info != nil && d.info.Status == driver.Running {
return false
}
}
return true
}

func boot(ctx context.Context, ngi *nginfo) (bool, error) {
toBoot := make([]int, 0, len(ngi.drivers))
for i, d := range ngi.drivers {
@@ -416,7 +405,7 @@ func boot(ctx context.Context, ngi *nginfo) (bool, error) {
return false, nil
}

printer := progress.NewPrinter(context.TODO(), os.Stderr, os.Stderr, "auto")
printer := progress.NewPrinter(context.TODO(), os.Stderr, "auto")

baseCtx := ctx
eg, _ := errgroup.WithContext(ctx)
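The `loadInfoData` hunk above replaces an element-by-element copy loop with a single variadic append; the two forms produce identical results, the newer one just makes a single call. A sketch with plain strings standing in for the platform structs:

```go
package commands

// appendLoop is the old form: one append call per element.
func appendLoop(dst, src []string) []string {
	for _, p := range src {
		dst = append(dst, p)
	}
	return dst
}

// appendVariadic is the new form: a single variadic call, same result.
func appendVariadic(dst, src []string) []string {
	return append(dst, src...)
}
```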
@@ -89,11 +89,6 @@ target "mod-outdated" {
  inherits = ["_common"]
  dockerfile = "./hack/dockerfiles/vendor.Dockerfile"
  target = "outdated"
  args = {
    // used to invalidate cache for outdated run stage
    // can be dropped when https://github.com/moby/buildkit/issues/1213 fixed
    _RANDOM = uuidv4()
  }
  output = ["type=cacheonly"]
}

@@ -1,48 +0,0 @@
# CI/CD

## GitHub Actions

Docker provides a [GitHub Action that will build and push your image](https://github.com/docker/build-push-action/#about)
using Buildx. Here is a simple workflow:

```yaml
name: ci

on:
  push:
    branches:
      - 'main'

jobs:
  docker:
    runs-on: ubuntu-latest
    steps:
      -
        name: Set up QEMU
        uses: docker/setup-qemu-action@v1
      -
        name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v1
      -
        name: Login to DockerHub
        uses: docker/login-action@v1
        with:
          username: ${{ secrets.DOCKERHUB_USERNAME }}
          password: ${{ secrets.DOCKERHUB_TOKEN }}
      -
        name: Build and push
        uses: docker/build-push-action@v2
        with:
          push: true
          tags: user/app:latest
```

In this example we are also using three other actions:

* [`setup-buildx`](https://github.com/docker/setup-buildx-action) creates and boots a builder, by default
  using the `docker-container` [builder driver](../reference/buildx_create.md#driver).
  This is **not required but recommended**, as it enables multi-platform builds, cache export, and more.
* [`setup-qemu`](https://github.com/docker/setup-qemu-action) can be useful if you want
  to add QEMU emulation support in order to build against more platforms.
* [`login`](https://github.com/docker/login-action) takes care of logging
  in to a Docker registry.
@@ -1,23 +0,0 @@
# CNI networking

It can be useful to use a bridge network for your builder if, for example, you
encounter network port contention during multiple builds. CNI is not yet
available in the standard BuildKit image, but you can create
[a custom BuildKit image with CNI support](https://github.com/moby/buildkit/blob/master/docs/cni-networking.md).

Build this image:

```console
$ docker buildx build --tag buildkit-cni:local --load .
```

Then [create a `docker-container` builder](../reference/buildx_create.md) that
will use this image:

```console
$ docker buildx create --use \
  --name mybuilder \
  --driver docker-container \
  --driver-opt "image=buildkit-cni:local" \
  --buildkitd-flags "--oci-worker-net=cni"
```
@@ -1,48 +0,0 @@
# Using a custom network

[Create a network](https://docs.docker.com/engine/reference/commandline/network_create/)
named `foonet`:

```console
$ docker network create foonet
```

[Create a `docker-container` builder](../reference/buildx_create.md) named
`mybuilder` that will use this network:

```console
$ docker buildx create --use \
  --name mybuilder \
  --driver docker-container \
  --driver-opt "network=foonet"
```

Boot and [inspect `mybuilder`](../reference/buildx_inspect.md):

```console
$ docker buildx inspect --bootstrap
```

[Inspect the builder container](https://docs.docker.com/engine/reference/commandline/inspect/)
and see what network is being used:

```console
$ docker inspect buildx_buildkit_mybuilder0 --format={{.NetworkSettings.Networks}}
map[foonet:0xc00018c0c0]
```

## What's `buildx_buildkit_mybuilder0`?

`buildx_buildkit_mybuilder0` is the container name. It can be broken down like this:

* `buildx_buildkit_` is a hardcoded prefix
* `mybuilder0` is the name of the node (defaults to builder name + position in the list of nodes)

```console
$ docker buildx ls
NAME/NODE     DRIVER/ENDPOINT              STATUS   PLATFORMS
mybuilder *   docker-container
  mybuilder0  unix:///var/run/docker.sock  running  linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/mips64le, linux/mips64, linux/arm/v7, linux/arm/v6
default       docker
  default     default                      running  linux/amd64, linux/arm64, linux/riscv64, linux/ppc64le, linux/s390x, linux/386, linux/arm/v7, linux/arm/v6
```

@@ -1,63 +0,0 @@
# Using a custom registry configuration

If you [create a `docker-container` or `kubernetes` builder](../reference/buildx_create.md) and
have specified certificates for registries in the [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md),
the files will be copied into the container under `/etc/buildkit/certs` and
the configuration will be updated to reflect that.

Take the following `buildkitd.toml` configuration that will be used for
pushing an image to this registry using self-signed certificates:

```toml
debug = true
[registry."myregistry.com"]
  ca=["/etc/certs/myregistry.pem"]
  [[registry."myregistry.com".keypair]]
    key="/etc/certs/myregistry_key.pem"
    cert="/etc/certs/myregistry_cert.pem"
```
> `/etc/buildkitd.toml`

Here we have configured a self-signed certificate for the `myregistry.com` registry.

Now [create a `docker-container` builder](../reference/buildx_create.md)
that will use this BuildKit configuration:

```console
$ docker buildx create --use \
  --name mybuilder \
  --driver docker-container \
  --config /etc/buildkitd.toml
```

Inspecting the builder container, you can see that the buildkitd configuration
has changed:

```console
$ docker exec -it buildx_buildkit_mybuilder0 cat /etc/buildkit/buildkitd.toml
```
```toml
debug = true

[registry]

  [registry."myregistry.com"]
    ca = ["/etc/buildkit/certs/myregistry.com/myregistry.pem"]

    [[registry."myregistry.com".keypair]]
      cert = "/etc/buildkit/certs/myregistry.com/myregistry_cert.pem"
      key = "/etc/buildkit/certs/myregistry.com/myregistry_key.pem"
```

And the certificates have been copied inside the container:

```console
$ docker exec -it buildx_buildkit_mybuilder0 ls /etc/buildkit/certs/myregistry.com/
myregistry.pem myregistry_cert.pem myregistry_key.pem
```

Now you should be able to push to the registry with this builder:

```console
$ docker buildx build --push --tag myregistry.com/myimage:latest .
```

@@ -1,31 +0,0 @@
# OpenTelemetry support

To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
the `JAEGER_TRACE` environment variable to the collection address using a `driver-opt`.

First create a Jaeger container:

```console
$ docker run -d --name jaeger -p "6831:6831/udp" -p "16686:16686" jaegertracing/all-in-one
```

Then [create a `docker-container` builder](../reference/buildx_create.md)
that will use the Jaeger instance via the `JAEGER_TRACE` env var:

```console
$ docker buildx create --use \
  --name mybuilder \
  --driver docker-container \
  --driver-opt "network=host" \
  --driver-opt "env.JAEGER_TRACE=localhost:6831"
```

Boot and [inspect `mybuilder`](../reference/buildx_inspect.md):

```console
$ docker buildx inspect --bootstrap
```

Buildx commands should be traced at `http://127.0.0.1:16686/`:

![]()

@@ -1,60 +0,0 @@
# Registry mirror

You can define a registry mirror to use for your builds by providing a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
while creating a builder with the [`--config` flags](../reference/buildx_create.md#config).

```toml
debug = true
[registry."docker.io"]
  mirrors = ["mirror.gcr.io"]
```
> `/etc/buildkitd.toml`

> :information_source: `debug = true` has been added to be able to debug requests
> in the BuildKit daemon and see if the mirror is effectively used.

Then [create a `docker-container` builder](../reference/buildx_create.md)
that will use this BuildKit configuration:

```console
$ docker buildx create --use \
  --name mybuilder \
  --driver docker-container \
  --config /etc/buildkitd.toml
```

Boot and [inspect `mybuilder`](../reference/buildx_inspect.md):

```console
$ docker buildx inspect --bootstrap
```

Build an image:

```console
$ docker buildx build --load . -f-<<EOF
FROM alpine
RUN echo "hello world"
EOF
```

Now let's check the BuildKit logs in the builder container:

```console
$ docker logs buildx_buildkit_mybuilder0
```
```text
...
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.container.image.v1+json, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1469 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"774380abda8f4eae9a149e5d5d3efc83\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:57 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788077652182 response.header.x-goog-hash="crc32c=V3DSrg==" response.header.x-goog-hash.1="md5=d0OAq9qPTq6aFJ5dXT78gw==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1469 response.header.x-guploader-uploadid=ADPycduqQipVAXc3tzXmTzKQ2gTT6CV736B2J628smtD1iDytEyiYCgvvdD8zz9BT1J1sASUq9pW_ctUyC4B-v2jvhIxnZTlKg response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=760 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=1471 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:35:13 GMT" response.header.etag="\"35d688bd15327daafcdb4d4395e616a8\"" response.header.expires="Sun, 06 Feb 2022 18:35:13 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:12 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788032100793 response.header.x-goog-hash="crc32c=aWgRjA==" response.header.x-goog-hash.1="md5=NdaIvRUyfar8201DleYWqA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=1471 response.header.x-guploader-uploadid=ADPycdtR-gJYwC7yHquIkJWFFG8FovDySvtmRnZBqlO3yVDanBXh_VqKYt400yhuf0XbQ3ZMB9IZV2vlcyHezn_Pu3a1SMMtiw response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg=fetch spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg="do request" request.header.accept="application/vnd.docker.image.rootfs.diff.tar.gzip, */*" request.header.user-agent=containerd/1.5.8+unknown request.method=GET spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
time="2022-02-06T17:47:48Z" level=debug msg="fetch response received" response.header.accept-ranges=bytes response.header.age=1356 response.header.alt-svc="h3=\":443\"; ma=2592000,h3-29=\":443\"; ma=2592000,h3-Q050=\":443\"; ma=2592000,h3-Q046=\":443\"; ma=2592000,h3-Q043=\":443\"; ma=2592000,quic=\":443\"; ma=2592000; v=\"46,43\"" response.header.cache-control="public, max-age=3600" response.header.content-length=2818413 response.header.content-type=application/octet-stream response.header.date="Sun, 06 Feb 2022 17:25:17 GMT" response.header.etag="\"1d55e7be5a77c4a908ad11bc33ebea1c\"" response.header.expires="Sun, 06 Feb 2022 18:25:17 GMT" response.header.last-modified="Wed, 24 Nov 2021 21:07:06 GMT" response.header.server=UploadServer response.header.x-goog-generation=1637788026431708 response.header.x-goog-hash="crc32c=ZojF+g==" response.header.x-goog-hash.1="md5=HVXnvlp3xKkIrRG8M+vqHA==" response.header.x-goog-metageneration=1 response.header.x-goog-storage-class=STANDARD response.header.x-goog-stored-content-encoding=identity response.header.x-goog-stored-content-length=2818413 response.header.x-guploader-uploadid=ADPycdsebqxiTBJqZ0bv9zBigjFxgQydD2ESZSkKchpE0ILlN9Ibko3C5r4fJTJ4UR9ddp-UBd-2v_4eRpZ8Yo2llW_j4k8WhQ response.status="200 OK" spanID=9460e5b6e64cec91 traceID=b162d3040ddf86d6614e79c66a01a577
...
```

As you can see, requests come from the GCR registry mirror (`response.header.x-goog*`).

@@ -1,33 +0,0 @@
# Resource limiting

## Max parallelism

You can limit the parallelism of the BuildKit solver, which is particularly useful
for low-powered machines, using a [BuildKit daemon configuration](https://github.com/moby/buildkit/blob/master/docs/buildkitd.toml.md)
while creating a builder with the [`--config` flags](../reference/buildx_create.md#config).

```toml
[worker.oci]
  max-parallelism = 4
```
> `/etc/buildkitd.toml`

Now you can [create a `docker-container` builder](../reference/buildx_create.md)
that will use this BuildKit configuration to limit parallelism.

```console
$ docker buildx create --use \
  --name mybuilder \
  --driver docker-container \
  --config /etc/buildkitd.toml
```

## Limit on TCP connections

TCP connections are also now limited to **4 per registry**, with an additional
connection reserved for metadata requests (image config retrieval) rather than
layer pulls and pushes. This per-host limit avoids builds getting stuck while
pulling images, and the dedicated metadata connection improves overall build time.

More info: [moby/buildkit#2259](https://github.com/moby/buildkit/pull/2259)

@@ -29,9 +29,9 @@ Extended build capabilities with BuildKit

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |


<!---MARKER_GEN_END-->

@@ -13,18 +13,18 @@ Build from a file

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Build definition file |
| `--load` | | | Shorthand for `--set=*.output=type=docker` |
| `--metadata-file` | `string` | | Write build result metadata to the file |
| [`--no-cache`](#no-cache) | | | Do not use cache when building the image |
| [`--print`](#print) | | | Print the options without building |
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
| [`--pull`](#pull) | | | Always attempt to pull all referenced images |
| `--push` | | | Shorthand for `--set=*.output=type=registry` |
| [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
| [`-f`](#file), [`--file stringArray`](#file) | Build definition file |
| `--load` | Shorthand for `--set=*.output=type=docker` |
| `--metadata-file string` | Write build result metadata to the file |
| [`--no-cache`](#no-cache) | Do not use cache when building the image |
| [`--print`](#print) | Print the options without building |
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
| [`--pull`](#pull) | Always attempt to pull a newer version of the image |
| `--push` | Shorthand for `--set=*.output=type=registry` |
| [`--set stringArray`](#set) | Override target value (e.g., `targetpattern.key=value`) |


<!---MARKER_GEN_END-->

@@ -99,10 +99,10 @@ $ docker buildx bake -f docker-compose.dev.yaml backend database
You can also use a remote `git` bake definition:

```console
$ docker buildx bake "https://github.com/docker/cli.git#v20.10.11" --print
#1 [internal] load git source https://github.com/docker/cli.git#v20.10.11
$ docker buildx bake "git://github.com/docker/cli#v20.10.11" --print
#1 [internal] load git source git://github.com/docker/cli#v20.10.11
#1 0.745 e8f1871b077b64bcb4a13334b7146492773769f7 refs/tags/v20.10.11
#1 2.022 From https://github.com/docker/cli
#1 2.022 From git://github.com/docker/cli
#1 2.022 * [new tag]         v20.10.11  -> v20.10.11
#1 DONE 2.9s
{
@@ -115,7 +115,7 @@ $ docker buildx bake "https://github.com/docker/cli.git#v20.10.11" --print
  },
  "target": {
    "binary": {
      "context": "https://github.com/docker/cli.git#v20.10.11",
      "context": "git://github.com/docker/cli#v20.10.11",
      "dockerfile": "Dockerfile",
      "args": {
        "BASE_VARIANT": "alpine",
@@ -134,7 +134,7 @@ $ docker buildx bake "https://github.com/docker/cli.git#v20.10.11" --print
}
```

As you can see the context is fixed to `https://github.com/docker/cli.git` even if
As you can see the context is fixed to `git://github.com/docker/cli` even if
[no context is actually defined](https://github.com/docker/cli/blob/2776a6d694f988c0c1df61cad4bfac0f54e481c8/docker-bake.hcl#L17-L26)
in the definition.

@@ -155,7 +155,7 @@ EOT
```

```console
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" --print
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" --print
{
  "target": {
    "default": {
@@ -169,7 +169,7 @@ $ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" --pr

```console
$ touch foo bar
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test"
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test"
...
> [4/4] RUN ls -l && stop:
#8 0.101 total 0
@@ -179,14 +179,14 @@ $ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test"
```

```console
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11" --print
#1 [internal] load git source https://github.com/tonistiigi/buildx.git#remote-test
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#v20.10.11" --print
#1 [internal] load git source git://github.com/tonistiigi/buildx#remote-test
#1 0.429 577303add004dd7efeb13434d69ea030d35f7888 refs/heads/remote-test
#1 CACHED
{
  "target": {
    "default": {
      "context": "https://github.com/docker/cli.git#v20.10.11",
      "context": "git://github.com/docker/cli#v20.10.11",
      "dockerfile": "Dockerfile",
      "dockerfile-inline": "FROM alpine\nWORKDIR /src\nCOPY . .\nRUN ls -l \u0026\u0026 stop\n"
    }
@@ -195,7 +195,7 @@ $ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "htt
```

```console
$ docker buildx bake "https://github.com/tonistiigi/buildx.git#remote-test" "https://github.com/docker/cli.git#v20.10.11"
$ docker buildx bake "git://github.com/tonistiigi/buildx#remote-test" "git://github.com/docker/cli#v20.10.11"
...
> [4/4] RUN ls -l && stop:
#8 0.136 drwxrwxrwx 5 root root 4096 Jul 27 18:31 kubernetes
@@ -264,6 +264,7 @@ $ docker buildx bake --progress=plain
...
```

### <a name="pull"></a> Always attempt to pull a newer version of the image (--pull)

Same as `build --pull`.
@@ -277,6 +278,9 @@ Same as `build --pull`.
Override target configurations from command line. The pattern matching syntax
is defined in https://golang.org/pkg/path/#Match.

**Examples**

```console
$ docker buildx bake --set target.args.mybuildarg=value
$ docker buildx bake --set target.platform=linux/arm64
@@ -309,7 +313,8 @@ groups to inherit from.
Note: the design of the bake command is a work in progress; the user experience may change
based on feedback.

HCL definition example:

**Example HCL definition**

```hcl
group "default" {
@@ -334,8 +339,8 @@ target "db" {

Complete list of valid target fields:

`args`, `cache-from`, `cache-to`, `context`, `contexts`, `dockerfile`, `inherits`, `labels`,
`no-cache`, `no-cache-filter`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`
`args`, `cache-from`, `cache-to`, `context`, `dockerfile`, `inherits`, `labels`,
`no-cache`, `output`, `platform`, `pull`, `secrets`, `ssh`, `tags`, `target`

### Global scope attributes

@@ -445,28 +450,6 @@ target "webapp" {
}
```

alternatively, in JSON format:

```json
{
  "variable": {
    "TAG": {
      "default": "latest"
    }
  },
  "group": {
    "default": {
      "targets": ["webapp"]
    }
  },
  "target": {
    "webapp": {
      "tags": ["docker.io/username/webapp:${TAG}"]
    }
  }
}
```

```console
$ docker buildx bake --print webapp
{
@@ -794,77 +777,6 @@ $ docker buildx bake --print app
}
```

### Defining additional build contexts and linking targets

In addition to the main `context` key that defines the build context, each target can also define additional named contexts with a map defined with key `contexts`. These values map to the `--build-context` flag in the [build command](buildx_build.md#build-context).

Inside the Dockerfile these contexts can be used with the `FROM` instruction or `--from` flag.

The value can be a local source directory, a container image (with `docker-image://` prefix), a Git URL, an HTTP URL, or the name of another target in the Bake file (with `target:` prefix).

#### Pinning alpine image

```Dockerfile
# Dockerfile
FROM alpine
RUN echo "Hello world"
```

```hcl
# docker-bake.hcl
target "app" {
  contexts = {
    alpine = "docker-image://alpine:3.13"
  }
}
```
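
With both files in the working directory, printing the target shows the pinned
context being picked up (a usage sketch, not part of the original reference):

```console
$ docker buildx bake --print app
```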

#### Using a secondary source directory

```Dockerfile
# Dockerfile

FROM scratch AS src

FROM golang
COPY --from=src . .
```

```hcl
# docker-bake.hcl
target "app" {
  contexts = {
    src = "../path/to/source"
  }
}
```

#### Using a result of one target as a base image in another target

To use a result of one target as a build context of another, specify the target name with `target:` prefix.

```Dockerfile
# Dockerfile
FROM baseapp
RUN echo "Hello world"
```

```hcl
# docker-bake.hcl

target "base" {
  dockerfile = "baseapp.Dockerfile"
}

target "app" {
  contexts = {
    baseapp = "target:base"
  }
}
```
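
Building `app` then causes bake to build the `base` target first and substitute
its result for the `baseapp` image (a usage sketch, not part of the original reference):

```console
$ docker buildx bake app
```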

Please note that in most cases you should just use a single multi-stage Dockerfile with multiple targets for similar behavior. This case is recommended when you have multiple Dockerfiles that can't be easily merged into one.

### Extension field with Compose

[Special extension](https://github.com/compose-spec/compose-spec/blob/master/spec.md#extension)
@@ -976,7 +888,7 @@ $ docker buildx bake --print
Complete list of valid fields for `x-bake`:

`tags`, `cache-from`, `cache-to`, `secret`, `ssh`, `platforms`, `output`,
`pull`, `no-cache`, `no-cache-filter`
`pull`, `no-cache`

### Built-in variables

@@ -13,36 +13,34 @@ Start a build

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--add-host`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
| [`--build-context`](#build-context) | `stringArray` | | Additional build contexts (e.g., name=path) |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`--cache-from`](#cache-from) | `stringArray` | | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
| [`--cache-to`](#cache-to) | `stringArray` | | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
| [`--cgroup-parent`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | `string` | | Optional parent cgroup for the container |
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | `string` | | Name of the Dockerfile (default: `PATH/Dockerfile`) |
| `--iidfile` | `string` | | Write the image ID to the file |
| `--label` | `stringArray` | | Set metadata for an image |
| [`--load`](#load) | | | Shorthand for `--output=type=docker` |
| [`--metadata-file`](#metadata-file) | `string` | | Write build result metadata to the file |
| `--network` | `string` | `default` | Set the networking mode for the `RUN` instructions during build |
| `--no-cache` | | | Do not use cache when building the image |
| `--no-cache-filter` | `stringArray` | | Do not cache specified stages |
| [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
| [`--platform`](#platform) | `stringArray` | | Set target platform for build |
| [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
| `--pull` | | | Always attempt to pull all referenced images |
| [`--push`](#push) | | | Shorthand for `--output=type=registry` |
| `-q`, `--quiet` | | | Suppress the build output and print image ID on success |
| [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
| [`--shm-size`](#shm-size) | `bytes` | `0` | Size of `/dev/shm` |
| [`--ssh`](#ssh) | `stringArray` | | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | `stringArray` | | Name and optionally a tag (format: `name:tag`) |
| [`--target`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | `string` | | Set the target build stage to build |
| [`--ulimit`](#ulimit) | `ulimit` | | Ulimit options |
| Name | Description |
| --- | --- |
| [`--add-host stringSlice`](https://docs.docker.com/engine/reference/commandline/build/#add-entries-to-container-hosts-file---add-host) | Add a custom host-to-IP mapping (format: `host:ip`) |
| [`--allow stringSlice`](#allow) | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
| [`--build-arg stringArray`](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg) | Set build-time variables |
| [`--builder string`](#builder) | Override the configured builder instance |
| [`--cache-from stringArray`](#cache-from) | External cache sources (e.g., `user/app:cache`, `type=local,src=path/to/dir`) |
| [`--cache-to stringArray`](#cache-to) | Cache export destinations (e.g., `user/app:cache`, `type=local,dest=path/to/dir`) |
| [`--cgroup-parent string`](https://docs.docker.com/engine/reference/commandline/build/#use-a-custom-parent-cgroup---cgroup-parent) | Optional parent cgroup for the container |
| [`-f`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f), [`--file string`](https://docs.docker.com/engine/reference/commandline/build/#specify-a-dockerfile--f) | Name of the Dockerfile (default: `PATH/Dockerfile`) |
| `--iidfile string` | Write the image ID to the file |
| `--label stringArray` | Set metadata for an image |
| [`--load`](#load) | Shorthand for `--output=type=docker` |
| `--metadata-file string` | Write build result metadata to the file |
| `--network string` | Set the networking mode for the RUN instructions during build |
| `--no-cache` | Do not use cache when building the image |
| [`-o`](#output), [`--output stringArray`](#output) | Output destination (format: `type=local,dest=path`) |
| [`--platform stringArray`](#platform) | Set target platform for build |
| [`--progress string`](#progress) | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
| `--pull` | Always attempt to pull a newer version of the image |
| [`--push`](#push) | Shorthand for `--output=type=registry` |
| `-q`, `--quiet` | Suppress the build output and print image ID on success |
| `--secret stringArray` | Secret file to expose to the build (format: `id=mysecret,src=/local/secret`) |
| [`--shm-size bytes`](#shm-size) | Size of `/dev/shm` |
| `--ssh stringArray` | SSH agent socket or keys to expose to the build (format: `default\|<id>[=<socket>\|<key>[,<key>]]`) |
| [`-t`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t), [`--tag stringArray`](https://docs.docker.com/engine/reference/commandline/build/#tag-an-image--t) | Name and optionally a tag (format: `name:tag`) |
| [`--target string`](https://docs.docker.com/engine/reference/commandline/build/#specifying-target-build-stage---target) | Set the target build stage to build. |
| [`--ulimit ulimit`](#ulimit) | Ulimit options |


<!---MARKER_GEN_END-->

@@ -54,200 +52,80 @@ to the UI of `docker build` command and takes the same flags and arguments.

For documentation on most of these flags, refer to the [`docker build`
documentation](https://docs.docker.com/engine/reference/commandline/build/). In
here we'll document a subset of the new flags.
here we’ll document a subset of the new flags.

## Examples

### <a name="allow"></a> Allow extra privileged entitlement (--allow)

```
--allow=ENTITLEMENT
```

Allow extra privileged entitlement. List of entitlements:

- `network.host` - Allows executions with host networking.
- `security.insecure` - Allows executions without sandbox. See
  [related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).

For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags))

**Examples**

```console
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
$ docker buildx build --allow security.insecure .
```

### <a name="build-arg"></a> Set build-time variables (--build-arg)

Same as [`docker build` command](https://docs.docker.com/engine/reference/commandline/build/#set-build-time-variables---build-arg).

There are also useful built-in build args like:

* `BUILDKIT_CONTEXT_KEEP_GIT_DIR=<bool>` trigger git context to keep the `.git` directory
* `BUILDKIT_INLINE_BUILDINFO_ATTRS=<bool>` inline build info attributes in image config or not
* `BUILDKIT_INLINE_CACHE=<bool>` inline cache metadata to image config or not
* `BUILDKIT_MULTI_PLATFORM=<bool>` opt into deterministic output regardless of multi-platform output or not

```console
$ docker buildx build --build-arg BUILDKIT_MULTI_PLATFORM=1 .
```

More built-in build args can be found in [dockerfile frontend docs](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#built-in-build-args).

### <a name="build-context"></a> Additional build contexts (--build-context)

```
--build-context=name=VALUE
```

Define additional build context with specified contents. In Dockerfile the context can be accessed when `FROM name` or `--from=name` is used.
When Dockerfile defines a stage with the same name it is overwritten.

The value can be a local source directory, container image (with docker-image:// prefix), Git or HTTP URL.

Replace `alpine:latest` with a pinned one:

```console
$ docker buildx build --build-context alpine=docker-image://alpine@sha256:0123456789 .
```

Expose a secondary local source directory:

```console
$ docker buildx build --build-context project=path/to/project/source .
# docker buildx build --build-context project=https://github.com/myuser/project.git .
```

```Dockerfile
FROM alpine
COPY --from=project myfile /
```

### <a name="builder"></a> Override the configured builder instance (--builder)

Same as [`buildx --builder`](buildx.md#builder).

### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)
### <a name="platform"></a> Set the target platforms for the build (--platform)

```
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
--platform=value[,value]
```

Use an external cache source for a build. Supported types are `registry`,
`local` and `gha`.
Set the target platform for the build. All `FROM` commands inside the Dockerfile
without their own `--platform` flag will pull base images for this platform and
this value will also be the platform of the resulting image. The default value
will be the current platform of the buildkit daemon.

- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
  can import cache from a cache manifest or (special) image configuration on the
  registry.
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
  import cache from local files previously exported with `--cache-to`.
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
  can import cache from a previously exported cache with `--cache-to` in your
  GitHub repository
When using `docker-container` driver with `buildx`, this flag can accept multiple
values as an input separated by a comma. With multiple values the result will be
built for all of the specified platforms and joined together into a single manifest
list.

If no type is specified, `registry` exporter is used with a specified reference.
If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
support for the specified platform. In a clean setup, you can only execute `RUN`
commands for your system architecture.
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
launchers for secondary architectures, buildx will pick them up automatically.
Docker Desktop releases come with `binfmt_misc` automatically configured for `arm64`
and `arm` architectures. You can see what runtime platforms your current builder
instance supports by running `docker buildx inspect --bootstrap`.

`docker` driver currently only supports importing build cache from the registry.
Inside a `Dockerfile`, you can access the current platform value through
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
for the full description of automatic platform argument variants.

The formatting for the platform specifier is defined in the [containerd source
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).

**Examples**

```console
$ docker buildx build --cache-from=user/app:cache .
$ docker buildx build --cache-from=user/app .
$ docker buildx build --cache-from=type=registry,ref=user/app .
$ docker buildx build --cache-from=type=local,src=path/to/cache .
$ docker buildx build --cache-from=type=gha .
$ docker buildx build --platform=linux/arm64 .
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
$ docker buildx build --platform=darwin .
```

More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache

### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)
### <a name="progress"></a> Set type of progress output (--progress)

```
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
--progress=VALUE
```

Export build cache to an external cache destination. Supported types are
`registry`, `local`, `inline` and `gha`.
Set type of progress output (auto, plain, tty). Use plain to show container
output (default "auto").

- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
- [`local` type](https://github.com/moby/buildkit#local-directory-1) type
  exports cache to a local directory on the client.
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
  type writes the cache metadata into the image configuration.
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
  type exports cache through the [Github Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).
> You can also use the `BUILDKIT_PROGRESS` environment variable to set
> its value.

`docker` driver currently only supports exporting inline cache metadata to image
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
to trigger inline cache exporter.

Attribute key:

- `mode` - Specifies how many layers are exported with the cache. `min` only
  exports layers already in the final build stage, `max` exports layers for
  all stages. Metadata is always exported for the whole build.
The following example uses `plain` output during the build:

```console
$ docker buildx build --cache-to=user/app:cache .
$ docker buildx build --cache-to=type=inline .
$ docker buildx build --cache-to=type=registry,ref=user/app .
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
$ docker buildx build --cache-to=type=gha .
```
$ docker buildx build --load --progress=plain .

More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache
#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 227B 0.0s done
#1 DONE 0.1s

### <a name="load"></a> Load the single-platform build result to `docker images` (--load)

Shorthand for [`--output=type=docker`](#docker). Will automatically load the
single-platform build result to `docker images`.
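
For example (a usage sketch; the tag name is our own), this builds for the
current platform and makes the image available to `docker run`:

```console
$ docker buildx build --load -t myapp:dev .
$ docker run --rm myapp:dev
```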

### <a name="metadata-file"></a> Write build result metadata to the file (--metadata-file)

To output build metadata such as the image digest, pass the `--metadata-file` flag.
The metadata will be written as a JSON object to the specified file. The
directory of the specified file must already exist and be writable.

```console
$ docker buildx build --load --metadata-file metadata.json .
$ cat metadata.json
```
```json
{
  "containerimage.buildinfo": {
    "frontend": "dockerfile.v0",
    "attrs": {
      "context": "https://github.com/crazy-max/buildkit-buildsources-test.git#master",
      "filename": "Dockerfile",
      "source": "docker/dockerfile:master"
    },
    "sources": [
      {
        "type": "docker-image",
        "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
        "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
      },
      {
        "type": "docker-image",
        "ref": "docker.io/library/alpine:3.13",
        "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
      }
    ]
  },
  "containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
  "containerimage.descriptor": {
    "annotations": {
      "config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
      "org.opencontainers.image.created": "2022-02-08T21:28:03Z"
    },
    "digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3",
    "mediaType": "application/vnd.oci.image.manifest.v1+json",
    "size": 506
  },
  "containerimage.digest": "sha256:19ffeab6f8bc9293ac2c3fdf94ebe28396254c993aea0b5a542cfb02e0883fa3"
}
#2 [internal] load .dockerignore
#2 transferring context: 129B 0.0s done
#2 DONE 0.0s
...
```

### <a name="output"></a> Set the export action for the build result (-o, --output)
@@ -268,6 +146,8 @@ If just the path is specified as a value, `buildx` will use the local exporter
with this path as the destination. If the value is "-", `buildx` will use `tar`
exporter and write to `stdout`.

**Examples**

```console
$ docker buildx build -o . .
$ docker buildx build -o outdir .
@@ -321,7 +201,7 @@ The most common usecase for multi-platform images is to directly push to a regis
Attribute keys:

- `dest` - destination path where tarball will be written. If not specified the
  tar will be loaded automatically to the current docker instance.
- `context` - name for the docker context where to import the result

#### `image`
@@ -339,163 +219,118 @@ Attribute keys:

The `registry` exporter is a shortcut for `type=image,push=true`.

### <a name="platform"></a> Set the target platforms for the build (--platform)

```
--platform=value[,value]
```

Set the target platform for the build. All `FROM` commands inside the Dockerfile
without their own `--platform` flag will pull base images for this platform and
this value will also be the platform of the resulting image. The default value
will be the current platform of the buildkit daemon.

When using `docker-container` driver with `buildx`, this flag can accept multiple
values as an input separated by a comma. With multiple values the result will be
built for all of the specified platforms and joined together into a single manifest
list.

If the `Dockerfile` needs to invoke the `RUN` command, the builder needs runtime
support for the specified platform. In a clean setup, you can only execute `RUN`
commands for your system architecture.
If your kernel supports [`binfmt_misc`](https://en.wikipedia.org/wiki/Binfmt_misc)
launchers for secondary architectures, buildx will pick them up automatically.
Docker Desktop releases come with `binfmt_misc` automatically configured for `arm64`
and `arm` architectures. You can see what runtime platforms your current builder
instance supports by running `docker buildx inspect --bootstrap`.

Inside a `Dockerfile`, you can access the current platform value through
`TARGETPLATFORM` build argument. Please refer to the [`docker build`
documentation](https://docs.docker.com/engine/reference/builder/#automatic-platform-args-in-the-global-scope)
for the full description of automatic platform argument variants.

The formatting for the platform specifier is defined in the [containerd source
code](https://github.com/containerd/containerd/blob/v1.4.3/platforms/platforms.go#L63).

```console
$ docker buildx build --platform=linux/arm64 .
$ docker buildx build --platform=linux/amd64,linux/arm64,linux/arm/v7 .
$ docker buildx build --platform=darwin .
```

### <a name="progress"></a> Set type of progress output (--progress)

```
--progress=VALUE
```

Set type of progress output (auto, plain, tty). Use plain to show container
output (default "auto").

> You can also use the `BUILDKIT_PROGRESS` environment variable to set
> its value.

The following example uses `plain` output during the build:

```console
$ docker buildx build --load --progress=plain .

#1 [internal] load build definition from Dockerfile
#1 transferring dockerfile: 227B 0.0s done
#1 DONE 0.1s

#2 [internal] load .dockerignore
#2 transferring context: 129B 0.0s done
#2 DONE 0.0s
...
```

### <a name="push"></a> Push the build result to a registry (--push)

Shorthand for [`--output=type=registry`](#registry). Will automatically push the
build result to registry.

### <a name="secret"></a> Secret to expose to the build (--secret)
### <a name="load"></a> Load the single-platform build result to `docker images` (--load)

Shorthand for [`--output=type=docker`](#docker). Will automatically load the
single-platform build result to `docker images`.

### <a name="cache-from"></a> Use an external cache source for a build (--cache-from)

```
--secret=[type=TYPE[,KEY=VALUE]]
--cache-from=[NAME|type=TYPE[,KEY=VALUE]]
```

Exposes secret to the build. The secret can be used by the build using
[`RUN --mount=type=secret` mount](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#run---mounttypesecret).
Use an external cache source for a build. Supported types are `registry`,
`local` and `gha`.

If `type` is unset it will be detected. Supported types are:
- [`registry` source](https://github.com/moby/buildkit#registry-push-image-and-cache-separately)
  can import cache from a cache manifest or (special) image configuration on the
  registry.
- [`local` source](https://github.com/moby/buildkit#local-directory-1) can
  import cache from local files previously exported with `--cache-to`.
- [`gha` source](https://github.com/moby/buildkit#github-actions-cache-experimental)
  can import cache from a previously exported cache with `--cache-to` in your
  GitHub repository

#### `file`
If no type is specified, `registry` exporter is used with a specified reference.

Attribute keys:
`docker` driver currently only supports importing build cache from the registry.

- `id` - ID of the secret. Defaults to basename of the `src` path.
- `src`, `source` - Secret filename. `id` used if unset.

```dockerfile
# syntax=docker/dockerfile:1.3
FROM python:3
RUN pip install awscli
RUN --mount=type=secret,id=aws,target=/root/.aws/credentials \
  aws s3 cp s3://... ...
```
**Examples**

```console
$ docker buildx build --secret id=aws,src=$HOME/.aws/credentials .
$ docker buildx build --cache-from=user/app:cache .
$ docker buildx build --cache-from=user/app .
$ docker buildx build --cache-from=type=registry,ref=user/app .
$ docker buildx build --cache-from=type=local,src=path/to/cache .
$ docker buildx build --cache-from=type=gha .
```

#### `env`
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache

Attribute keys:
### <a name="cache-to"></a> Export build cache to an external cache destination (--cache-to)

- `id` - ID of the secret. Defaults to `env` name.
- `env` - Secret environment variable. `id` used if unset, otherwise will look for `src`, `source` if `id` unset.

```dockerfile
# syntax=docker/dockerfile:1.3
FROM node:alpine
RUN --mount=type=bind,target=. \
  --mount=type=secret,id=SECRET_TOKEN \
  SECRET_TOKEN=$(cat /run/secrets/SECRET_TOKEN) yarn run test
```
--cache-to=[NAME|type=TYPE[,KEY=VALUE]]
```

Export build cache to an external cache destination. Supported types are
`registry`, `local`, `inline` and `gha`.

- [`registry` type](https://github.com/moby/buildkit#registry-push-image-and-cache-separately) exports build cache to a cache manifest in the registry.
- [`local` type](https://github.com/moby/buildkit#local-directory-1) type
  exports cache to a local directory on the client.
- [`inline` type](https://github.com/moby/buildkit#inline-push-image-and-cache-together)
  type writes the cache metadata into the image configuration.
- [`gha` type](https://github.com/moby/buildkit#github-actions-cache-experimental)
  type exports cache through the [Github Actions Cache service API](https://github.com/tonistiigi/go-actions-cache/blob/master/api.md#authentication).

`docker` driver currently only supports exporting inline cache metadata to image
configuration. Alternatively, `--build-arg BUILDKIT_INLINE_CACHE=1` can be used
to trigger inline cache exporter.

Attribute key:

- `mode` - Specifies how many layers are exported with the cache. `min` only
  exports layers already in the final build stage, `max` exports layers for
  all stages. Metadata is always exported for the whole build.

**Examples**

```console
$ SECRET_TOKEN=token docker buildx build --secret id=SECRET_TOKEN .
$ docker buildx build --cache-to=user/app:cache .
$ docker buildx build --cache-to=type=inline .
$ docker buildx build --cache-to=type=registry,ref=user/app .
$ docker buildx build --cache-to=type=local,dest=path/to/cache .
$ docker buildx build --cache-to=type=gha .
```

### <a name="shm-size"></a> Size of /dev/shm (--shm-size)
More info about cache exporters and available attributes: https://github.com/moby/buildkit#export-cache

### <a name="allow"></a> Allow extra privileged entitlement (--allow)

```
--allow=ENTITLEMENT
```

Allow extra privileged entitlement. List of entitlements:

- `network.host` - Allows executions with host networking.
- `security.insecure` - Allows executions without sandbox. See
  [related Dockerfile extensions](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/experimental.md#run---securityinsecuresandbox).

For entitlements to be enabled, the `buildkitd` daemon also needs to allow them
with `--allow-insecure-entitlement` (see [`create --buildkitd-flags`](buildx_create.md#buildkitd-flags))

**Examples**

```console
$ docker buildx create --use --name insecure-builder --buildkitd-flags '--allow-insecure-entitlement security.insecure'
$ docker buildx build --allow security.insecure .
```

### <a name="shm-size"></a> Size of `/dev/shm` (--shm-size)

The format is `<number><unit>`. `number` must be greater than `0`. Unit is
optional and can be `b` (bytes), `k` (kilobytes), `m` (megabytes), or `g`
(gigabytes). If you omit the unit, the system uses bytes.
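
For example (a usage sketch):

```console
$ docker buildx build --shm-size=512m .
```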

### <a name="ssh"></a> SSH agent socket or keys to expose to the build (--ssh)

```
--ssh=default|<id>[=<socket>|<key>[,<key>]]
```

This can be useful when some commands in your Dockerfile need specific SSH
authentication (e.g., cloning a private repository).

`--ssh` exposes SSH agent socket or keys to the build and can be used with the
[`RUN --mount=type=ssh` mount](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#run---mounttypessh).

Example to access Gitlab using an SSH agent socket:

```dockerfile
# syntax=docker/dockerfile:1.3
FROM alpine
RUN apk add --no-cache openssh-client
RUN mkdir -p -m 0700 ~/.ssh && ssh-keyscan gitlab.com >> ~/.ssh/known_hosts
RUN --mount=type=ssh ssh -q -T git@gitlab.com 2>&1 | tee /hello
# "Welcome to GitLab, @GITLAB_USERNAME_ASSOCIATED_WITH_SSHKEY" should be printed here
# when the build progress type is set to `plain`.
```

```console
$ eval $(ssh-agent)
$ ssh-add ~/.ssh/id_rsa
(Input your passphrase here)
$ docker buildx build --ssh default=$SSH_AUTH_SOCK .
```

### <a name="ulimit"></a> Set ulimits (--ulimit)

`--ulimit` is specified with a soft and hard limit as such:
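
For example (a usage sketch of the `<soft>:<hard>` form):

```console
$ docker buildx build --ulimit nofile=1024:1024 .
```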
|
||||
|

@@ -9,19 +9,19 @@ Create a new builder instance

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--append`](#append) | | | Append a node to builder instead of changing it |
| `--bootstrap` | | | Boot builder after creation |
| [`--buildkitd-flags`](#buildkitd-flags) | `string` | | Flags for buildkitd daemon |
| [`--config`](#config) | `string` | | BuildKit config file |
| [`--driver`](#driver) | `string` | | Driver to use (available: `docker`, `docker-container`, `kubernetes`) |
| [`--driver-opt`](#driver-opt) | `stringArray` | | Options for the driver |
| [`--leave`](#leave) | | | Remove a node from builder instead of changing it |
| [`--name`](#name) | `string` | | Builder instance name |
| [`--node`](#node) | `string` | | Create/modify node with given name |
| [`--platform`](#platform) | `stringArray` | | Fixed platforms for current node |
| [`--use`](#use) | | | Set the current builder instance |
| Name | Description |
| --- | --- |
| [`--append`](#append) | Append a node to builder instead of changing it |
| `--bootstrap` | Boot builder after creation |
| [`--buildkitd-flags string`](#buildkitd-flags) | Flags for buildkitd daemon |
| [`--config string`](#config) | BuildKit config file |
| [`--driver string`](#driver) | Driver to use (available: `docker`, `docker-container`, `kubernetes`) |
| [`--driver-opt stringArray`](#driver-opt) | Options for the driver |
| [`--leave`](#leave) | Remove a node from builder instead of changing it |
| [`--name string`](#name) | Builder instance name |
| [`--node string`](#node) | Create/modify node with given name |
| [`--platform stringArray`](#platform) | Fixed platforms for current node |
| [`--use`](#use) | Set the current builder instance |


<!---MARKER_GEN_END-->
@@ -47,6 +47,8 @@ The `--append` flag changes the action of the command to append a new node to an
existing builder specified by `--name`. Buildx will choose an appropriate node
for a build based on the platforms it supports.

**Examples**

```console
$ docker buildx create mycontext1
eager_beaver
@@ -65,6 +67,8 @@ Adds flags when starting the buildkitd daemon. They take precedence over the
configuration file specified by [`--config`](#config). See `buildkitd --help`
for the available flags.

**Example**

```
--buildkitd-flags '--debug --debugaddr 0.0.0.0:6666'
```
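
In a full command this might look like the following sketch (the builder name is illustrative):

```console
$ docker buildx create --name debug-builder --buildkitd-flags '--debug --debugaddr 0.0.0.0:6666' --use
```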

@@ -128,22 +132,46 @@ Passes additional driver-specific options. Details for each driver:

- `docker` - No driver options
- `docker-container`
  - `image=IMAGE` - Sets the container image to be used for running buildkit.
  - `network=NETMODE` - Sets the network mode for running the buildkit container.
  - `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`.
  - `image=IMAGE` - Sets the container image to be used for running buildkit.
  - `network=NETMODE` - Sets the network mode for running the buildkit container.
  - `cgroup-parent=CGROUP` - Sets the cgroup parent of the buildkit container if docker is using the "cgroupfs" driver. Defaults to `/docker/buildx`.
- `kubernetes`
  - `image=IMAGE` - Sets the container image to be used for running buildkit.
  - `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
  - `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
  - `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
  - `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
  - `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
  - `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
  - `nodeselector="label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No defaults. Example `nodeselector=kubernetes.io/arch=arm64`
  - `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
  - `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
  - `qemu.install=(true|false)` - Install QEMU emulation for multi-platform support.
  - `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`
  - `image=IMAGE` - Sets the container image to be used for running buildkit.
  - `namespace=NS` - Sets the Kubernetes namespace. Defaults to the current namespace.
  - `replicas=N` - Sets the number of `Pod` replicas. Defaults to 1.
  - `requests.cpu` - Sets the request CPU value specified in units of Kubernetes CPU. Example `requests.cpu=100m`, `requests.cpu=2`
  - `requests.memory` - Sets the request memory value specified in bytes or with a valid suffix. Example `requests.memory=500Mi`, `requests.memory=4G`
  - `limits.cpu` - Sets the limit CPU value specified in units of Kubernetes CPU. Example `limits.cpu=100m`, `limits.cpu=2`
  - `limits.memory` - Sets the limit memory value specified in bytes or with a valid suffix. Example `limits.memory=500Mi`, `limits.memory=4G`
  - `nodeselector="label1=value1,label2=value2"` - Sets the kv of `Pod` nodeSelector. No defaults. Example `nodeselector=kubernetes.io/arch=arm64`
  - `rootless=(true|false)` - Run the container as a non-root user without `securityContext.privileged`. [Using Ubuntu host kernel is recommended](https://github.com/moby/buildkit/blob/master/docs/rootless.md). Defaults to false.
  - `loadbalance=(sticky|random)` - Load-balancing strategy. If set to "sticky", the pod is chosen using the hash of the context path. Defaults to "sticky"
  - `qemu.install=(true|false)` - Install QEMU emulation for multi-platform support.
  - `qemu.image=IMAGE` - Sets the QEMU emulation image. Defaults to `tonistiigi/binfmt:latest`

**Examples**

#### Use a custom network

```console
$ docker network create foonet
$ docker buildx create --name builder --driver docker-container --driver-opt network=foonet --use
$ docker buildx inspect --bootstrap
$ docker inspect buildx_buildkit_builder0 --format={{.NetworkSettings.Networks}}
map[foonet:0xc00018c0c0]
```

#### OpenTelemetry support

To capture the trace to [Jaeger](https://github.com/jaegertracing/jaeger), set
the `JAEGER_TRACE` environment variable to the collection address using a `driver-opt`:

```console
$ docker run -d --name jaeger -p 6831:6831/udp -p 16686:16686 jaegertracing/all-in-one
$ docker buildx create --name builder --driver docker-container --driver-opt network=host --driver-opt env.JAEGER_TRACE=localhost:6831 --use
$ docker buildx inspect --bootstrap
# buildx command should be traced at http://127.0.0.1:16686/
```
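
#### Use the kubernetes driver

A sketch combining several of the kubernetes driver options listed above (namespace and replica count are illustrative):

```console
$ docker buildx create --name k8s-builder --driver kubernetes --driver-opt namespace=buildkit --driver-opt replicas=2 --use
$ docker buildx inspect --bootstrap
```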

### <a name="leave"></a> Remove a node from a builder (--leave)

@@ -151,6 +179,8 @@ The `--leave` flag changes the action of the command to remove a node from a
builder. The builder needs to be specified with `--name` and the node that is removed
is set with `--node`.

**Examples**

```console
$ docker buildx create --name mybuilder --node mybuilder0 --leave
```
@@ -174,7 +204,7 @@ The `--node` flag specifies the name of the node to be created or modified. If
none is specified, it is the name of the builder it belongs to, with an index
number suffix.

### <a name="platform"></a> Set the platforms supported by the node (--platform)
### <a name="platform"></a> Set the platforms supported by the node

```
--platform PLATFORMS
@@ -186,12 +216,14 @@ will also automatically detect the platforms it supports, but manual values take
priority over the detected ones and can be used when multiple nodes support
building for the same platform.

**Examples**

```console
$ docker buildx create --platform linux/amd64
$ docker buildx create --platform linux/arm64,linux/arm/v8
```

### <a name="use"></a> Automatically switch to the newly created builder (--use)
### <a name="use"></a> Automatically switch to the newly created builder

The `--use` flag automatically switches the current builder to the newly created
one. Equivalent to running `docker buildx use $(docker buildx create ...)`.

@@ -9,11 +9,11 @@ Disk usage

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| `--filter` | `filter` | | Provide filter values |
| `--verbose` | | | Provide a more verbose output |
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
| `--filter filter` | Provide filter values |
| `--verbose` | Provide a more verbose output |


<!---MARKER_GEN_END-->

@@ -12,14 +12,14 @@ Commands to work on images in registry
| Name | Description |
| --- | --- |
| [`create`](buildx_imagetools_create.md) | Create a new image based on source images |
| [`inspect`](buildx_imagetools_inspect.md) | Show details of an image in the registry |
| [`inspect`](buildx_imagetools_inspect.md) | Show details of image in the registry |


### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |


<!---MARKER_GEN_END-->

@@ -9,19 +9,22 @@ Create a new image based on source images

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--append`](#append) | | | Append to existing manifest |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`--dry-run`](#dry-run) | | | Show final image instead of pushing |
| [`-f`](#file), [`--file`](#file) | `stringArray` | | Read source descriptor from file |
| [`-t`](#tag), [`--tag`](#tag) | `stringArray` | | Set reference for new image |
| Name | Description |
| --- | --- |
| [`--append`](#append) | Append to existing manifest |
| [`--builder string`](#builder) | Override the configured builder instance |
| [`--dry-run`](#dry-run) | Show final image instead of pushing |
| [`-f`](#file), [`--file stringArray`](#file) | Read source descriptor from file |
| [`-t`](#tag), [`--tag stringArray`](#tag) | Set reference for new image |


<!---MARKER_GEN_END-->

## Description

Imagetools contains commands for working with manifest lists in the registry.
These commands are useful for inspecting multi-platform build results.

Create a new manifest list based on source manifests. The source manifests can
be manifest lists or single platform distribution manifests and must already
exist in the registry where the new manifest is created. If only one source is
@@ -54,15 +57,16 @@ or a JSON of OCI descriptor object.
In order to define annotations or additional platform properties like `os.version` and
`os.features` you need to add them in the OCI descriptor object encoded in JSON.

```console
$ docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
$ docker buildx imagetools create -f descr.json myuser/image
```
docker buildx imagetools inspect --raw alpine | jq '.manifests[0] | .platform."os.version"="10.1"' > descr.json
docker buildx imagetools create -f descr.json myuser/image
```

The descriptor in the file is merged with the existing descriptor in the registry if it exists.

The supported fields for the descriptor are defined in the [OCI spec](https://github.com/opencontainers/image-spec/blob/master/descriptor.md#properties).


### <a name="tag"></a> Set reference for new image (-t, --tag)

```
@@ -71,7 +75,10 @@ The supported fields for the descriptor are defined in [OCI spec](https://github

Use the `-t` or `--tag` flag to set the name of the image to be created.

**Examples**

```console
$ docker buildx imagetools create --dry-run alpine@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907 sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204

$ docker buildx imagetools create -t tonistiigi/myapp -f image1 -f image2
```

@@ -5,57 +5,40 @@ docker buildx imagetools inspect [OPTIONS] NAME
```

<!---MARKER_GEN_START-->
Show details of an image in the registry
Show details of image in the registry

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`--format`](#format) | `string` | `{{.Manifest}}` | Format the output using the given Go template |
| [`--raw`](#raw) | | | Show original, unformatted JSON manifest |
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
| [`--raw`](#raw) | Show original JSON manifest |


<!---MARKER_GEN_END-->

## Description

Show details of an image in the registry.
Show details of image in the registry.

Example:

```console
$ docker buildx imagetools inspect alpine

Name:      docker.io/library/alpine:latest
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
Digest:    sha256:21a3deaa0d32a8057914f36584b5288d2e5ecc984380bc0118285c70fa8c9300
Digest:    sha256:28ef97b8686a0b5399129e9b763d5b7e5ff03576aa5580d6f4182a49c5fe1913

Manifests:
  Name:      docker.io/library/alpine:latest@sha256:e7d88de73db3d3fd9b2d63aa7f447a10fd0220b7cbf39803c803f2af9ba256b3
  Name:      docker.io/library/alpine:latest@sha256:5c40b3c27b9f13c873fefb2139765c56ce97fd50230f1f2d5c91e55dec171907
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/amd64

  Name:      docker.io/library/alpine:latest@sha256:e047bc2af17934d38c5a7fa9f46d443f1de3a7675546402592ef805cfa929f9d
  Name:      docker.io/library/alpine:latest@sha256:c4ba6347b0e4258ce6a6de2401619316f982b7bcc529f73d2a410d0097730204
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm/v6

  Name:      docker.io/library/alpine:latest@sha256:8483ecd016885d8dba70426fda133c30466f661bb041490d525658f1aac73822
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm/v7

  Name:      docker.io/library/alpine:latest@sha256:c74f1b1166784193ea6c8f9440263b9be6cae07dfe35e32a5df7a31358ac2060
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm64/v8

  Name:      docker.io/library/alpine:latest@sha256:2689e157117d2da668ad4699549e55eba1ceb79cb7862368b30919f0488213f4
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/386

  Name:      docker.io/library/alpine:latest@sha256:2042a492bcdd847a01cd7f119cd48caa180da696ed2aedd085001a78664407d6
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/ppc64le

  Name:      docker.io/library/alpine:latest@sha256:49e322ab6690e73a4909f787bcbdb873631264ff4a108cddfd9f9c249ba1d58e
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/s390x
...
```

## Examples
@@ -64,569 +47,7 @@ Manifests:

Same as [`buildx --builder`](buildx.md#builder).

### <a name="format"></a> Format the output (--format)

Format the output using the given Go template. Defaults to `{{.Manifest}}` if
unset. The following fields are available:

* `.Name`: provides the reference of the image
* `.Manifest`: provides the manifest or manifest list
* `.Image`: provides the image config
* `.BuildInfo`: provides [build info from image config](https://github.com/moby/buildkit/blob/master/docs/build-repro.md#image-config)

#### `.Name`

```console
$ docker buildx imagetools inspect alpine --format "{{.Name}}"
Name: docker.io/library/alpine:latest
```

#### `.Manifest`

```console
$ docker buildx imagetools inspect crazymax/loop --format "{{.Manifest}}"
Name:      docker.io/crazymax/loop:latest
MediaType: application/vnd.docker.distribution.manifest.v2+json
Digest:    sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1
```

```console
$ docker buildx imagetools inspect moby/buildkit:master --format "{{.Manifest}}"
Name:      docker.io/moby/buildkit:master
MediaType: application/vnd.docker.distribution.manifest.list.v2+json
Digest:    sha256:3183f7ce54d1efb44c34b84f428ae10aaf141e553c6b52a7ff44cc7083a05a66

Manifests:
  Name:      docker.io/moby/buildkit:master@sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/amd64

  Name:      docker.io/moby/buildkit:master@sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm/v7

  Name:      docker.io/moby/buildkit:master@sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/arm64

  Name:      docker.io/moby/buildkit:master@sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/s390x

  Name:      docker.io/moby/buildkit:master@sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/ppc64le

  Name:      docker.io/moby/buildkit:master@sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e
  MediaType: application/vnd.docker.distribution.manifest.v2+json
  Platform:  linux/riscv64
```

#### `.BuildInfo`

```console
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{.BuildInfo}}"
Name: docker.io/crazymax/buildx:buildinfo
Frontend: dockerfile.v0
Attrs:
  filename: Dockerfile
  source: docker/dockerfile-upstream:master-labs
  build-arg:bar: foo
  build-arg:foo: bar
Sources:
  Type: docker-image
  Ref: docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0
  Pin: sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0

  Type: docker-image
  Ref: docker.io/library/alpine:3.13
  Pin: sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c

  Type: docker-image
  Ref: docker.io/moby/buildkit:v0.9.0
  Pin: sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab

  Type: docker-image
  Ref: docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04
  Pin: sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04

  Type: http
  Ref: https://raw.githubusercontent.com/moby/moby/master/README.md
  Pin: sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c
```

#### JSON output

A `json` Go template function is also available if you want to render fields as
JSON bytes:

```console
$ docker buildx imagetools inspect crazymax/loop --format "{{json .Manifest}}"
```
```json
{
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "digest": "sha256:08602e7340970e92bde5e0a2e887c1fde4d9ae753d1e05efb4c8ef3b609f97f1",
  "size": 949
}
```

```console
$ docker buildx imagetools inspect moby/buildkit:master --format "{{json .Manifest}}"
```
```json
{
  "schemaVersion": 2,
  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
  "digest": "sha256:79d97f205e2799d99a3a8ae2a1ef17acb331e11784262c3faada847dc6972c52",
  "size": 2010,
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:bd1e78f06de26610fadf4eb9d04b1a45a545799d6342701726e952cc0c11c912",
      "size": 1158,
      "platform": {
        "architecture": "amd64",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:d37dcced63ec0965824fca644f0ac9efad8569434ec15b4c83adfcb3dcfc743b",
      "size": 1158,
      "platform": {
        "architecture": "arm",
        "os": "linux",
        "variant": "v7"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:ce142eb2255e6af46f2809e159fd03081697c7605a3de03b9cbe9a52ddb244bf",
      "size": 1158,
      "platform": {
        "architecture": "arm64",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:f59bfb5062fff76ce464bfa4e25ebaaaac887d6818238e119d68613c456d360c",
      "size": 1158,
      "platform": {
        "architecture": "s390x",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:cc96426e0c50a78105d5637d31356db5dd6ec594f21b24276e534a32da09645c",
      "size": 1159,
      "platform": {
        "architecture": "ppc64le",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:39f9c1e2878e6c333acb23187d6b205ce82ed934c60da326cb2c698192631478",
      "size": 1158,
      "platform": {
        "architecture": "riscv64",
        "os": "linux"
      }
    }
  ]
}
```

```console
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .BuildInfo}}"
```
```json
{
  "frontend": "dockerfile.v0",
  "attrs": {
    "build-arg:bar": "foo",
    "build-arg:foo": "bar",
    "filename": "Dockerfile",
    "source": "crazymax/dockerfile:buildattrs"
  },
  "sources": [
    {
      "type": "docker-image",
      "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
      "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
    },
    {
      "type": "docker-image",
      "ref": "docker.io/library/alpine:3.13@sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c",
      "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
    },
    {
      "type": "docker-image",
      "ref": "docker.io/moby/buildkit:v0.9.0@sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab",
      "pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
    },
    {
      "type": "docker-image",
      "ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
      "pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
    },
    {
      "type": "http",
      "ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
      "pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
    }
  ]
}
```

```console
$ docker buildx imagetools inspect crazymax/buildx:buildinfo --format "{{json .}}"
```
```json
{
  "name": "crazymax/buildx:buildinfo",
  "manifest": {
    "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
    "digest": "sha256:899d2c7acbc124d406820857bb51d9089717bbe4e22b97eb4bc5789e99f09f83",
    "size": 2628
  },
  "image": {
    "created": "2022-02-24T12:27:43.627154558Z",
    "architecture": "amd64",
    "os": "linux",
    "config": {
      "Env": [
        "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin",
        "DOCKER_TLS_CERTDIR=/certs",
        "DOCKER_CLI_EXPERIMENTAL=enabled"
      ],
      "Entrypoint": [
        "docker-entrypoint.sh"
      ],
      "Cmd": [
        "sh"
      ]
    },
    "rootfs": {
      "type": "layers",
      "diff_ids": [
        "sha256:7fcb75871b2101082203959c83514ac8a9f4ecfee77a0fe9aa73bbe56afdf1b4",
        "sha256:d3c0b963ff5684160641f936d6a4aa14efc8ff27b6edac255c07f2d03ff92e82",
        "sha256:3f8d78f13fa9b1f35d3bc3f1351d03a027c38018c37baca73f93eecdea17f244",
        "sha256:8e6eb1137b182ae0c3f5d40ca46341fda2eaeeeb5fa516a9a2bf96171238e2e0",
        "sha256:fde4c869a56b54dd76d7352ddaa813fd96202bda30b9dceb2c2f2ad22fa2e6ce",
        "sha256:52025823edb284321af7846419899234b3c66219bf06061692b709875ed0760f",
        "sha256:50adb5982dbf6126c7cf279ac3181d1e39fc9116b610b947a3dadae6f7e7c5bc",
        "sha256:9801c319e1c66c5d295e78b2d3e80547e73c7e3c63a4b71e97c8ca357224af24",
        "sha256:dfbfac44d5d228c49b42194c8a2f470abd6916d072f612a6fb14318e94fde8ae",
        "sha256:3dfb74e19dedf61568b917c19b0fd3ee4580870027ca0b6054baf239855d1322",
        "sha256:b182e707c23e4f19be73f9022a99d2d1ca7bf1ca8f280d40e4d1c10a6f51550e"
      ]
    },
    "history": [
      {
        "created": "2021-11-12T17:19:58.698676655Z",
        "created_by": "/bin/sh -c #(nop) ADD file:5a707b9d6cb5fff532e4c2141bc35707593f21da5528c9e71ae2ddb6ba4a4eb6 in / "
      },
      {
        "created": "2021-11-12T17:19:58.948920855Z",
        "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
        "empty_layer": true
      },
      {
        "created": "2022-02-24T12:27:38.285594601Z",
        "created_by": "RUN /bin/sh -c apk --update --no-cache add bash ca-certificates openssh-client \u0026\u0026 rm -rf /tmp/* /var/cache/apk/* # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:41.061874167Z",
        "created_by": "COPY /opt/docker/ /usr/local/bin/ # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:41.174098947Z",
        "created_by": "COPY /usr/bin/buildctl /usr/local/bin/buildctl # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:41.320343683Z",
        "created_by": "COPY /usr/bin/buildkit* /usr/local/bin/ # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:41.447149933Z",
        "created_by": "COPY /buildx /usr/libexec/docker/cli-plugins/docker-buildx # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.057722191Z",
        "created_by": "COPY /opt/docker-compose /usr/libexec/docker/cli-plugins/docker-compose # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.145224134Z",
        "created_by": "ADD https://raw.githubusercontent.com/moby/moby/master/README.md / # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.422212427Z",
        "created_by": "ENV DOCKER_TLS_CERTDIR=/certs",
        "comment": "buildkit.dockerfile.v0",
        "empty_layer": true
      },
      {
        "created": "2022-02-24T12:27:43.422212427Z",
        "created_by": "ENV DOCKER_CLI_EXPERIMENTAL=enabled",
        "comment": "buildkit.dockerfile.v0",
        "empty_layer": true
      },
      {
        "created": "2022-02-24T12:27:43.422212427Z",
        "created_by": "RUN /bin/sh -c docker --version \u0026\u0026 buildkitd --version \u0026\u0026 buildctl --version \u0026\u0026 docker buildx version \u0026\u0026 docker compose version \u0026\u0026 mkdir /certs /certs/client \u0026\u0026 chmod 1777 /certs /certs/client # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.514320155Z",
        "created_by": "COPY rootfs/modprobe.sh /usr/local/bin/modprobe # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.627154558Z",
        "created_by": "COPY rootfs/docker-entrypoint.sh /usr/local/bin/ # buildkit",
        "comment": "buildkit.dockerfile.v0"
      },
      {
        "created": "2022-02-24T12:27:43.627154558Z",
        "created_by": "ENTRYPOINT [\"docker-entrypoint.sh\"]",
        "comment": "buildkit.dockerfile.v0",
        "empty_layer": true
      },
      {
        "created": "2022-02-24T12:27:43.627154558Z",
        "created_by": "CMD [\"sh\"]",
        "comment": "buildkit.dockerfile.v0",
        "empty_layer": true
      }
    ]
  },
  "buildinfo": {
    "frontend": "dockerfile.v0",
    "attrs": {
      "build-arg:bar": "foo",
      "build-arg:foo": "bar",
      "filename": "Dockerfile",
      "source": "docker/dockerfile-upstream:master-labs"
    },
    "sources": [
      {
        "type": "docker-image",
        "ref": "docker.io/docker/buildx-bin:0.6.1@sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0",
        "pin": "sha256:a652ced4a4141977c7daaed0a074dcd9844a78d7d2615465b12f433ae6dd29f0"
      },
      {
        "type": "docker-image",
        "ref": "docker.io/library/alpine:3.13",
        "pin": "sha256:026f721af4cf2843e07bba648e158fb35ecc876d822130633cc49f707f0fc88c"
      },
      {
        "type": "docker-image",
        "ref": "docker.io/moby/buildkit:v0.9.0",
        "pin": "sha256:8dc668e7f66db1c044aadbed306020743516a94848793e0f81f94a087ee78cab"
      },
      {
        "type": "docker-image",
        "ref": "docker.io/tonistiigi/xx@sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04",
        "pin": "sha256:21a61be4744f6531cb5f33b0e6f40ede41fa3a1b8c82d5946178f80cc84bfc04"
      },
      {
        "type": "http",
        "ref": "https://raw.githubusercontent.com/moby/moby/master/README.md",
        "pin": "sha256:419455202b0ef97e480d7f8199b26a721a417818bc0e2d106975f74323f25e6c"
      }
    ]
  }
}
```

#### Multi-platform

Multi-platform images are supported for `.Image` and `.BuildInfo` fields. If
you want to pick up a specific platform, you can specify it using the `index`
go template function:

```console
$ docker buildx imagetools inspect --format '{{json (index .Image "linux/s390x")}}' moby/buildkit:master
```
```json
{
  "created": "2022-02-25T17:13:27.89891722Z",
  "architecture": "s390x",
  "os": "linux",
  "config": {
    "Env": [
      "PATH=/usr/local/sbin:/usr/local/bin:/usr/sbin:/usr/bin:/sbin:/bin"
    ],
    "Entrypoint": [
      "buildkitd"
    ],
    "Volumes": {
      "/var/lib/buildkit": {}
    }
  },
  "rootfs": {
    "type": "layers",
    "diff_ids": [
      "sha256:41048e32d0684349141cf05f629c5fc3c5915d1f3426b66dbb8953a540e01e1e",
      "sha256:2651209b9208fff6c053bc3c17353cb07874e50f1a9bc96d6afd03aef63de76a",
      "sha256:6741ed7e73039d853fa8902246a4c7e8bf9dd09652fd1b08251bc5f9e8876a7f",
      "sha256:92ac046adeeb65c86ae3f0b458dee04ad4a462e417661c04d77642c66494f69b"
    ]
  },
  "history": [
    {
      "created": "2021-11-24T20:41:23.709681315Z",
      "created_by": "/bin/sh -c #(nop) ADD file:cd24c711a2ef431b3ff94f9a02bfc42f159bc60de1d0eceecafea4e8af02441d in / "
    },
    {
      "created": "2021-11-24T20:41:23.94211262Z",
      "created_by": "/bin/sh -c #(nop) CMD [\"/bin/sh\"]",
      "empty_layer": true
    },
    {
      "created": "2022-01-26T18:15:21.449825391Z",
      "created_by": "RUN /bin/sh -c apk add --no-cache fuse3 git openssh pigz xz \u0026\u0026 ln -s fusermount3 /usr/bin/fusermount # buildkit",
      "comment": "buildkit.dockerfile.v0"
    },
    {
      "created": "2022-02-24T00:34:00.924540012Z",
      "created_by": "COPY examples/buildctl-daemonless/buildctl-daemonless.sh /usr/bin/ # buildkit",
      "comment": "buildkit.dockerfile.v0"
    },
    {
      "created": "2022-02-25T17:13:27.89891722Z",
      "created_by": "VOLUME [/var/lib/buildkit]",
      "comment": "buildkit.dockerfile.v0",
      "empty_layer": true
    },
    {
      "created": "2022-02-25T17:13:27.89891722Z",
      "created_by": "COPY / /usr/bin/ # buildkit",
      "comment": "buildkit.dockerfile.v0"
    },
    {
      "created": "2022-02-25T17:13:27.89891722Z",
      "created_by": "ENTRYPOINT [\"buildkitd\"]",
      "comment": "buildkit.dockerfile.v0",
      "empty_layer": true
    }
  ]
}
```

### <a name="raw"></a> Show original, unformatted JSON manifest (--raw)

Use the `--raw` option to print the unformatted JSON manifest bytes.

> `jq` is used here to get a better rendering of the output result.

```console
$ docker buildx imagetools inspect --raw crazymax/loop | jq
```
```json
{
  "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
  "schemaVersion": 2,
  "config": {
    "mediaType": "application/vnd.docker.container.image.v1+json",
    "digest": "sha256:7ace7d324e79b360b2db8b820d83081863d96d22e734cdf297a8e7fd83f6ceb3",
    "size": 2298
  },
  "layers": [
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "digest": "sha256:5843afab387455b37944e709ee8c78d7520df80f8d01cf7f861aae63beeddb6b",
      "size": 2811478
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "digest": "sha256:726d3732a87e1c430d67e8969de6b222a889d45e045ebae1a008a37ba38f3b1f",
      "size": 1776812
    },
    {
      "mediaType": "application/vnd.docker.image.rootfs.diff.tar.gzip",
      "digest": "sha256:5d7cf9b33148a8f220c84f27dd2cfae46aca019a3ea3fbf7274f6d6dbfae8f3b",
      "size": 382855
    }
  ]
}
```

```console
$ docker buildx imagetools inspect --raw moby/buildkit:master | jq
```
```json
{
  "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
  "schemaVersion": 2,
  "manifests": [
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:667d28c9fb33820ce686887a717a148e89fa77f9097f9352996bbcce99d352b1",
      "size": 1158,
      "platform": {
        "architecture": "amd64",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:71789527b64ab3d7b3de01d364b449cd7f7a3da758218fbf73b9c9aae05a6775",
      "size": 1158,
      "platform": {
        "architecture": "arm",
        "os": "linux",
        "variant": "v7"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:fb64667e1ce6ab0d05478f3a8402af07b27737598dcf9a510fb1d792b13a66be",
      "size": 1158,
      "platform": {
        "architecture": "arm64",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:1c3ddf95a0788e23f72f25800c05abc4458946685e2b66788c3d978cde6da92b",
      "size": 1158,
      "platform": {
        "architecture": "s390x",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:05bcde6d460a284e5bc88026cd070277e8380355de3126cbc8fe8a452708c6b1",
      "size": 1159,
      "platform": {
        "architecture": "ppc64le",
        "os": "linux"
      }
    },
    {
      "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
      "digest": "sha256:c04c57765304ab84f4f9807fff3e11605c3a60e16435c734b02c723680f6bd6e",
      "size": 1158,
      "platform": {
        "architecture": "riscv64",
        "os": "linux"
      }
    }
  ]
}
```
Use the `--raw` option to print the original JSON bytes instead of the formatted
output.

@@ -9,10 +9,10 @@ Inspect current builder instance

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--bootstrap`](#bootstrap) | | | Ensure builder has booted before inspecting |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| Name | Description |
| --- | --- |
| [`--bootstrap`](#bootstrap) | Ensure builder has booted before inspecting |
| [`--builder string`](#builder) | Override the configured builder instance |


<!---MARKER_GEN_END-->

@@ -14,17 +14,18 @@ List builder instances

Lists all builder instances and the nodes for each instance

**Example**

```console
$ docker buildx ls

NAME/NODE       DRIVER/ENDPOINT             STATUS  PLATFORMS
elated_tesla *  docker-container
  elated_tesla0 unix:///var/run/docker.sock running linux/amd64
  elated_tesla1 ssh://ubuntu@1.2.3.4        running linux/arm64*, linux/arm/v7, linux/arm/v6
  elated_tesla1 ssh://ubuntu@1.2.3.4        running linux/arm64, linux/arm/v7, linux/arm/v6
default         docker
  default       default                     running linux/amd64
```

Each builder has one or more nodes associated with it. The current builder's
name is marked with a `*` in `NAME/NODE` and the explicit node to build against for
the target platform is marked with a `*` in the `PLATFORMS` column.
name is marked with a `*`.

@@ -9,14 +9,14 @@ Remove build cache

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| `-a`, `--all` | | | Remove all unused images, not just dangling ones |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| `--filter` | `filter` | | Provide filter values (e.g., `until=24h`) |
| `-f`, `--force` | | | Do not prompt for confirmation |
| `--keep-storage` | `bytes` | `0` | Amount of disk space to keep for cache |
| `--verbose` | | | Provide a more verbose output |
| Name | Description |
| --- | --- |
| `-a`, `--all` | Remove all unused images, not just dangling ones |
| [`--builder string`](#builder) | Override the configured builder instance |
| `--filter filter` | Provide filter values (e.g., `until=24h`) |
| `-f`, `--force` | Do not prompt for confirmation |
| `--keep-storage bytes` | Amount of disk space to keep for cache |
| `--verbose` | Provide a more verbose output |


<!---MARKER_GEN_END-->
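
For reference, a typical invocation combining these flags might look like (illustrative values):

```console
$ docker buildx prune --filter until=24h --keep-storage 512mb --force
```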

@@ -9,13 +9,10 @@ Remove a builder instance

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--all-inactive`](#all-inactive) | | | Remove all inactive builders |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| [`-f`](#force), [`--force`](#force) | | | Do not prompt for confirmation |
| [`--keep-daemon`](#keep-daemon) | | | Keep the buildkitd daemon running |
| [`--keep-state`](#keep-state) | | | Keep BuildKit state |
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
| [`--keep-state`](#keep-state) | Keep BuildKit state |


<!---MARKER_GEN_END-->
@@ -27,32 +24,10 @@ default builder.

## Examples

### <a name="all-inactive"></a> Remove all inactive builders (--all-inactive)

Remove builders that are not in running state.

```console
$ docker buildx rm --all-inactive
WARNING! This will remove all builders that are not in running state. Are you sure you want to continue? [y/N] y
```

### <a name="builder"></a> Override the configured builder instance (--builder)

Same as [`buildx --builder`](buildx.md#builder).

### <a name="force"></a> Do not prompt for confirmation (--force)

Do not prompt for confirmation before removing inactive builders.

```console
$ docker buildx rm --all-inactive --force
```

### <a name="keep-daemon"></a> Keep the buildkitd daemon running (--keep-daemon)

Keep the buildkitd daemon running after the buildx context is removed. This is useful when you manage buildkitd daemons and buildx contexts independently.
Currently, only supported by the [`docker-container` and `kubernetes` drivers](buildx_create.md#driver).

### <a name="keep-state"></a> Keep BuildKit state (--keep-state)

Keep BuildKit state, so it can be reused by a new builder with the same name.
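
A sketch of the round trip this enables (the builder name and driver are illustrative):

```console
$ docker buildx rm --keep-state mybuilder
$ docker buildx create --name mybuilder --driver docker-container
```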

@@ -9,9 +9,9 @@ Stop builder instance

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |


<!---MARKER_GEN_END-->

@@ -9,11 +9,11 @@ Set the current builder instance

### Options

| Name | Type | Default | Description |
| --- | --- | --- | --- |
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
| `--default` | | | Set builder as default for current context |
| `--global` | | | Builder persists context changes |
| Name | Description |
| --- | --- |
| [`--builder string`](#builder) | Override the configured builder instance |
| `--default` | Set builder as default for current context |
| `--global` | Builder persists context changes |


<!---MARKER_GEN_END-->

@@ -10,9 +10,10 @@ Show buildx version information

<!---MARKER_GEN_END-->

## Description
## Examples

### View version information

View version information

```console
$ docker buildx version

@@ -298,7 +298,7 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
    return nil
}

func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
    info, err := d.Info(ctx)
    if err != nil {
        return err
@@ -308,22 +308,20 @@ func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
        if err != nil {
            return err
        }
        if rmDaemon {
            if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
                RemoveVolumes: true,
                Force:         force,
            }); err != nil {
                return err
            }
            for _, v := range container.Mounts {
                if v.Name != d.Name+volumeStateSuffix {
                    continue
                }
        if err := d.DockerAPI.ContainerRemove(ctx, d.Name, dockertypes.ContainerRemoveOptions{
            RemoveVolumes: true,
            Force:         force,
        }); err != nil {
            return err
        }
        for _, v := range container.Mounts {
            if v.Name == d.Name+volumeStateSuffix {
                if rmVolume {
                    return d.DockerAPI.VolumeRemove(ctx, d.Name+volumeStateSuffix, false)
                }
            }
        }

    }
    return nil
}

@@ -33,7 +33,7 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
    return nil
}

func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
    return nil
}

@@ -54,7 +54,7 @@ type Driver interface {
    Bootstrap(context.Context, progress.Logger) error
    Info(context.Context) (*Info, error)
    Stop(ctx context.Context, force bool) error
    Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error
    Rm(ctx context.Context, force bool, rmVolume bool) error
    Client(ctx context.Context) (*client.Client, error)
    Features() map[Feature]bool
    IsMobyDriver() bool
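
For context, a minimal sketch of how a caller might drive the two-boolean form of `Rm` shown above (`removeBuilder` is a hypothetical helper, not part of this diff):

```go
package main

import (
    "context"
    "log"

    "github.com/docker/buildx/driver"
)

// removeBuilder force-removes a builder and also deletes its persistent
// state volume, matching the Rm(ctx, force, rmVolume) signature above.
func removeBuilder(ctx context.Context, d driver.Driver) {
    if err := d.Rm(ctx, true, true); err != nil {
        log.Fatalf("removing builder: %v", err)
    }
}
```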

@@ -1,225 +0,0 @@
package context

import (
    "io/ioutil"
    "os"
    "testing"

    "github.com/docker/cli/cli/context"
    "github.com/docker/cli/cli/context/store"
    "github.com/stretchr/testify/assert"
    "github.com/stretchr/testify/require"
    "k8s.io/client-go/tools/clientcmd"
    clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func testEndpoint(server, defaultNamespace string, ca, cert, key []byte, skipTLSVerify bool) Endpoint {
    var tlsData *context.TLSData
    if ca != nil || cert != nil || key != nil {
        tlsData = &context.TLSData{
            CA:   ca,
            Cert: cert,
            Key:  key,
        }
    }
    return Endpoint{
        EndpointMeta: EndpointMeta{
            EndpointMetaBase: context.EndpointMetaBase{
                Host:          server,
                SkipTLSVerify: skipTLSVerify,
            },
            DefaultNamespace: defaultNamespace,
        },
        TLSData: tlsData,
    }
}

var testStoreCfg = store.NewConfig(
    func() interface{} {
        return &map[string]interface{}{}
    },
    store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
)

func TestSaveLoadContexts(t *testing.T) {
    storeDir, err := ioutil.TempDir("", "test-load-save-k8-context")
    require.NoError(t, err)
    defer os.RemoveAll(storeDir)
    store := store.New(storeDir, testStoreCfg)
    require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, false), "raw-notls"))
    require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, true), "raw-notls-skip"))
    require.NoError(t, save(store, testEndpoint("https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true), "raw-tls"))

    kcFile, err := ioutil.TempFile(os.TempDir(), "test-load-save-k8-context")
    require.NoError(t, err)
    defer os.Remove(kcFile.Name())
    defer kcFile.Close()
    cfg := clientcmdapi.NewConfig()
    cfg.AuthInfos["user"] = clientcmdapi.NewAuthInfo()
    cfg.Contexts["context1"] = clientcmdapi.NewContext()
    cfg.Clusters["cluster1"] = clientcmdapi.NewCluster()
    cfg.Contexts["context2"] = clientcmdapi.NewContext()
    cfg.Clusters["cluster2"] = clientcmdapi.NewCluster()
    cfg.AuthInfos["user"].ClientCertificateData = []byte("cert")
    cfg.AuthInfos["user"].ClientKeyData = []byte("key")
    cfg.Clusters["cluster1"].Server = "https://server1"
    cfg.Clusters["cluster1"].InsecureSkipTLSVerify = true
    cfg.Clusters["cluster2"].Server = "https://server2"
    cfg.Clusters["cluster2"].CertificateAuthorityData = []byte("ca")
    cfg.Contexts["context1"].AuthInfo = "user"
    cfg.Contexts["context1"].Cluster = "cluster1"
    cfg.Contexts["context1"].Namespace = "namespace1"
    cfg.Contexts["context2"].AuthInfo = "user"
    cfg.Contexts["context2"].Cluster = "cluster2"
    cfg.Contexts["context2"].Namespace = "namespace2"
    cfg.CurrentContext = "context1"
    cfgData, err := clientcmd.Write(*cfg)
    require.NoError(t, err)
    _, err = kcFile.Write(cfgData)
    require.NoError(t, err)
    kcFile.Close()

    epDefault, err := FromKubeConfig(kcFile.Name(), "", "")
    require.NoError(t, err)
    epContext2, err := FromKubeConfig(kcFile.Name(), "context2", "namespace-override")
    require.NoError(t, err)
    require.NoError(t, save(store, epDefault, "embed-default-context"))
    require.NoError(t, save(store, epContext2, "embed-context2"))

    rawNoTLSMeta, err := store.GetMetadata("raw-notls")
    require.NoError(t, err)
    rawNoTLSSkipMeta, err := store.GetMetadata("raw-notls-skip")
    require.NoError(t, err)
    rawTLSMeta, err := store.GetMetadata("raw-tls")
    require.NoError(t, err)
    embededDefaultMeta, err := store.GetMetadata("embed-default-context")
    require.NoError(t, err)
    embededContext2Meta, err := store.GetMetadata("embed-context2")
    require.NoError(t, err)

    rawNoTLS := EndpointFromContext(rawNoTLSMeta)
    rawNoTLSSkip := EndpointFromContext(rawNoTLSSkipMeta)
    rawTLS := EndpointFromContext(rawTLSMeta)
    embededDefault := EndpointFromContext(embededDefaultMeta)
    embededContext2 := EndpointFromContext(embededContext2Meta)

    rawNoTLSEP, err := rawNoTLS.WithTLSData(store, "raw-notls")
    require.NoError(t, err)
    checkClientConfig(t, rawNoTLSEP, "https://test", "test", nil, nil, nil, false)
    rawNoTLSSkipEP, err := rawNoTLSSkip.WithTLSData(store, "raw-notls-skip")
    require.NoError(t, err)
    checkClientConfig(t, rawNoTLSSkipEP, "https://test", "test", nil, nil, nil, true)
    rawTLSEP, err := rawTLS.WithTLSData(store, "raw-tls")
    require.NoError(t, err)
    checkClientConfig(t, rawTLSEP, "https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true)
    embededDefaultEP, err := embededDefault.WithTLSData(store, "embed-default-context")
    require.NoError(t, err)
    checkClientConfig(t, embededDefaultEP, "https://server1", "namespace1", nil, []byte("cert"), []byte("key"), true)
    embededContext2EP, err := embededContext2.WithTLSData(store, "embed-context2")
    require.NoError(t, err)
    checkClientConfig(t, embededContext2EP, "https://server2", "namespace-override", []byte("ca"), []byte("cert"), []byte("key"), false)
}

func checkClientConfig(t *testing.T, ep Endpoint, server, namespace string, ca, cert, key []byte, skipTLSVerify bool) {
    config := ep.KubernetesConfig()
    cfg, err := config.ClientConfig()
    require.NoError(t, err)
    ns, _, _ := config.Namespace()
    assert.Equal(t, server, cfg.Host)
    assert.Equal(t, namespace, ns)
    assert.Equal(t, ca, cfg.CAData)
    assert.Equal(t, cert, cfg.CertData)
    assert.Equal(t, key, cfg.KeyData)
    assert.Equal(t, skipTLSVerify, cfg.Insecure)
}

func save(s store.Writer, ep Endpoint, name string) error {
    meta := store.Metadata{
        Endpoints: map[string]interface{}{
            KubernetesEndpoint: ep.EndpointMeta,
        },
        Name: name,
    }
    if err := s.CreateOrUpdate(meta); err != nil {
        return err
    }
    return s.ResetEndpointTLSMaterial(name, KubernetesEndpoint, ep.TLSData.ToStoreTLSData())
}

func TestSaveLoadGKEConfig(t *testing.T) {
    storeDir, err := ioutil.TempDir("", t.Name())
    require.NoError(t, err)
    defer os.RemoveAll(storeDir)
    store := store.New(storeDir, testStoreCfg)
    cfg, err := clientcmd.LoadFromFile("fixtures/gke-kubeconfig")
    require.NoError(t, err)
    clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
    expectedCfg, err := clientCfg.ClientConfig()
    require.NoError(t, err)
    ep, err := FromKubeConfig("fixtures/gke-kubeconfig", "", "")
    require.NoError(t, err)
    require.NoError(t, save(store, ep, "gke-context"))
    persistedMetadata, err := store.GetMetadata("gke-context")
    require.NoError(t, err)
    persistedEPMeta := EndpointFromContext(persistedMetadata)
    assert.True(t, persistedEPMeta != nil)
    persistedEP, err := persistedEPMeta.WithTLSData(store, "gke-context")
    require.NoError(t, err)
    persistedCfg := persistedEP.KubernetesConfig()
    actualCfg, err := persistedCfg.ClientConfig()
    require.NoError(t, err)
    assert.Equal(t, expectedCfg.AuthProvider, actualCfg.AuthProvider)
}

func TestSaveLoadEKSConfig(t *testing.T) {
    storeDir, err := ioutil.TempDir("", t.Name())
    require.NoError(t, err)
    defer os.RemoveAll(storeDir)
    store := store.New(storeDir, testStoreCfg)
    cfg, err := clientcmd.LoadFromFile("fixtures/eks-kubeconfig")
    require.NoError(t, err)
    clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
    expectedCfg, err := clientCfg.ClientConfig()
    require.NoError(t, err)
    ep, err := FromKubeConfig("fixtures/eks-kubeconfig", "", "")
    require.NoError(t, err)
    require.NoError(t, save(store, ep, "eks-context"))
    persistedMetadata, err := store.GetMetadata("eks-context")
    require.NoError(t, err)
    persistedEPMeta := EndpointFromContext(persistedMetadata)
    assert.True(t, persistedEPMeta != nil)
    persistedEP, err := persistedEPMeta.WithTLSData(store, "eks-context")
    require.NoError(t, err)
    persistedCfg := persistedEP.KubernetesConfig()
    actualCfg, err := persistedCfg.ClientConfig()
    require.NoError(t, err)
    assert.Equal(t, expectedCfg.ExecProvider, actualCfg.ExecProvider)
}

func TestSaveLoadK3SConfig(t *testing.T) {
    storeDir, err := ioutil.TempDir("", t.Name())
    require.NoError(t, err)
    defer os.RemoveAll(storeDir)
    store := store.New(storeDir, testStoreCfg)
    cfg, err := clientcmd.LoadFromFile("fixtures/k3s-kubeconfig")
    require.NoError(t, err)
    clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
    expectedCfg, err := clientCfg.ClientConfig()
    require.NoError(t, err)
    ep, err := FromKubeConfig("fixtures/k3s-kubeconfig", "", "")
    require.NoError(t, err)
    require.NoError(t, save(store, ep, "k3s-context"))
    persistedMetadata, err := store.GetMetadata("k3s-context")
    require.NoError(t, err)
    persistedEPMeta := EndpointFromContext(persistedMetadata)
    assert.True(t, persistedEPMeta != nil)
    persistedEP, err := persistedEPMeta.WithTLSData(store, "k3s-context")
    require.NoError(t, err)
    persistedCfg := persistedEP.KubernetesConfig()
    actualCfg, err := persistedCfg.ClientConfig()
    require.NoError(t, err)
    assert.True(t, len(actualCfg.Username) > 0)
    assert.True(t, len(actualCfg.Password) > 0)
    assert.Equal(t, expectedCfg.Username, actualCfg.Username)
    assert.Equal(t, expectedCfg.Password, actualCfg.Password)
}

@@ -1,23 +0,0 @@
apiVersion: v1
clusters:
- cluster:
    server: https://some-server
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: aws
  name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1alpha1
      command: heptio-authenticator-aws
      args:
        - "token"
        - "-i"
        - "eks-cf"

@@ -1,23 +0,0 @@
apiVersion: v1
clusters:
- cluster:
    server: https://some-server
  name: gke_sample
contexts:
- context:
    cluster: gke_sample
    user: gke_sample
  name: gke_sample
current-context: gke_sample
kind: Config
preferences: {}
users:
- name: gke_sample
  user:
    auth-provider:
      config:
        cmd-args: config config-helper --format=json
        cmd-path: /google/google-cloud-sdk/bin/gcloud
        expiry-key: '{.credential.token_expiry}'
        token-key: '{.credential.access_token}'
      name: gcp

@@ -1,20 +0,0 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: dGhlLWNh
    server: https://someserver
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
    namespace: zoinx
  name: test
current-context: test
kind: Config
preferences: {}
users:
- name: test-user
  user:
    username: admin
    password: testpwd
@@ -1,20 +0,0 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: dGhlLWNh
    server: https://someserver
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
    namespace: zoinx
  name: test
current-context: test
kind: Config
preferences: {}
users:
- name: test-user
  user:
    client-certificate-data: dGhlLWNlcnQ=
    client-key-data: dGhlLWtleQ==
@@ -1,23 +0,0 @@
package context

import (
	"os"
	"testing"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/config/configfile"
	cliflags "github.com/docker/cli/cli/flags"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestDefaultContextInitializer(t *testing.T) {
	cli, err := command.NewDockerCli()
	require.NoError(t, err)
	os.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
	defer os.Unsetenv("KUBECONFIG")
	ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, &configfile.ConfigFile{}, command.DefaultContextStoreConfig(), cli.Err())
	require.NoError(t, err)
	assert.Equal(t, "default", ctx.Meta.Name)
	assert.Equal(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
}
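The removed tests above all follow the same load-resolve-compare pattern around `clientcmd`. As a minimal sketch (assuming only `k8s.io/client-go` and one of the fixtures above on disk), this is the resolution step they exercise:

```go
package main

import (
	"fmt"

	"k8s.io/client-go/tools/clientcmd"
)

func main() {
	// Load a kubeconfig fixture and flatten it into a REST client config,
	// the same two steps the deleted tests perform before comparing fields.
	cfg, err := clientcmd.LoadFromFile("fixtures/test-kubeconfig")
	if err != nil {
		panic(err)
	}
	restCfg, err := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{}).ClientConfig()
	if err != nil {
		panic(err)
	}
	fmt.Println(restCfg.Host) // https://someserver
}
```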
@@ -165,11 +165,7 @@ func (d *Driver) Stop(ctx context.Context, force bool) error {
	return nil
}

func (d *Driver) Rm(ctx context.Context, force, rmVolume, rmDaemon bool) error {
	if !rmDaemon {
		return nil
	}

func (d *Driver) Rm(ctx context.Context, force bool, rmVolume bool) error {
	if err := d.deploymentClient.Delete(ctx, d.deployment.Name, metav1.DeleteOptions{}); err != nil {
		if !apierrors.IsNotFound(err) {
			return errors.Wrapf(err, "error while calling deploymentClient.Delete for %q", d.deployment.Name)
go.mod
@@ -8,12 +8,13 @@ require (
	github.com/bugsnag/panicwrap v1.2.0 // indirect
	github.com/cenkalti/backoff v2.1.1+incompatible // indirect
	github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
	github.com/compose-spec/compose-go v1.0.8
	github.com/compose-spec/compose-go v1.0.5
	github.com/containerd/console v1.0.3
	github.com/containerd/containerd v1.6.1
	github.com/docker/cli v20.10.12+incompatible
	github.com/docker/cli-docs-tool v0.4.0
	github.com/docker/distribution v2.8.0+incompatible
	github.com/containerd/containerd v1.5.5
	github.com/docker/cli v20.10.8+incompatible
	github.com/docker/cli-docs-tool v0.2.1
	github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
	github.com/docker/distribution v2.7.1+incompatible
	github.com/docker/docker v20.10.7+incompatible
	github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
	github.com/docker/go-units v0.4.0
@@ -30,10 +31,9 @@ require (
	github.com/jinzhu/gorm v1.9.2 // indirect
	github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
	github.com/moby/buildkit v0.10.0-rc2.0.20220308185020-fdecd0ae108b
	github.com/morikuni/aec v1.0.0
	github.com/moby/buildkit v0.9.1-0.20211019185819-8778943ac3da
	github.com/opencontainers/go-digest v1.0.0
	github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
	github.com/opencontainers/image-spec v1.0.2-0.20210819154149-5ad6f50d6283
	github.com/pelletier/go-toml v1.9.4
	github.com/pkg/errors v0.9.1
	github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
@@ -42,23 +42,24 @@ require (
	github.com/spf13/pflag v1.0.5
	github.com/stretchr/testify v1.7.0
	github.com/theupdateframework/notary v0.6.1 // indirect
	github.com/zclconf/go-cty v1.10.0
	go.opentelemetry.io/otel v1.4.1
	go.opentelemetry.io/otel/trace v1.4.1
	github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea
	github.com/zclconf/go-cty v1.7.1
	go.opentelemetry.io/otel v1.0.0-RC1
	go.opentelemetry.io/otel/trace v1.0.0-RC1
	golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
	google.golang.org/grpc v1.44.0
	gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
	gopkg.in/fatih/pool.v2 v2.0.0 // indirect
	gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
	k8s.io/api v0.23.4
	k8s.io/apimachinery v0.23.4
	k8s.io/client-go v0.23.4
	k8s.io/api v0.22.1
	k8s.io/apimachinery v0.22.1
	k8s.io/client-go v0.22.1
)

replace (
	github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible
	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220121014307-40bb9831756f+incompatible
	k8s.io/api => k8s.io/api v0.22.4
	k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
	k8s.io/client-go => k8s.io/client-go v0.22.4
	github.com/docker/cli => github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible
	github.com/docker/docker => github.com/tonistiigi/docker v0.10.1-0.20211122204227-65a6f25dbca2
	github.com/tonistiigi/fsutil => github.com/tonistiigi/fsutil v0.0.0-20211122210416-da5201e0b3af
	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.0.0-20210714055410-d010b05b4939
	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/httptrace/otelhttptrace v0.0.0-20210714055410-d010b05b4939
	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp => github.com/tonistiigi/opentelemetry-go-contrib/instrumentation/net/http/otelhttp v0.0.0-20210714055410-d010b05b4939
)
@@ -1,7 +1,6 @@
# syntax=docker/dockerfile:1.3-labs

ARG GO_VERSION=1.17
ARG MODOUTDATED_VERSION=v0.8.0

FROM golang:${GO_VERSION}-alpine AS base
RUN apk add --no-cache git rsync
@@ -37,9 +36,8 @@ if [ -n "$(git status --porcelain -- go.mod go.sum vendor)" ]; then
fi
EOT

FROM psampaz/go-mod-outdated:${MODOUTDATED_VERSION} AS go-mod-outdated
FROM psampaz/go-mod-outdated:v0.8.0 AS go-mod-outdated
FROM base AS outdated
ARG _RANDOM
RUN --mount=target=.,ro \
  --mount=target=/go/pkg/mod,type=cache \
  --mount=from=go-mod-outdated,source=/home/go-mod-outdated,target=/usr/bin/go-mod-outdated \
hack/test-driver
@@ -10,20 +10,13 @@ set -eu -o pipefail
: ${MULTI_NODE=0}
: ${PLATFORMS=linux/amd64,linux/arm64}

function buildxCmd {
  (set -x ; $BUILDX_CMD "$@")
}

function clean {
  rm -rf "$context"
  if [ "$builderName" != "default" ]; then
    buildxCmd rm "$builderName"
  fi
  ${BUILDX_CMD} rm "$builderName"
}

context=$(mktemp -d -t buildx-output.XXXXXXXXXX)
dockerfile=${context}/Dockerfile
bakedef=${context}/docker-bake.hcl
trap clean EXIT

builderName=buildx-test-$(openssl rand -hex 16)
@@ -51,12 +44,15 @@ if [ "$DRIVER" != "docker" ]; then
    if [ "$firstNode" = "0" ]; then
      createFlags="$createFlags --append"
    fi
    buildxCmd create ${createFlags} \
      --name="${builderName}" \
      --node="${builderName}-${platform/\//-}" \
      --driver="${DRIVER}" \
      --driver-opt="${driverOpt}" \
      --platform="${platform}"
    (
      set -x
      ${BUILDX_CMD} create ${createFlags} \
        --name="${builderName}" \
        --node="${builderName}-${platform/\//-}" \
        --driver="${DRIVER}" \
        --driver-opt="${driverOpt}" \
        --platform="${platform}"
    )
    firstNode=0
  done
else
@@ -64,37 +60,27 @@ if [ "$DRIVER" != "docker" ]; then
  if [ -f "$BUILDKIT_CFG" ]; then
    createFlags="$createFlags --config=${BUILDKIT_CFG}"
  fi
  buildxCmd create ${createFlags} \
    --name="${builderName}" \
    --driver="${DRIVER}" \
    --driver-opt="${driverOpt}" \
    --platform="${PLATFORMS}"
  (
    set -x
    ${BUILDX_CMD} create ${createFlags} \
      --name="${builderName}" \
      --driver="${DRIVER}" \
      --driver-opt="${driverOpt}" \
      --platform="${PLATFORMS}"
  )
fi
fi

function buildOutput {
  local name=$1
  if [ "$DRIVER" != "docker" ]; then
    if [ "${MULTI_NODE}" = "1" ]; then
      echo "type=cacheonly"
    else
      echo "type=oci,dest=${context}/${name}.tar"
    fi
  else
    echo "type=docker,name=${name}"
  fi
}

# multi-platform not supported by docker driver
buildPlatformFlag=
bakePlatformFlag=
if [ "$DRIVER" != "docker" ]; then
  buildPlatformFlag=--platform="${PLATFORMS}"
  bakePlatformFlag=--set="*.platform=${PLATFORMS}"
fi

set -x

# inspect and bootstrap
buildxCmd inspect --bootstrap --builder="${builderName}"
${BUILDX_CMD} inspect --bootstrap --builder="${builderName}"

# create dockerfile
cat > "${dockerfile}" <<EOL
@@ -102,60 +88,14 @@ FROM busybox as build
ARG TARGETPLATFORM
ARG BUILDPLATFORM
RUN echo "I am running on \$BUILDPLATFORM, building for \$TARGETPLATFORM" > /log

FROM busybox AS log
FROM busybox
COPY --from=build /log /log
RUN cat /log
RUN uname -a

FROM busybox AS hello
RUN echo hello > /hello

FROM scratch
COPY --from=log /log /log
COPY --from=hello /hello /hello
EOL

# build
buildxCmd build ${buildPlatformFlag} \
  --output="$(buildOutput buildx-test-build)" \
${BUILDX_CMD} build ${buildPlatformFlag} \
  --output="type=cacheonly" \
  --builder="${builderName}" \
  --metadata-file="${context}/metadata-build.json" \
  "${context}"
cat "${context}/metadata-build.json"

# create bake def
cat > "${bakedef}" <<EOL
group "default" {
  targets = ["release"]
}
group "all" {
  targets = ["log", "hello"]
}
target "release" {
  output = ["$(buildOutput buildx-test-bake-release)"]
}
target "log" {
  output = ["$(buildOutput buildx-test-bake-log)"]
}
target "hello" {
  output = ["$(buildOutput buildx-test-bake-hello)"]
}
EOL

# bake default target
buildxCmd bake ${bakePlatformFlag} \
  --file="${bakedef}" \
  --builder="${builderName}" \
  --set "*.context=${context}" \
  --metadata-file="${context}/metadata-bake-def.json"
cat "${context}/metadata-bake-def.json"

# bake all target
buildxCmd bake ${bakePlatformFlag} \
  --file="${bakedef}" \
  --builder="${builderName}" \
  --set "*.context=${context}" \
  --metadata-file="${context}/metadata-bake-all.json" \
  all
cat "${context}/metadata-bake-all.json"
@@ -37,32 +37,6 @@ func GetCurrentEndpoint(dockerCli command.Cli) (string, error) {
	return de, nil
}

func GetProxyConfig(dockerCli command.Cli) map[string]string {
	cfg := dockerCli.ConfigFile()
	host := dockerCli.Client().DaemonHost()

	proxy, ok := cfg.Proxies[host]
	if !ok {
		proxy = cfg.Proxies["default"]
	}

	m := map[string]string{}

	if v := proxy.HTTPProxy; v != "" {
		m["HTTP_PROXY"] = v
	}
	if v := proxy.HTTPSProxy; v != "" {
		m["HTTPS_PROXY"] = v
	}
	if v := proxy.NoProxy; v != "" {
		m["NO_PROXY"] = v
	}
	if v := proxy.FTPProxy; v != "" {
		m["FTP_PROXY"] = v
	}
	return m
}

// GetDockerEndpoint returns docker endpoint string for given context
func GetDockerEndpoint(dockerCli command.Cli, name string) (string, error) {
	list, err := dockerCli.ContextStore().List()
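The `GetProxyConfig` helper removed here selects the proxy block for the active daemon host and falls back to the `default` entry. A self-contained sketch of the same lookup (the `proxyEnv` helper is hypothetical, built only on `docker/cli`'s `configfile` types):

```go
package main

import (
	"fmt"

	"github.com/docker/cli/cli/config/configfile"
)

// proxyEnv mirrors the lookup in the removed GetProxyConfig: pick the proxy
// settings for the daemon host, falling back to the "default" entry.
func proxyEnv(cfg *configfile.ConfigFile, host string) map[string]string {
	proxy, ok := cfg.Proxies[host]
	if !ok {
		proxy = cfg.Proxies["default"]
	}
	m := map[string]string{}
	if v := proxy.HTTPProxy; v != "" {
		m["HTTP_PROXY"] = v
	}
	if v := proxy.HTTPSProxy; v != "" {
		m["HTTPS_PROXY"] = v
	}
	if v := proxy.NoProxy; v != "" {
		m["NO_PROXY"] = v
	}
	if v := proxy.FTPProxy; v != "" {
		m["FTP_PROXY"] = v
	}
	return m
}

func main() {
	cfg := &configfile.ConfigFile{Proxies: map[string]configfile.ProxyConfig{
		"default": {HTTPProxy: "http://proxy:3128"},
	}}
	fmt.Println(proxyEnv(cfg, "unix:///var/run/docker.sock"))
}
```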
@@ -8,18 +8,13 @@ import (
	"io"
	"net/http"

	"github.com/containerd/containerd/log"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker"
	"github.com/docker/buildx/util/resolver"
	clitypes "github.com/docker/cli/cli/config/types"
	"github.com/docker/distribution/reference"
	"github.com/moby/buildkit/util/contentutil"
	"github.com/moby/buildkit/util/imageutil"
	"github.com/moby/buildkit/util/tracing"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/sirupsen/logrus"
)

type Auth interface {
@@ -32,16 +27,14 @@ type Opt struct {
}

type Resolver struct {
	auth   docker.Authorizer
	hosts  docker.RegistryHosts
	buffer contentutil.Buffer
	auth  docker.Authorizer
	hosts docker.RegistryHosts
}

func New(opt Opt) *Resolver {
	return &Resolver{
		auth:   docker.NewDockerAuthorizer(docker.WithAuthCreds(toCredentialsFunc(opt.Auth)), docker.WithAuthClient(http.DefaultClient)),
		hosts:  resolver.NewRegistryConfig(opt.RegistryConfig),
		buffer: contentutil.NewBuffer(),
		auth:  docker.NewDockerAuthorizer(docker.WithAuthCreds(toCredentialsFunc(opt.Auth)), docker.WithAuthClient(http.DefaultClient)),
		hosts: resolver.NewRegistryConfig(opt.RegistryConfig),
	}
}

@@ -54,20 +47,14 @@ func (r *Resolver) resolver() remotes.Resolver {
		}
		for i := range res {
			res[i].Authorizer = r.auth
			res[i].Client = tracing.DefaultClient
		}
		return res, nil
	},
	Client: tracing.DefaultClient,
})
}

func (r *Resolver) Resolve(ctx context.Context, in string) (string, ocispec.Descriptor, error) {
	// discard containerd logger to avoid printing unnecessary info during image reference resolution.
	// https://github.com/containerd/containerd/blob/1a88cf5242445657258e0c744def5017d7cfb492/remotes/docker/resolver.go#L288
	logger := logrus.New()
	logger.Out = io.Discard
	ctx = log.WithLogger(ctx, logrus.NewEntry(logger))

	ref, err := parseRef(in)
	if err != nil {
		return "", ocispec.Descriptor{}, err
@@ -162,11 +149,3 @@ func RegistryAuthForRef(ref string, a Auth) (string, error) {
	}
	return base64.URLEncoding.EncodeToString(buf), nil
}

func (r *Resolver) ImageConfig(ctx context.Context, in string, platform *ocispec.Platform) (digest.Digest, []byte, error) {
	in, _, err := r.Resolve(ctx, in)
	if err != nil {
		return "", nil, err
	}
	return imageutil.Config(ctx, in, r.resolver(), r.buffer, nil, platform)
}
@@ -1,357 +1,74 @@
package imagetools

import (
	"context"
	"encoding/base64"
	"encoding/json"
	"fmt"
	"io"
	"os"
	"sort"
	"strings"
	"text/tabwriter"
	"text/template"

	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	"github.com/docker/distribution/reference"
	binfotypes "github.com/moby/buildkit/util/buildinfo/types"
	"github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
	"golang.org/x/sync/errgroup"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

const defaultPfx = "  "

type Printer struct {
	ctx      context.Context
	resolver *Resolver

	name   string
	format string

	raw       []byte
	ref       reference.Named
	manifest  ocispecs.Descriptor
	index     ocispecs.Index
	platforms []ocispecs.Platform
}

func NewPrinter(ctx context.Context, opt Opt, name string, format string) (*Printer, error) {
	resolver := New(opt)

	ref, err := parseRef(name)
	if err != nil {
		return nil, err
	}

	dt, manifest, err := resolver.Get(ctx, name)
	if err != nil {
		return nil, err
	}

	var index ocispecs.Index
	if err = json.Unmarshal(dt, &index); err != nil {
		return nil, err
	}

	var pforms []ocispecs.Platform
	switch manifest.MediaType {
	case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
		for _, m := range index.Manifests {
			pforms = append(pforms, *m.Platform)
		}
	default:
		pforms = append(pforms, platforms.DefaultSpec())
	}

	return &Printer{
		ctx:       ctx,
		resolver:  resolver,
		name:      name,
		format:    format,
		raw:       dt,
		ref:       ref,
		manifest:  manifest,
		index:     index,
		platforms: pforms,
	}, nil
}

func (p *Printer) Print(raw bool, out io.Writer) error {
	if raw {
		_, err := fmt.Fprintf(out, "%s", p.raw) // avoid newline to keep digest
		return err
	}

	if p.format == "" {
		w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
		_, _ = fmt.Fprintf(w, "Name:\t%s\n", p.ref.String())
		_, _ = fmt.Fprintf(w, "MediaType:\t%s\n", p.manifest.MediaType)
		_, _ = fmt.Fprintf(w, "Digest:\t%s\n", p.manifest.Digest)
		_ = w.Flush()
		switch p.manifest.MediaType {
		case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
			if err := p.printManifestList(out); err != nil {
				return err
			}
		}
		return nil
	}

	tpl, err := template.New("").Funcs(template.FuncMap{
		"json": func(v interface{}) string {
			b, _ := json.MarshalIndent(v, "", "  ")
			return string(b)
		},
	}).Parse(p.format)
func PrintManifestList(dt []byte, desc ocispec.Descriptor, refstr string, out io.Writer) error {
	ref, err := parseRef(refstr)
	if err != nil {
		return err
	}

	imageconfigs := make(map[string]*ocispecs.Image)
	buildinfos := make(map[string]*binfotypes.BuildInfo)

	eg, _ := errgroup.WithContext(p.ctx)
	for _, platform := range p.platforms {
		func(platform ocispecs.Platform) {
			eg.Go(func() error {
				img, dtic, err := p.getImageConfig(&platform)
				if err != nil {
					return err
				} else if img != nil {
					imageconfigs[platforms.Format(platform)] = img
				}
				if bi, err := p.getBuildInfo(dtic); err != nil {
					return err
				} else if bi != nil {
					buildinfos[platforms.Format(platform)] = bi
				}
				return nil
			})
		}(platform)
	}
	if err := eg.Wait(); err != nil {
	var mfst ocispec.Index
	if err := json.Unmarshal(dt, &mfst); err != nil {
		return err
	}

	format := tpl.Root.String()

	var manifest interface{}
	switch p.manifest.MediaType {
	case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest:
		manifest = p.manifest
	case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
		manifest = struct {
			SchemaVersion int                   `json:"schemaVersion"`
			MediaType     string                `json:"mediaType,omitempty"`
			Digest        digest.Digest         `json:"digest"`
			Size          int64                 `json:"size"`
			Manifests     []ocispecs.Descriptor `json:"manifests"`
			Annotations   map[string]string     `json:"annotations,omitempty"`
		}{
			SchemaVersion: p.index.Versioned.SchemaVersion,
			MediaType:     p.index.MediaType,
			Digest:        p.manifest.Digest,
			Size:          p.manifest.Size,
			Manifests:     p.index.Manifests,
			Annotations:   p.index.Annotations,
		}
	}

	switch {
	// TODO: print formatted config
	case strings.HasPrefix(format, "{{.Manifest"), strings.HasPrefix(format, "{{.BuildInfo"):
		w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
		_, _ = fmt.Fprintf(w, "Name:\t%s\n", p.ref.String())
		if strings.HasPrefix(format, "{{.Manifest") {
			_, _ = fmt.Fprintf(w, "MediaType:\t%s\n", p.manifest.MediaType)
			_, _ = fmt.Fprintf(w, "Digest:\t%s\n", p.manifest.Digest)
			_ = w.Flush()
			switch p.manifest.MediaType {
			case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
				_ = p.printManifestList(out)
			}
		} else if strings.HasPrefix(format, "{{.BuildInfo") {
			_ = w.Flush()
			_ = p.printBuildInfos(buildinfos, out)
		}
	default:
		if len(p.platforms) > 1 {
			return tpl.Execute(out, struct {
				Name      string                           `json:"name,omitempty"`
				Manifest  interface{}                      `json:"manifest,omitempty"`
				Image     map[string]*ocispecs.Image       `json:"image,omitempty"`
				BuildInfo map[string]*binfotypes.BuildInfo `json:"buildinfo,omitempty"`
			}{
				Name:      p.name,
				Manifest:  manifest,
				Image:     imageconfigs,
				BuildInfo: buildinfos,
			})
		}
		var ic *ocispecs.Image
		for _, v := range imageconfigs {
			ic = v
		}
		var bi *binfotypes.BuildInfo
		for _, v := range buildinfos {
			bi = v
		}
		return tpl.Execute(out, struct {
			Name      string                `json:"name,omitempty"`
			Manifest  interface{}           `json:"manifest,omitempty"`
			Image     *ocispecs.Image       `json:"image,omitempty"`
			BuildInfo *binfotypes.BuildInfo `json:"buildinfo,omitempty"`
		}{
			Name:      p.name,
			Manifest:  manifest,
			Image:     ic,
			BuildInfo: bi,
		})
	}

	return nil
}

func (p *Printer) printManifestList(out io.Writer) error {
	w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
	_, _ = fmt.Fprintf(w, "\t\n")
	_, _ = fmt.Fprintf(w, "Manifests:\t\n")
	_ = w.Flush()

	fmt.Fprintf(w, "Name:\t%s\n", ref.String())
	fmt.Fprintf(w, "MediaType:\t%s\n", desc.MediaType)
	fmt.Fprintf(w, "Digest:\t%s\n", desc.Digest)
	fmt.Fprintf(w, "\t\n")

	fmt.Fprintf(w, "Manifests:\t\n")
	w.Flush()

	pfx := "  "

	w = tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
	for i, m := range p.index.Manifests {
	for i, m := range mfst.Manifests {
		if i != 0 {
			_, _ = fmt.Fprintf(w, "\t\n")
			fmt.Fprintf(w, "\t\n")
		}
		cr, err := reference.WithDigest(p.ref, m.Digest)
		cr, err := reference.WithDigest(ref, m.Digest)
		if err != nil {
			return err
		}
		_, _ = fmt.Fprintf(w, "%sName:\t%s\n", defaultPfx, cr.String())
		_, _ = fmt.Fprintf(w, "%sMediaType:\t%s\n", defaultPfx, m.MediaType)
		fmt.Fprintf(w, "%sName:\t%s\n", pfx, cr.String())
		fmt.Fprintf(w, "%sMediaType:\t%s\n", pfx, m.MediaType)
		if p := m.Platform; p != nil {
			_, _ = fmt.Fprintf(w, "%sPlatform:\t%s\n", defaultPfx, platforms.Format(*p))
			fmt.Fprintf(w, "%sPlatform:\t%s\n", pfx, platforms.Format(*p))
			if p.OSVersion != "" {
				_, _ = fmt.Fprintf(w, "%sOSVersion:\t%s\n", defaultPfx, p.OSVersion)
				fmt.Fprintf(w, "%sOSVersion:\t%s\n", pfx, p.OSVersion)
			}
			if len(p.OSFeatures) > 0 {
				_, _ = fmt.Fprintf(w, "%sOSFeatures:\t%s\n", defaultPfx, strings.Join(p.OSFeatures, ", "))
				fmt.Fprintf(w, "%sOSFeatures:\t%s\n", pfx, strings.Join(p.OSFeatures, ", "))
			}
			if len(m.URLs) > 0 {
				_, _ = fmt.Fprintf(w, "%sURLs:\t%s\n", defaultPfx, strings.Join(m.URLs, ", "))
				fmt.Fprintf(w, "%sURLs:\t%s\n", pfx, strings.Join(m.URLs, ", "))
			}
			if len(m.Annotations) > 0 {
				_ = w.Flush()
				w.Flush()
				w2 := tabwriter.NewWriter(os.Stdout, 0, 0, 1, ' ', 0)
				pfx2 := pfx + "  "
				for k, v := range m.Annotations {
					_, _ = fmt.Fprintf(w2, "%s%s:\t%s\n", defaultPfx+defaultPfx, k, v)
					fmt.Fprintf(w2, "%s%s:\t%s\n", pfx2, k, v)
				}
				_ = w2.Flush()
				w2.Flush()
			}
		}
	}
	return w.Flush()
}

func (p *Printer) printBuildInfos(bis map[string]*binfotypes.BuildInfo, out io.Writer) error {
	if len(bis) == 0 {
		return nil
	} else if len(bis) == 1 {
		for _, bi := range bis {
			return p.printBuildInfo(bi, "", out)
		}
	}
	var pkeys []string
	for _, pform := range p.platforms {
		pkeys = append(pkeys, platforms.Format(pform))
	}
	sort.Strings(pkeys)
	for _, platform := range pkeys {
		bi := bis[platform]
		w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
		_, _ = fmt.Fprintf(w, "\t\nPlatform:\t%s\t\n", platform)
		_ = w.Flush()
		if err := p.printBuildInfo(bi, "", out); err != nil {
			return err
		}
	}
	return nil
}

func (p *Printer) printBuildInfo(bi *binfotypes.BuildInfo, pfx string, out io.Writer) error {
	w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
	_, _ = fmt.Fprintf(w, "%sFrontend:\t%s\n", pfx, bi.Frontend)

	if len(bi.Attrs) > 0 {
		_, _ = fmt.Fprintf(w, "%sAttrs:\t\n", pfx)
		_ = w.Flush()
		for k, v := range bi.Attrs {
			_, _ = fmt.Fprintf(w, "%s%s:\t%s\n", pfx+defaultPfx, k, *v)
		}
	}

	if len(bi.Sources) > 0 {
		_, _ = fmt.Fprintf(w, "%sSources:\t\n", pfx)
		_ = w.Flush()
		for i, v := range bi.Sources {
			if i != 0 {
				_, _ = fmt.Fprintf(w, "\t\n")
			}
			_, _ = fmt.Fprintf(w, "%sType:\t%s\n", pfx+defaultPfx, v.Type)
			_, _ = fmt.Fprintf(w, "%sRef:\t%s\n", pfx+defaultPfx, v.Ref)
			_, _ = fmt.Fprintf(w, "%sPin:\t%s\n", pfx+defaultPfx, v.Pin)
		}
	}

	if len(bi.Deps) > 0 {
		_, _ = fmt.Fprintf(w, "%sDeps:\t\n", pfx)
		_ = w.Flush()
		firstPass := true
		for k, v := range bi.Deps {
			if !firstPass {
				_, _ = fmt.Fprintf(w, "\t\n")
			}
			_, _ = fmt.Fprintf(w, "%sName:\t%s\n", pfx+defaultPfx, k)
			_ = w.Flush()
			_ = p.printBuildInfo(&v, pfx+defaultPfx, out)
			firstPass = false
		}
	}

	return w.Flush()
}

func (p *Printer) getImageConfig(platform *ocispecs.Platform) (*ocispecs.Image, []byte, error) {
	_, dtic, err := p.resolver.ImageConfig(p.ctx, p.name, platform)
	if err != nil {
		return nil, nil, err
	}
	var img *ocispecs.Image
	if err = json.Unmarshal(dtic, &img); err != nil {
		return nil, nil, err
	}
	return img, dtic, nil
}

func (p *Printer) getBuildInfo(dtic []byte) (*binfotypes.BuildInfo, error) {
	var binfo *binfotypes.BuildInfo
	if len(dtic) > 0 {
		var biconfig binfotypes.ImageConfig
		if err := json.Unmarshal(dtic, &biconfig); err != nil {
			return nil, errors.Wrap(err, "failed to unmarshal image config")
		}
		if len(biconfig.BuildInfo) > 0 {
			dtbi, err := base64.StdEncoding.DecodeString(biconfig.BuildInfo)
			if err != nil {
				return nil, errors.Wrap(err, "failed to decode build info")
			}
			if err = json.Unmarshal(dtbi, &binfo); err != nil {
				return nil, errors.Wrap(err, "failed to unmarshal build info")
			}
		}
	}
	return binfo, nil
}
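The `--format` path in `Print` above parses the user-supplied template with a small function map that adds a `json` helper. The same pattern in isolation (illustrative only, not part of the diff):

```go
package main

import (
	"encoding/json"
	"os"
	"text/template"
)

func main() {
	// Parse a format string with a "json" helper, as Print does for
	// user-supplied --format templates.
	tpl, err := template.New("").Funcs(template.FuncMap{
		"json": func(v interface{}) string {
			b, _ := json.MarshalIndent(v, "", "  ")
			return string(b)
		},
	}).Parse("{{json .}}\n")
	if err != nil {
		panic(err)
	}
	_ = tpl.Execute(os.Stdout, map[string]string{
		"mediaType": "application/vnd.oci.image.index.v1+json",
	})
}
```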
@@ -1,38 +0,0 @@
package logutil

import (
	"io/ioutil"
	"strings"

	"github.com/sirupsen/logrus"
)

func NewFilter(levels []logrus.Level, filters ...string) logrus.Hook {
	dl := logrus.New()
	dl.SetOutput(ioutil.Discard)
	return &logsFilter{
		levels:        levels,
		filters:       filters,
		discardLogger: dl,
	}
}

type logsFilter struct {
	levels        []logrus.Level
	filters       []string
	discardLogger *logrus.Logger
}

func (d *logsFilter) Levels() []logrus.Level {
	return d.levels
}

func (d *logsFilter) Fire(entry *logrus.Entry) error {
	for _, f := range d.filters {
		if strings.Contains(entry.Message, f) {
			entry.Logger = d.discardLogger
			return nil
		}
	}
	return nil
}
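The removed `logsFilter` hook silences matching entries by swapping in a discard logger rather than changing log levels. A usage sketch against the left-hand side of this diff (the filter string is just an example):

```go
package main

import (
	"github.com/docker/buildx/util/logutil"
	"github.com/sirupsen/logrus"
)

func main() {
	// Route warnings containing the given substring to a discard logger.
	logrus.AddHook(logutil.NewFilter(
		[]logrus.Level{logrus.WarnLevel},
		"commandConn.CloseWrite:",
	))
	logrus.Warn("commandConn.CloseWrite: broken pipe") // dropped
	logrus.Warn("unrelated warning")                   // printed
}
```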
@@ -1,16 +0,0 @@
package logutil

import (
	"fmt"
	"strings"

	"github.com/sirupsen/logrus"
)

type Formatter struct {
	logrus.TextFormatter
}

func (f *Formatter) Format(entry *logrus.Entry) ([]byte, error) {
	return []byte(fmt.Sprintf("%s: %s\n", strings.ToUpper(entry.Level.String()), entry.Message)), nil
}
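The removed `Formatter` renders each entry as an uppercase `LEVEL: message` line. A minimal sketch of installing it, again against the left-hand side of the diff:

```go
package main

import (
	"github.com/docker/buildx/util/logutil"
	"github.com/sirupsen/logrus"
)

func main() {
	logrus.SetFormatter(&logutil.Formatter{})
	logrus.Warn("example message") // prints "WARNING: example message"
}
```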
@@ -1,52 +0,0 @@
package logutil

import (
	"bytes"
	"io"
	"sync"

	"github.com/sirupsen/logrus"
)

func Pause(l *logrus.Logger) func() {
	// initialize formatter with original terminal settings
	l.Formatter.Format(logrus.NewEntry(l))

	bw := newBufferedWriter(l.Out)
	l.SetOutput(bw)
	return func() {
		bw.resume()
	}
}

type bufferedWriter struct {
	mu  sync.Mutex
	buf *bytes.Buffer
	w   io.Writer
}

func newBufferedWriter(w io.Writer) *bufferedWriter {
	return &bufferedWriter{
		buf: bytes.NewBuffer(nil),
		w:   w,
	}
}

func (bw *bufferedWriter) Write(p []byte) (int, error) {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	if bw.buf == nil {
		return bw.w.Write(p)
	}
	return bw.buf.Write(p)
}

func (bw *bufferedWriter) resume() {
	bw.mu.Lock()
	defer bw.mu.Unlock()
	if bw.buf == nil {
		return
	}
	io.Copy(bw.w, bw.buf)
	bw.buf = nil
}
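`Pause` swaps the logger's output for an in-memory buffer so an interactive progress display can own the terminal; calling the returned function flushes the buffer and restores pass-through writes. A usage sketch against the left-hand side of this diff:

```go
package main

import (
	"github.com/docker/buildx/util/logutil"
	"github.com/sirupsen/logrus"
)

func main() {
	resume := logutil.Pause(logrus.StandardLogger())
	// ... a progress UI would draw here; log writes land in the buffer ...
	logrus.Info("held until resume is called")
	resume() // flushes the buffered entries to the original output
}
```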
@@ -8,14 +8,14 @@ import (

func WithPrefix(w Writer, pfx string, force bool) Writer {
	return &prefixed{
		Writer: w,
		pfx:    pfx,
		force:  force,
		main:  w,
		pfx:   pfx,
		force: force,
	}
}

type prefixed struct {
	Writer
	main  Writer
	pfx   string
	force bool
}
@@ -26,7 +26,7 @@ func (p *prefixed) Write(v *client.SolveStatus) {
			v.Name = addPrefix(p.pfx, v.Name)
		}
	}
	p.Writer.Write(v)
	p.main.Write(v)
}

func addPrefix(pfx, name string) string {
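`WithPrefix` decorates another `Writer` so every vertex name carries a target prefix, which is how bake tells concurrent targets apart. A sketch against the reduced `Writer` interface shown in the writer.go hunk further down; the `printWriter` type is made up for the example:

```go
package main

import (
	"fmt"

	"github.com/docker/buildx/util/progress"
	"github.com/moby/buildkit/client"
)

// printWriter is a throwaway progress.Writer that prints vertex names.
type printWriter struct{}

func (printWriter) Write(s *client.SolveStatus) {
	for _, v := range s.Vertexes {
		fmt.Println(v.Name)
	}
}

func main() {
	w := progress.WithPrefix(printWriter{}, "app", true)
	w.Write(&client.SolveStatus{Vertexes: []*client.Vertex{{Name: "building"}}})
	// with force=true the vertex name is printed with the "[app]" prefix
}
```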
@@ -5,14 +5,10 @@ import (
	"io"
	"io/ioutil"
	"os"
	"sync"

	"github.com/containerd/console"
	"github.com/docker/buildx/util/logutil"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/util/progress/progressui"
	"github.com/opencontainers/go-digest"
	"github.com/sirupsen/logrus"
)

const (
@@ -23,12 +19,9 @@ const (
)

type Printer struct {
	status       chan *client.SolveStatus
	done         <-chan struct{}
	err          error
	warnings     []client.VertexWarning
	logMu        sync.Mutex
	logSourceMap map[digest.Digest]interface{}
	status chan *client.SolveStatus
	done   <-chan struct{}
	err    error
}

func (p *Printer) Wait() error {
@@ -41,43 +34,13 @@ func (p *Printer) Write(s *client.SolveStatus) {
	p.status <- s
}

func (p *Printer) Warnings() []client.VertexWarning {
	return p.warnings
}

func (p *Printer) ValidateLogSource(dgst digest.Digest, v interface{}) bool {
	p.logMu.Lock()
	defer p.logMu.Unlock()
	src, ok := p.logSourceMap[dgst]
	if ok {
		if src == v {
			return true
		}
	} else {
		p.logSourceMap[dgst] = v
		return true
	}
	return false
}

func (p *Printer) ClearLogSource(v interface{}) {
	p.logMu.Lock()
	defer p.logMu.Unlock()
	for d := range p.logSourceMap {
		if p.logSourceMap[d] == v {
			delete(p.logSourceMap, d)
		}
	}
}

func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string) *Printer {
func NewPrinter(ctx context.Context, out console.File, mode string) *Printer {
	statusCh := make(chan *client.SolveStatus)
	doneCh := make(chan struct{})

	pw := &Printer{
		status:       statusCh,
		done:         doneCh,
		logSourceMap: map[digest.Digest]interface{}{},
		status: statusCh,
		done:   doneCh,
	}

	if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && mode == PrinterModeAuto {
@@ -86,6 +49,7 @@ func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string)

	go func() {
		var c console.Console
		var w io.Writer = out
		switch mode {
		case PrinterModeQuiet:
			w = ioutil.Discard
@@ -94,10 +58,8 @@ func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string)
			c = cons
		}
	}
	resumeLogs := logutil.Pause(logrus.StandardLogger())
	// not using shared context to not disrupt display but let is finish reporting errors
	pw.warnings, pw.err = progressui.DisplaySolveStatus(ctx, "", c, w, statusCh)
	resumeLogs()
	pw.err = progressui.DisplaySolveStatus(ctx, "", c, w, statusCh)
	close(doneCh)
}()
	return pw
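Both sides of this hunk feed `DisplaySolveStatus` from a goroutine reading the `status` channel, while `Wait` closes the channel and collects the error. A minimal sketch using the simpler signature on the right-hand side of the diff:

```go
package main

import (
	"context"
	"os"

	"github.com/docker/buildx/util/progress"
)

func main() {
	// os.Stderr satisfies console.File; "plain" selects non-tty output.
	printer := progress.NewPrinter(context.TODO(), os.Stderr, "plain")
	progress.Write(printer, "example step", func() error { return nil })
	// Wait closes the status channel and drains the display goroutine.
	if err := printer.Wait(); err != nil {
		panic(err)
	}
}
```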
@@ -10,8 +10,6 @@ import (

type Writer interface {
	Write(*client.SolveStatus)
	ValidateLogSource(digest.Digest, interface{}) bool
	ClearLogSource(interface{})
}

func Write(w Writer, name string, f func() error) {
@@ -49,20 +47,8 @@ func NewChannel(w Writer) (chan *client.SolveStatus, chan struct{}) {
			v, ok := <-ch
			if !ok {
				close(done)
				w.ClearLogSource(done)
				return
			}

			if len(v.Logs) > 0 {
				logs := make([]*client.VertexLog, 0, len(v.Logs))
				for _, l := range v.Logs {
					if w.ValidateLogSource(l.Vertex, done) {
						logs = append(logs, l)
					}
				}
				v.Logs = logs
			}

			w.Write(v)
		}
	}()
@@ -1,74 +0,0 @@
package waitmap

import (
	"context"
	"sync"
)

type Map struct {
	mu sync.RWMutex
	m  map[string]interface{}
	ch map[string]chan struct{}
}

func New() *Map {
	return &Map{
		m:  make(map[string]interface{}),
		ch: make(map[string]chan struct{}),
	}
}

func (m *Map) Set(key string, value interface{}) {
	m.mu.Lock()
	defer m.mu.Unlock()

	m.m[key] = value

	if ch, ok := m.ch[key]; ok {
		if ch != nil {
			close(ch)
		}
	}
	m.ch[key] = nil
}

func (m *Map) Get(ctx context.Context, keys ...string) (map[string]interface{}, error) {
	if len(keys) == 0 {
		return map[string]interface{}{}, nil
	}

	if len(keys) > 1 {
		out := make(map[string]interface{})
		for _, key := range keys {
			mm, err := m.Get(ctx, key)
			if err != nil {
				return nil, err
			}
			out[key] = mm[key]
		}
		return out, nil
	}

	key := keys[0]
	m.mu.Lock()
	ch, ok := m.ch[key]
	if !ok {
		ch = make(chan struct{})
		m.ch[key] = ch
	}

	if ch != nil {
		m.mu.Unlock()
		select {
		case <-ctx.Done():
			return nil, ctx.Err()
		case <-ch:
			m.mu.Lock()
		}
	}

	res := m.m[key]
	m.mu.Unlock()

	return map[string]interface{}{key: res}, nil
}
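`Get` blocks until every requested key has been `Set` (or the context ends), which is what lets concurrent producers publish results by name. A usage sketch of the removed package:

```go
package main

import (
	"context"
	"fmt"
	"time"

	"github.com/docker/buildx/util/waitmap"
)

func main() {
	m := waitmap.New()
	go func() {
		time.Sleep(50 * time.Millisecond)
		m.Set("result", "done") // wakes up any waiter on "result"
	}()
	v, err := m.Get(context.TODO(), "result") // blocks until Set or ctx.Done()
	if err != nil {
		panic(err)
	}
	fmt.Println(v["result"]) // done
}
```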
@@ -1,64 +0,0 @@
package waitmap

import (
	"context"
	"errors"
	"testing"
	"time"

	"github.com/stretchr/testify/require"
)

func TestGetAfter(t *testing.T) {
	m := New()

	m.Set("foo", "bar")
	m.Set("bar", "baz")

	ctx := context.TODO()
	v, err := m.Get(ctx, "foo", "bar")
	require.NoError(t, err)

	require.Equal(t, 2, len(v))
	require.Equal(t, "bar", v["foo"])
	require.Equal(t, "baz", v["bar"])

	v, err = m.Get(ctx, "foo")
	require.NoError(t, err)
	require.Equal(t, 1, len(v))
	require.Equal(t, "bar", v["foo"])
}

func TestTimeout(t *testing.T) {
	m := New()

	m.Set("foo", "bar")

	ctx, cancel := context.WithTimeout(context.TODO(), 100*time.Millisecond)
	defer cancel()

	_, err := m.Get(ctx, "bar")
	require.Error(t, err)
	require.True(t, errors.Is(err, context.DeadlineExceeded))
}

func TestBlocking(t *testing.T) {
	m := New()

	m.Set("foo", "bar")

	go func() {
		time.Sleep(100 * time.Millisecond)
		m.Set("bar", "baz")
		time.Sleep(50 * time.Millisecond)
		m.Set("baz", "abc")
	}()

	ctx := context.TODO()
	v, err := m.Get(ctx, "foo", "bar", "baz")
	require.NoError(t, err)
	require.Equal(t, 3, len(v))
	require.Equal(t, "bar", v["foo"])
	require.Equal(t, "baz", v["bar"])
	require.Equal(t, "abc", v["baz"])
}
vendor/github.com/Microsoft/go-winio/README.md (generated, vendored)
@@ -11,27 +11,12 @@ package.

Please see the LICENSE file for licensing information.

## Contributing
This project has adopted the [Microsoft Open Source Code of
Conduct](https://opensource.microsoft.com/codeofconduct/). For more information
see the [Code of Conduct
FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or contact
[opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional
questions or comments.

This project welcomes contributions and suggestions. Most contributions require you to agree to a Contributor License Agreement (CLA)
declaring that you have the right to, and actually do, grant us the rights to use your contribution. For details, visit https://cla.microsoft.com.

When you submit a pull request, a CLA-bot will automatically determine whether you need to provide a CLA and decorate the PR
appropriately (e.g., label, comment). Simply follow the instructions provided by the bot. You will only need to do this once across all repos using our CLA.

We also require that contributors sign their commits using git commit -s or git commit --signoff to certify they either authored the work themselves
or otherwise have permission to use it in this project. Please see https://developercertificate.org/ for more info, as well as to make sure that you can
attest to the rules listed. Our CI uses the DCO Github app to ensure that all commits in a given PR are signed-off.


## Code of Conduct

This project has adopted the [Microsoft Open Source Code of Conduct](https://opensource.microsoft.com/codeofconduct/).
For more information see the [Code of Conduct FAQ](https://opensource.microsoft.com/codeofconduct/faq/) or
contact [opencode@microsoft.com](mailto:opencode@microsoft.com) with any additional questions or comments.



## Special Thanks
Thanks to natefinch for the inspiration for this library. See https://github.com/natefinch/npipe
for another named pipe implementation.
vendor/github.com/apparentlymart/go-textseg/v13/LICENSE (generated, vendored)
@@ -1,95 +0,0 @@
Copyright (c) 2017 Martin Atkins

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

---------

Unicode table generation programs are under a separate copyright and license:

Copyright (c) 2014 Couchbase, Inc.
Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file
except in compliance with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software distributed under the
License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
either express or implied. See the License for the specific language governing permissions
and limitations under the License.

---------

Grapheme break data is provided as part of the Unicode character database,
copright 2016 Unicode, Inc, which is provided with the following license:

Unicode Data Files include all data files under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.

Unicode Data Files do not include PDF online code charts under the
directory http://www.unicode.org/Public/.

Software includes any source code published in the Unicode Standard
or under the directories
http://www.unicode.org/Public/, http://www.unicode.org/reports/,
http://www.unicode.org/cldr/data/, http://source.icu-project.org/repos/icu/, and
http://www.unicode.org/utility/trac/browser/.

NOTICE TO USER: Carefully read the following legal agreement.
BY DOWNLOADING, INSTALLING, COPYING OR OTHERWISE USING UNICODE INC.'S
DATA FILES ("DATA FILES"), AND/OR SOFTWARE ("SOFTWARE"),
YOU UNEQUIVOCALLY ACCEPT, AND AGREE TO BE BOUND BY, ALL OF THE
TERMS AND CONDITIONS OF THIS AGREEMENT.
IF YOU DO NOT AGREE, DO NOT DOWNLOAD, INSTALL, COPY, DISTRIBUTE OR USE
THE DATA FILES OR SOFTWARE.

COPYRIGHT AND PERMISSION NOTICE

Copyright © 1991-2017 Unicode, Inc. All rights reserved.
Distributed under the Terms of Use in http://www.unicode.org/copyright.html.

Permission is hereby granted, free of charge, to any person obtaining
a copy of the Unicode data files and any associated documentation
(the "Data Files") or Unicode software and any associated documentation
(the "Software") to deal in the Data Files or Software
without restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, and/or sell copies of
the Data Files or Software, and to permit persons to whom the Data Files
or Software are furnished to do so, provided that either
(a) this copyright and permission notice appear with all copies
of the Data Files or Software, or
(b) this copyright and permission notice appear in associated
Documentation.

THE DATA FILES AND SOFTWARE ARE PROVIDED "AS IS", WITHOUT WARRANTY OF
ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE
WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT OF THIRD PARTY RIGHTS.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR HOLDERS INCLUDED IN THIS
NOTICE BE LIABLE FOR ANY CLAIM, OR ANY SPECIAL INDIRECT OR CONSEQUENTIAL
DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE,
DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE OR OTHER
TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
PERFORMANCE OF THE DATA FILES OR SOFTWARE.

Except as contained in this notice, the name of a copyright holder
shall not be used in advertising or otherwise to promote the sale,
use or other dealings in these Data Files or Software without prior
written authorization of the copyright holder.
vendor/github.com/apparentlymart/go-textseg/v13/textseg/all_tokens.go (generated, vendored)
@@ -1,30 +0,0 @@
package textseg

import (
	"bufio"
	"bytes"
)

// AllTokens is a utility that uses a bufio.SplitFunc to produce a slice of
// all of the recognized tokens in the given buffer.
func AllTokens(buf []byte, splitFunc bufio.SplitFunc) ([][]byte, error) {
	scanner := bufio.NewScanner(bytes.NewReader(buf))
	scanner.Split(splitFunc)
	var ret [][]byte
	for scanner.Scan() {
		ret = append(ret, scanner.Bytes())
	}
	return ret, scanner.Err()
}

// TokenCount is a utility that uses a bufio.SplitFunc to count the number of
// recognized tokens in the given buffer.
func TokenCount(buf []byte, splitFunc bufio.SplitFunc) (int, error) {
	scanner := bufio.NewScanner(bytes.NewReader(buf))
	scanner.Split(splitFunc)
	var ret int
	for scanner.Scan() {
		ret++
	}
	return ret, scanner.Err()
}
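`AllTokens` and `TokenCount` wrap any `bufio.SplitFunc`; combined with the package's grapheme-cluster splitter they segment UTF-8 text. A sketch against the version being dropped from vendor here:

```go
package main

import (
	"fmt"

	"github.com/apparentlymart/go-textseg/v13/textseg"
)

func main() {
	// Count grapheme clusters rather than bytes or runes.
	n, err := textseg.TokenCount([]byte("héllo"), textseg.ScanGraphemeClusters)
	if err != nil {
		panic(err)
	}
	fmt.Println(n) // 5
}
```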
vendor/github.com/apparentlymart/go-textseg/v13/textseg/emoji_table.rl (generated, vendored)
@@ -1,525 +0,0 @@
# The following Ragel file was autogenerated with unicode2ragel.rb
# from: https://www.unicode.org/Public/13.0.0/ucd/emoji/emoji-data.txt
#
# It defines ["Extended_Pictographic"].
#
# To use this, make sure that your alphtype is set to byte,
# and that your input is in utf8.

%%{
  machine Emoji;

  Extended_Pictographic =
      0xC2 0xA9 #E0.6 [1] (©️) copyright
    | 0xC2 0xAE #E0.6 [1] (®️) registered
    | 0xE2 0x80 0xBC #E0.6 [1] (‼️) double exclamation mark
    | 0xE2 0x81 0x89 #E0.6 [1] (⁉️) exclamation question ...
    | 0xE2 0x84 0xA2 #E0.6 [1] (™️) trade mark
    | 0xE2 0x84 0xB9 #E0.6 [1] (ℹ️) information
    | 0xE2 0x86 0x94..0x99 #E0.6 [6] (↔️..↙️) left-right arrow..do...
    | 0xE2 0x86 0xA9..0xAA #E0.6 [2] (↩️..↪️) right arrow curving ...
    | 0xE2 0x8C 0x9A..0x9B #E0.6 [2] (⌚..⌛) watch..hourglass done
    | 0xE2 0x8C 0xA8 #E1.0 [1] (⌨️) keyboard
    | 0xE2 0x8E 0x88 #E0.0 [1] (⎈) HELM SYMBOL
    | 0xE2 0x8F 0x8F #E1.0 [1] (⏏️) eject button
    | 0xE2 0x8F 0xA9..0xAC #E0.6 [4] (⏩..⏬) fast-forward button..f...
    | 0xE2 0x8F 0xAD..0xAE #E0.7 [2] (⏭️..⏮️) next track button..l...
    | 0xE2 0x8F 0xAF #E1.0 [1] (⏯️) play or pause button
    | 0xE2 0x8F 0xB0 #E0.6 [1] (⏰) alarm clock
    | 0xE2 0x8F 0xB1..0xB2 #E1.0 [2] (⏱️..⏲️) stopwatch..timer clock
    | 0xE2 0x8F 0xB3 #E0.6 [1] (⏳) hourglass not done
    | 0xE2 0x8F 0xB8..0xBA #E0.7 [3] (⏸️..⏺️) pause button..record...
    | 0xE2 0x93 0x82 #E0.6 [1] (Ⓜ️) circled M
    | 0xE2 0x96 0xAA..0xAB #E0.6 [2] (▪️..▫️) black small square.....
    | 0xE2 0x96 0xB6 #E0.6 [1] (▶️) play button
    | 0xE2 0x97 0x80 #E0.6 [1] (◀️) reverse button
    | 0xE2 0x97 0xBB..0xBE #E0.6 [4] (◻️..◾) white medium square.....
    | 0xE2 0x98 0x80..0x81 #E0.6 [2] (☀️..☁️) sun..cloud
    | 0xE2 0x98 0x82..0x83 #E0.7 [2] (☂️..☃️) umbrella..snowman
    | 0xE2 0x98 0x84 #E1.0 [1] (☄️) comet
    | 0xE2 0x98 0x85 #E0.0 [1] (★) BLACK STAR
    | 0xE2 0x98 0x87..0x8D #E0.0 [7] (☇..☍) LIGHTNING..OPPOSITION
    | 0xE2 0x98 0x8E #E0.6 [1] (☎️) telephone
    | 0xE2 0x98 0x8F..0x90 #E0.0 [2] (☏..☐) WHITE TELEPHONE..BALLO...
    | 0xE2 0x98 0x91 #E0.6 [1] (☑️) check box with check
    | 0xE2 0x98 0x92 #E0.0 [1] (☒) BALLOT BOX WITH X
    | 0xE2 0x98 0x94..0x95 #E0.6 [2] (☔..☕) umbrella with rain dro...
    | 0xE2 0x98 0x96..0x97 #E0.0 [2] (☖..☗) WHITE SHOGI PIECE..BLA...
    | 0xE2 0x98 0x98 #E1.0 [1] (☘️) shamrock
    | 0xE2 0x98 0x99..0x9C #E0.0 [4] (☙..☜) REVERSED ROTATED FLORA...
    | 0xE2 0x98 0x9D #E0.6 [1] (☝️) index pointing up
    | 0xE2 0x98 0x9E..0x9F #E0.0 [2] (☞..☟) WHITE RIGHT POINTING I...
    | 0xE2 0x98 0xA0 #E1.0 [1] (☠️) skull and crossbones
    | 0xE2 0x98 0xA1 #E0.0 [1] (☡) CAUTION SIGN
    | 0xE2 0x98 0xA2..0xA3 #E1.0 [2] (☢️..☣️) radioactive..biohazard
    | 0xE2 0x98 0xA4..0xA5 #E0.0 [2] (☤..☥) CADUCEUS..ANKH
    | 0xE2 0x98 0xA6 #E1.0 [1] (☦️) orthodox cross
    | 0xE2 0x98 0xA7..0xA9 #E0.0 [3] (☧..☩) CHI RHO..CROSS OF JERU...
    | 0xE2 0x98 0xAA #E0.7 [1] (☪️) star and crescent
    | 0xE2 0x98 0xAB..0xAD #E0.0 [3] (☫..☭) FARSI SYMBOL..HAMMER A...
    | 0xE2 0x98 0xAE #E1.0 [1] (☮️) peace symbol
    | 0xE2 0x98 0xAF #E0.7 [1] (☯️) yin yang
    | 0xE2 0x98 0xB0..0xB7 #E0.0 [8] (☰..☷) TRIGRAM FOR HEAVEN..TR...
    | 0xE2 0x98 0xB8..0xB9 #E0.7 [2] (☸️..☹️) wheel of dharma..fro...
    | 0xE2 0x98 0xBA #E0.6 [1] (☺️) smiling face
    | 0xE2 0x98 0xBB..0xBF #E0.0 [5] (☻..☿) BLACK SMILING FACE..ME...
    | 0xE2 0x99 0x80 #E4.0 [1] (♀️) female sign
    | 0xE2 0x99 0x81 #E0.0 [1] (♁) EARTH
    | 0xE2 0x99 0x82 #E4.0 [1] (♂️) male sign
    | 0xE2 0x99 0x83..0x87 #E0.0 [5] (♃..♇) JUPITER..PLUTO
    | 0xE2 0x99 0x88..0x93 #E0.6 [12] (♈..♓) Aries..Pisces
    | 0xE2 0x99 0x94..0x9E #E0.0 [11] (♔..♞) WHITE CHESS KING..BLAC...
    | 0xE2 0x99 0x9F #E11.0 [1] (♟️) chess pawn
    | 0xE2 0x99 0xA0 #E0.6 [1] (♠️) spade suit
    | 0xE2 0x99 0xA1..0xA2 #E0.0 [2] (♡..♢) WHITE HEART SUIT..WHIT...
    | 0xE2 0x99 0xA3 #E0.6 [1] (♣️) club suit
    | 0xE2 0x99 0xA4 #E0.0 [1] (♤) WHITE SPADE SUIT
    | 0xE2 0x99 0xA5..0xA6 #E0.6 [2] (♥️..♦️) heart suit..diamond ...
    | 0xE2 0x99 0xA7 #E0.0 [1] (♧) WHITE CLUB SUIT
    | 0xE2 0x99 0xA8 #E0.6 [1] (♨️) hot springs
    | 0xE2 0x99 0xA9..0xBA #E0.0 [18] (♩..♺) QUARTER NOTE..RECYCLIN...
    | 0xE2 0x99 0xBB #E0.6 [1] (♻️) recycling symbol
    | 0xE2 0x99 0xBC..0xBD #E0.0 [2] (♼..♽) RECYCLED PAPER SYMBOL....
    | 0xE2 0x99 0xBE #E11.0 [1] (♾️) infinity
    | 0xE2 0x99 0xBF #E0.6 [1] (♿) wheelchair symbol
    | 0xE2 0x9A 0x80..0x85 #E0.0 [6] (⚀..⚅) DIE FACE-1..DIE FACE-6
    | 0xE2 0x9A 0x90..0x91 #E0.0 [2] (⚐..⚑) WHITE FLAG..BLACK FLAG
    | 0xE2 0x9A 0x92 #E1.0 [1] (⚒️) hammer and pick
    | 0xE2 0x9A 0x93 #E0.6 [1] (⚓) anchor
    | 0xE2 0x9A 0x94 #E1.0 [1] (⚔️) crossed swords
    | 0xE2 0x9A 0x95 #E4.0 [1] (⚕️) medical symbol
    | 0xE2 0x9A 0x96..0x97 #E1.0 [2] (⚖️..⚗️) balance scale..alembic
    | 0xE2 0x9A 0x98 #E0.0 [1] (⚘) FLOWER
    | 0xE2 0x9A 0x99 #E1.0 [1] (⚙️) gear
    | 0xE2 0x9A 0x9A #E0.0 [1] (⚚) STAFF OF HERMES
    | 0xE2 0x9A 0x9B..0x9C #E1.0 [2] (⚛️..⚜️) atom symbol..fleur-d...
    | 0xE2 0x9A 0x9D..0x9F #E0.0 [3] (⚝..⚟) OUTLINED WHITE STAR..T...
    | 0xE2 0x9A 0xA0..0xA1 #E0.6 [2] (⚠️..⚡) warning..high voltage
    | 0xE2 0x9A 0xA2..0xA6 #E0.0 [5] (⚢..⚦) DOUBLED FEMALE SIGN..M...
    | 0xE2 0x9A 0xA7 #E13.0 [1] (⚧️) transgender symbol
    | 0xE2 0x9A 0xA8..0xA9 #E0.0 [2] (⚨..⚩) VERTICAL MALE WITH STR...
    | 0xE2 0x9A 0xAA..0xAB #E0.6 [2] (⚪..⚫) white circle..black ci...
    | 0xE2 0x9A 0xAC..0xAF #E0.0 [4] (⚬..⚯) MEDIUM SMALL WHITE CIR...
    | 0xE2 0x9A 0xB0..0xB1 #E1.0 [2] (⚰️..⚱️) coffin..funeral urn
    | 0xE2 0x9A 0xB2..0xBC #E0.0 [11] (⚲..⚼) NEUTER..SESQUIQUADRATE
    | 0xE2 0x9A 0xBD..0xBE #E0.6 [2] (⚽..⚾) soccer ball..baseball
    | 0xE2 0x9A 0xBF..0xFF #E0.0 [5] (⚿..⛃) SQUARED KEY..BLACK DRA...
    | 0xE2 0x9B 0x00..0x83 #
    | 0xE2 0x9B 0x84..0x85 #E0.6 [2] (⛄..⛅) snowman without snow.....
    | 0xE2 0x9B 0x86..0x87 #E0.0 [2] (⛆..⛇) RAIN..BLACK SNOWMAN
    | 0xE2 0x9B 0x88 #E0.7 [1] (⛈️) cloud with lightning ...
    | 0xE2 0x9B 0x89..0x8D #E0.0 [5] (⛉..⛍) TURNED WHITE SHOGI PIE...
    | 0xE2 0x9B 0x8E #E0.6 [1] (⛎) Ophiuchus
    | 0xE2 0x9B 0x8F #E0.7 [1] (⛏️) pick
    | 0xE2 0x9B 0x90 #E0.0 [1] (⛐) CAR SLIDING
    | 0xE2 0x9B 0x91 #E0.7 [1] (⛑️) rescue worker’s helmet
    | 0xE2 0x9B 0x92 #E0.0 [1] (⛒) CIRCLED CROSSING LANES
    | 0xE2 0x9B 0x93 #E0.7 [1] (⛓️) chains
    | 0xE2 0x9B 0x94 #E0.6 [1] (⛔) no entry
    | 0xE2 0x9B 0x95..0xA8 #E0.0 [20] (⛕..⛨) ALTERNATE ONE-WAY LEFT...
    | 0xE2 0x9B 0xA9 #E0.7 [1] (⛩️) shinto shrine
    | 0xE2 0x9B 0xAA #E0.6 [1] (⛪) church
    | 0xE2 0x9B 0xAB..0xAF #E0.0 [5] (⛫..⛯) CASTLE..MAP SYMBOL FOR...
    | 0xE2 0x9B 0xB0..0xB1 #E0.7 [2] (⛰️..⛱️) mountain..umbrella o...
    | 0xE2 0x9B 0xB2..0xB3 #E0.6 [2] (⛲..⛳) fountain..flag in hole
    | 0xE2 0x9B 0xB4 #E0.7 [1] (⛴️) ferry
    | 0xE2 0x9B 0xB5 #E0.6 [1] (⛵) sailboat
    | 0xE2 0x9B 0xB6 #E0.0 [1] (⛶) SQUARE FOUR CORNERS
    | 0xE2 0x9B 0xB7..0xB9 #E0.7 [3] (⛷️..⛹️) skier..person bounci...
    | 0xE2 0x9B 0xBA #E0.6 [1] (⛺) tent
    | 0xE2 0x9B 0xBB..0xBC #E0.0 [2] (⛻..⛼) JAPANESE BANK SYMBOL.....
    | 0xE2 0x9B 0xBD #E0.6 [1] (⛽) fuel pump
    | 0xE2 0x9B 0xBE..0xFF #E0.0 [4] (⛾..✁) CUP ON BLACK SQUARE..U...
    | 0xE2 0x9C 0x00..0x81 #
    | 0xE2 0x9C 0x82 #E0.6 [1] (✂️) scissors
    | 0xE2 0x9C 0x83..0x84 #E0.0 [2] (✃..✄) LOWER BLADE SCISSORS.....
    | 0xE2 0x9C 0x85 #E0.6 [1] (✅) check mark button
    | 0xE2 0x9C 0x88..0x8C #E0.6 [5] (✈️..✌️) airplane..victory hand
    | 0xE2 0x9C 0x8D #E0.7 [1] (✍️) writing hand
    | 0xE2 0x9C 0x8E #E0.0 [1] (✎) LOWER RIGHT PENCIL
    | 0xE2 0x9C 0x8F #E0.6 [1] (✏️) pencil
    | 0xE2 0x9C 0x90..0x91 #E0.0 [2] (✐..✑) UPPER RIGHT PENCIL..WH...
    | 0xE2 0x9C 0x92 #E0.6 [1] (✒️) black nib
    | 0xE2 0x9C 0x94 #E0.6 [1] (✔️) check mark
    | 0xE2 0x9C 0x96 #E0.6 [1] (✖️) multiply
    | 0xE2 0x9C 0x9D #E0.7 [1] (✝️) latin cross
    | 0xE2 0x9C 0xA1 #E0.7 [1] (✡️) star of David
    | 0xE2 0x9C 0xA8 #E0.6 [1] (✨) sparkles
    | 0xE2 0x9C 0xB3..0xB4 #E0.6 [2] (✳️..✴️) eight-spoked asteris...
    | 0xE2 0x9D 0x84 #E0.6 [1] (❄️) snowflake
    | 0xE2 0x9D 0x87 #E0.6 [1] (❇️) sparkle
    | 0xE2 0x9D 0x8C #E0.6 [1] (❌) cross mark
    | 0xE2 0x9D 0x8E #E0.6 [1] (❎) cross mark button
    | 0xE2 0x9D 0x93..0x95 #E0.6 [3] (❓..❕) question mark..white e...
    | 0xE2 0x9D 0x97 #E0.6 [1] (❗) exclamation mark
    | 0xE2 0x9D 0xA3 #E1.0 [1] (❣️) heart exclamation
    | 0xE2 0x9D 0xA4 #E0.6 [1] (❤️) red heart
    | 0xE2 0x9D 0xA5..0xA7 #E0.0 [3] (❥..❧) ROTATED HEAVY BLACK HE...
    | 0xE2 0x9E 0x95..0x97 #E0.6 [3] (➕..➗) plus..divide
    | 0xE2 0x9E 0xA1 #E0.6 [1] (➡️) right arrow
    | 0xE2 0x9E 0xB0 #E0.6 [1] (➰) curly loop
    | 0xE2 0x9E 0xBF #E1.0 [1] (➿) double curly loop
    | 0xE2 0xA4 0xB4..0xB5 #E0.6 [2] (⤴️..⤵️) right arrow curving ...
    | 0xE2 0xAC 0x85..0x87 #E0.6 [3] (⬅️..⬇️) left arrow..down arrow
    | 0xE2 0xAC 0x9B..0x9C #E0.6 [2] (⬛..⬜) black large square..wh...
    | 0xE2 0xAD 0x90 #E0.6 [1] (⭐) star
    | 0xE2 0xAD 0x95 #E0.6 [1] (⭕) hollow red circle
    | 0xE3 0x80 0xB0 #E0.6 [1] (〰️) wavy dash
    | 0xE3 0x80 0xBD #E0.6 [1] (〽️) part alternation mark
    | 0xE3 0x8A 0x97 #E0.6 [1] (㊗️) Japanese “congratulat...
    | 0xE3 0x8A 0x99 #E0.6 [1] (㊙️) Japanese “secret” button
    | 0xF0 0x9F 0x80 0x80..0x83 #E0.0 [4] (🀀..🀃) MAHJONG TILE EAST W...
    | 0xF0 0x9F 0x80 0x84 #E0.6 [1] (🀄) mahjong red dragon
    | 0xF0 0x9F 0x80 0x85..0xFF #E0.0 [202] (🀅..🃎) MAHJONG TILE ...
    | 0xF0 0x9F 0x81..0x82 0x00..0xFF #
    | 0xF0 0x9F 0x83 0x00..0x8E #
    | 0xF0 0x9F 0x83 0x8F #E0.6 [1] (🃏) joker
    | 0xF0 0x9F 0x83 0x90..0xBF #E0.0 [48] (..) <reserved-1F0D0>..<...
    | 0xF0 0x9F 0x84 0x8D..0x8F #E0.0 [3] (🄍..🄏) CIRCLED ZERO WITH S...
    | 0xF0 0x9F 0x84 0xAF #E0.0 [1] (🄯) COPYLEFT SYMBOL
    | 0xF0 0x9F 0x85 0xAC..0xAF #E0.0 [4] (🅬..🅯) RAISED MR SIGN..CIR...
    | 0xF0 0x9F 0x85 0xB0..0xB1 #E0.6 [2] (🅰️..🅱️) A button (blood t...
    | 0xF0 0x9F 0x85 0xBE..0xBF #E0.6 [2] (🅾️..🅿️) O button (blood t...
    | 0xF0 0x9F 0x86 0x8E #E0.6 [1] (🆎) AB button (blood type)
    | 0xF0 0x9F 0x86 0x91..0x9A #E0.6 [10] (🆑..🆚) CL button..VS button
    | 0xF0 0x9F 0x86 0xAD..0xFF #E0.0 [57] (🆭..) MASK WORK SYMBOL..<...
    | 0xF0 0x9F 0x87 0x00..0xA5 #
    | 0xF0 0x9F 0x88 0x81..0x82 #E0.6 [2] (🈁..🈂️) Japanese “here” bu...
    | 0xF0 0x9F 0x88 0x83..0x8F #E0.0 [13] (..) <reserved-1F203>..<...
    | 0xF0 0x9F 0x88 0x9A #E0.6 [1] (🈚) Japanese “free of char...
    | 0xF0 0x9F 0x88 0xAF #E0.6 [1] (🈯) Japanese “reserved” bu...
    | 0xF0 0x9F 0x88 0xB2..0xBA #E0.6 [9] (🈲..🈺) Japanese “prohibite...
    | 0xF0 0x9F 0x88 0xBC..0xBF #E0.0 [4] (..) <reserved-1F23C>..<...
    | 0xF0 0x9F 0x89 0x89..0x8F #E0.0 [7] (..) <reserved-1F249>..<...
    | 0xF0 0x9F 0x89 0x90..0x91 #E0.6 [2] (🉐..🉑) Japanese “bargain” ...
    | 0xF0 0x9F 0x89 0x92..0xFF #E0.0 [174] (..) <reserved-1F2...
    | 0xF0 0x9F 0x8A..0x8A 0x00..0xFF #
    | 0xF0 0x9F 0x8B 0x00..0xBF #
    | 0xF0 0x9F 0x8C 0x80..0x8C #E0.6 [13] (🌀..🌌) cyclone..milky way
    | 0xF0 0x9F 0x8C 0x8D..0x8E #E0.7 [2] (🌍..🌎) globe showing Europ...
    | 0xF0 0x9F 0x8C 0x8F #E0.6 [1] (🌏) globe showing Asia-Aus...
    | 0xF0 0x9F 0x8C 0x90 #E1.0 [1] (🌐) globe with meridians
    | 0xF0 0x9F 0x8C 0x91 #E0.6 [1] (🌑) new moon
    | 0xF0 0x9F 0x8C 0x92 #E1.0 [1] (🌒) waxing crescent moon
    | 0xF0 0x9F 0x8C 0x93..0x95 #E0.6 [3] (🌓..🌕) first quarter moon....
    | 0xF0 0x9F 0x8C 0x96..0x98 #E1.0 [3] (🌖..🌘) waning gibbous moon...
    | 0xF0 0x9F 0x8C 0x99 #E0.6 [1] (🌙) crescent moon
    | 0xF0 0x9F 0x8C 0x9A #E1.0 [1] (🌚) new moon face
    | 0xF0 0x9F 0x8C 0x9B #E0.6 [1] (🌛) first quarter moon face
    | 0xF0 0x9F 0x8C 0x9C #E0.7 [1] (🌜) last quarter moon face
    | 0xF0 0x9F 0x8C 0x9D..0x9E #E1.0 [2] (🌝..🌞) full moon face..sun...
    | 0xF0 0x9F 0x8C 0x9F..0xA0 #E0.6 [2] (🌟..🌠) glowing star..shoot...
    | 0xF0 0x9F 0x8C 0xA1 #E0.7 [1] (🌡️) thermometer
    | 0xF0 0x9F 0x8C 0xA2..0xA3 #E0.0 [2] (🌢..🌣) BLACK DROPLET..WHIT...
    | 0xF0 0x9F 0x8C 0xA4..0xAC #E0.7 [9] (🌤️..🌬️) sun behind small ...
    | 0xF0 0x9F 0x8C 0xAD..0xAF #E1.0 [3] (🌭..🌯) hot dog..burrito
    | 0xF0 0x9F 0x8C 0xB0..0xB1 #E0.6 [2] (🌰..🌱) chestnut..seedling
    | 0xF0 0x9F 0x8C 0xB2..0xB3 #E1.0 [2] (🌲..🌳) evergreen tree..dec...
    | 0xF0 0x9F 0x8C 0xB4..0xB5 #E0.6 [2] (🌴..🌵) palm tree..cactus
|
||||
| 0xF0 0x9F 0x8C 0xB6 #E0.7 [1] (🌶️) hot pepper
|
||||
| 0xF0 0x9F 0x8C 0xB7..0xFF #E0.6 [20] (🌷..🍊) tulip..tangerine
|
||||
| 0xF0 0x9F 0x8D 0x00..0x8A #
|
||||
| 0xF0 0x9F 0x8D 0x8B #E1.0 [1] (🍋) lemon
|
||||
| 0xF0 0x9F 0x8D 0x8C..0x8F #E0.6 [4] (🍌..🍏) banana..green apple
|
||||
| 0xF0 0x9F 0x8D 0x90 #E1.0 [1] (🍐) pear
|
||||
| 0xF0 0x9F 0x8D 0x91..0xBB #E0.6 [43] (🍑..🍻) peach..clinking bee...
|
||||
| 0xF0 0x9F 0x8D 0xBC #E1.0 [1] (🍼) baby bottle
|
||||
| 0xF0 0x9F 0x8D 0xBD #E0.7 [1] (🍽️) fork and knife with p...
|
||||
| 0xF0 0x9F 0x8D 0xBE..0xBF #E1.0 [2] (🍾..🍿) bottle with popping...
|
||||
| 0xF0 0x9F 0x8E 0x80..0x93 #E0.6 [20] (🎀..🎓) ribbon..graduation cap
|
||||
| 0xF0 0x9F 0x8E 0x94..0x95 #E0.0 [2] (🎔..🎕) HEART WITH TIP ON T...
|
||||
| 0xF0 0x9F 0x8E 0x96..0x97 #E0.7 [2] (🎖️..🎗️) military medal..r...
|
||||
| 0xF0 0x9F 0x8E 0x98 #E0.0 [1] (🎘) MUSICAL KEYBOARD WITH ...
|
||||
| 0xF0 0x9F 0x8E 0x99..0x9B #E0.7 [3] (🎙️..🎛️) studio microphone...
|
||||
| 0xF0 0x9F 0x8E 0x9C..0x9D #E0.0 [2] (🎜..🎝) BEAMED ASCENDING MU...
|
||||
| 0xF0 0x9F 0x8E 0x9E..0x9F #E0.7 [2] (🎞️..🎟️) film frames..admi...
|
||||
| 0xF0 0x9F 0x8E 0xA0..0xFF #E0.6 [37] (🎠..🏄) carousel horse..per...
|
||||
| 0xF0 0x9F 0x8F 0x00..0x84 #
|
||||
| 0xF0 0x9F 0x8F 0x85 #E1.0 [1] (🏅) sports medal
|
||||
| 0xF0 0x9F 0x8F 0x86 #E0.6 [1] (🏆) trophy
|
||||
| 0xF0 0x9F 0x8F 0x87 #E1.0 [1] (🏇) horse racing
|
||||
| 0xF0 0x9F 0x8F 0x88 #E0.6 [1] (🏈) american football
|
||||
| 0xF0 0x9F 0x8F 0x89 #E1.0 [1] (🏉) rugby football
|
||||
| 0xF0 0x9F 0x8F 0x8A #E0.6 [1] (🏊) person swimming
|
||||
| 0xF0 0x9F 0x8F 0x8B..0x8E #E0.7 [4] (🏋️..🏎️) person lifting we...
|
||||
| 0xF0 0x9F 0x8F 0x8F..0x93 #E1.0 [5] (🏏..🏓) cricket game..ping ...
|
||||
| 0xF0 0x9F 0x8F 0x94..0x9F #E0.7 [12] (🏔️..🏟️) snow-capped mount...
|
||||
| 0xF0 0x9F 0x8F 0xA0..0xA3 #E0.6 [4] (🏠..🏣) house..Japanese pos...
|
||||
| 0xF0 0x9F 0x8F 0xA4 #E1.0 [1] (🏤) post office
|
||||
| 0xF0 0x9F 0x8F 0xA5..0xB0 #E0.6 [12] (🏥..🏰) hospital..castle
|
||||
| 0xF0 0x9F 0x8F 0xB1..0xB2 #E0.0 [2] (🏱..🏲) WHITE PENNANT..BLAC...
|
||||
| 0xF0 0x9F 0x8F 0xB3 #E0.7 [1] (🏳️) white flag
|
||||
| 0xF0 0x9F 0x8F 0xB4 #E1.0 [1] (🏴) black flag
|
||||
| 0xF0 0x9F 0x8F 0xB5 #E0.7 [1] (🏵️) rosette
|
||||
| 0xF0 0x9F 0x8F 0xB6 #E0.0 [1] (🏶) BLACK ROSETTE
|
||||
| 0xF0 0x9F 0x8F 0xB7 #E0.7 [1] (🏷️) label
|
||||
| 0xF0 0x9F 0x8F 0xB8..0xBA #E1.0 [3] (🏸..🏺) badminton..amphora
|
||||
| 0xF0 0x9F 0x90 0x80..0x87 #E1.0 [8] (🐀..🐇) rat..rabbit
|
||||
| 0xF0 0x9F 0x90 0x88 #E0.7 [1] (🐈) cat
|
||||
| 0xF0 0x9F 0x90 0x89..0x8B #E1.0 [3] (🐉..🐋) dragon..whale
|
||||
| 0xF0 0x9F 0x90 0x8C..0x8E #E0.6 [3] (🐌..🐎) snail..horse
|
||||
| 0xF0 0x9F 0x90 0x8F..0x90 #E1.0 [2] (🐏..🐐) ram..goat
|
||||
| 0xF0 0x9F 0x90 0x91..0x92 #E0.6 [2] (🐑..🐒) ewe..monkey
|
||||
| 0xF0 0x9F 0x90 0x93 #E1.0 [1] (🐓) rooster
|
||||
| 0xF0 0x9F 0x90 0x94 #E0.6 [1] (🐔) chicken
|
||||
| 0xF0 0x9F 0x90 0x95 #E0.7 [1] (🐕) dog
|
||||
| 0xF0 0x9F 0x90 0x96 #E1.0 [1] (🐖) pig
|
||||
| 0xF0 0x9F 0x90 0x97..0xA9 #E0.6 [19] (🐗..🐩) boar..poodle
|
||||
| 0xF0 0x9F 0x90 0xAA #E1.0 [1] (🐪) camel
|
||||
| 0xF0 0x9F 0x90 0xAB..0xBE #E0.6 [20] (🐫..🐾) two-hump camel..paw...
|
||||
| 0xF0 0x9F 0x90 0xBF #E0.7 [1] (🐿️) chipmunk
|
||||
| 0xF0 0x9F 0x91 0x80 #E0.6 [1] (👀) eyes
|
||||
| 0xF0 0x9F 0x91 0x81 #E0.7 [1] (👁️) eye
|
||||
| 0xF0 0x9F 0x91 0x82..0xA4 #E0.6 [35] (👂..👤) ear..bust in silhou...
|
||||
| 0xF0 0x9F 0x91 0xA5 #E1.0 [1] (👥) busts in silhouette
|
||||
| 0xF0 0x9F 0x91 0xA6..0xAB #E0.6 [6] (👦..👫) boy..woman and man ...
|
||||
| 0xF0 0x9F 0x91 0xAC..0xAD #E1.0 [2] (👬..👭) men holding hands.....
|
||||
| 0xF0 0x9F 0x91 0xAE..0xFF #E0.6 [63] (👮..💬) police officer..spe...
|
||||
| 0xF0 0x9F 0x92 0x00..0xAC #
|
||||
| 0xF0 0x9F 0x92 0xAD #E1.0 [1] (💭) thought balloon
|
||||
| 0xF0 0x9F 0x92 0xAE..0xB5 #E0.6 [8] (💮..💵) white flower..dolla...
|
||||
| 0xF0 0x9F 0x92 0xB6..0xB7 #E1.0 [2] (💶..💷) euro banknote..poun...
|
||||
| 0xF0 0x9F 0x92 0xB8..0xFF #E0.6 [52] (💸..📫) money with wings..c...
|
||||
| 0xF0 0x9F 0x93 0x00..0xAB #
|
||||
| 0xF0 0x9F 0x93 0xAC..0xAD #E0.7 [2] (📬..📭) open mailbox with r...
|
||||
| 0xF0 0x9F 0x93 0xAE #E0.6 [1] (📮) postbox
|
||||
| 0xF0 0x9F 0x93 0xAF #E1.0 [1] (📯) postal horn
|
||||
| 0xF0 0x9F 0x93 0xB0..0xB4 #E0.6 [5] (📰..📴) newspaper..mobile p...
|
||||
| 0xF0 0x9F 0x93 0xB5 #E1.0 [1] (📵) no mobile phones
|
||||
| 0xF0 0x9F 0x93 0xB6..0xB7 #E0.6 [2] (📶..📷) antenna bars..camera
|
||||
| 0xF0 0x9F 0x93 0xB8 #E1.0 [1] (📸) camera with flash
|
||||
| 0xF0 0x9F 0x93 0xB9..0xBC #E0.6 [4] (📹..📼) video camera..video...
|
||||
| 0xF0 0x9F 0x93 0xBD #E0.7 [1] (📽️) film projector
|
||||
| 0xF0 0x9F 0x93 0xBE #E0.0 [1] (📾) PORTABLE STEREO
|
||||
| 0xF0 0x9F 0x93 0xBF..0xFF #E1.0 [4] (📿..🔂) prayer beads..repea...
|
||||
| 0xF0 0x9F 0x94 0x00..0x82 #
|
||||
| 0xF0 0x9F 0x94 0x83 #E0.6 [1] (🔃) clockwise vertical arrows
|
||||
| 0xF0 0x9F 0x94 0x84..0x87 #E1.0 [4] (🔄..🔇) counterclockwise ar...
|
||||
| 0xF0 0x9F 0x94 0x88 #E0.7 [1] (🔈) speaker low volume
|
||||
| 0xF0 0x9F 0x94 0x89 #E1.0 [1] (🔉) speaker medium volume
|
||||
| 0xF0 0x9F 0x94 0x8A..0x94 #E0.6 [11] (🔊..🔔) speaker high volume...
|
||||
| 0xF0 0x9F 0x94 0x95 #E1.0 [1] (🔕) bell with slash
|
||||
| 0xF0 0x9F 0x94 0x96..0xAB #E0.6 [22] (🔖..🔫) bookmark..pistol
|
||||
| 0xF0 0x9F 0x94 0xAC..0xAD #E1.0 [2] (🔬..🔭) microscope..telescope
|
||||
| 0xF0 0x9F 0x94 0xAE..0xBD #E0.6 [16] (🔮..🔽) crystal ball..downw...
|
||||
| 0xF0 0x9F 0x95 0x86..0x88 #E0.0 [3] (🕆..🕈) WHITE LATIN CROSS.....
|
||||
| 0xF0 0x9F 0x95 0x89..0x8A #E0.7 [2] (🕉️..🕊️) om..dove
|
||||
| 0xF0 0x9F 0x95 0x8B..0x8E #E1.0 [4] (🕋..🕎) kaaba..menorah
|
||||
| 0xF0 0x9F 0x95 0x8F #E0.0 [1] (🕏) BOWL OF HYGIEIA
|
||||
| 0xF0 0x9F 0x95 0x90..0x9B #E0.6 [12] (🕐..🕛) one o’clock..twelve...
|
||||
| 0xF0 0x9F 0x95 0x9C..0xA7 #E0.7 [12] (🕜..🕧) one-thirty..twelve-...
|
||||
| 0xF0 0x9F 0x95 0xA8..0xAE #E0.0 [7] (🕨..🕮) RIGHT SPEAKER..BOOK
|
||||
| 0xF0 0x9F 0x95 0xAF..0xB0 #E0.7 [2] (🕯️..🕰️) candle..mantelpie...
|
||||
| 0xF0 0x9F 0x95 0xB1..0xB2 #E0.0 [2] (🕱..🕲) BLACK SKULL AND CRO...
|
||||
| 0xF0 0x9F 0x95 0xB3..0xB9 #E0.7 [7] (🕳️..🕹️) hole..joystick
|
||||
| 0xF0 0x9F 0x95 0xBA #E3.0 [1] (🕺) man dancing
|
||||
| 0xF0 0x9F 0x95 0xBB..0xFF #E0.0 [12] (🕻..🖆) LEFT HAND TELEPHONE...
|
||||
| 0xF0 0x9F 0x96 0x00..0x86 #
|
||||
| 0xF0 0x9F 0x96 0x87 #E0.7 [1] (🖇️) linked paperclips
|
||||
| 0xF0 0x9F 0x96 0x88..0x89 #E0.0 [2] (🖈..🖉) BLACK PUSHPIN..LOWE...
|
||||
| 0xF0 0x9F 0x96 0x8A..0x8D #E0.7 [4] (🖊️..🖍️) pen..crayon
|
||||
| 0xF0 0x9F 0x96 0x8E..0x8F #E0.0 [2] (🖎..🖏) LEFT WRITING HAND.....
|
||||
| 0xF0 0x9F 0x96 0x90 #E0.7 [1] (🖐️) hand with fingers spl...
|
||||
| 0xF0 0x9F 0x96 0x91..0x94 #E0.0 [4] (🖑..🖔) REVERSED RAISED HAN...
|
||||
| 0xF0 0x9F 0x96 0x95..0x96 #E1.0 [2] (🖕..🖖) middle finger..vulc...
|
||||
| 0xF0 0x9F 0x96 0x97..0xA3 #E0.0 [13] (🖗..🖣) WHITE DOWN POINTING...
|
||||
| 0xF0 0x9F 0x96 0xA4 #E3.0 [1] (🖤) black heart
|
||||
| 0xF0 0x9F 0x96 0xA5 #E0.7 [1] (🖥️) desktop computer
|
||||
| 0xF0 0x9F 0x96 0xA6..0xA7 #E0.0 [2] (🖦..🖧) KEYBOARD AND MOUSE....
|
||||
| 0xF0 0x9F 0x96 0xA8 #E0.7 [1] (🖨️) printer
|
||||
| 0xF0 0x9F 0x96 0xA9..0xB0 #E0.0 [8] (🖩..🖰) POCKET CALCULATOR.....
|
||||
| 0xF0 0x9F 0x96 0xB1..0xB2 #E0.7 [2] (🖱️..🖲️) computer mouse..t...
|
||||
| 0xF0 0x9F 0x96 0xB3..0xBB #E0.0 [9] (🖳..🖻) OLD PERSONAL COMPUT...
|
||||
| 0xF0 0x9F 0x96 0xBC #E0.7 [1] (🖼️) framed picture
|
||||
| 0xF0 0x9F 0x96 0xBD..0xFF #E0.0 [5] (🖽..🗁) FRAME WITH TILES..O...
|
||||
| 0xF0 0x9F 0x97 0x00..0x81 #
|
||||
| 0xF0 0x9F 0x97 0x82..0x84 #E0.7 [3] (🗂️..🗄️) card index divide...
|
||||
| 0xF0 0x9F 0x97 0x85..0x90 #E0.0 [12] (🗅..🗐) EMPTY NOTE..PAGES
|
||||
| 0xF0 0x9F 0x97 0x91..0x93 #E0.7 [3] (🗑️..🗓️) wastebasket..spir...
|
||||
| 0xF0 0x9F 0x97 0x94..0x9B #E0.0 [8] (🗔..🗛) DESKTOP WINDOW..DEC...
|
||||
| 0xF0 0x9F 0x97 0x9C..0x9E #E0.7 [3] (🗜️..🗞️) clamp..rolled-up ...
|
||||
| 0xF0 0x9F 0x97 0x9F..0xA0 #E0.0 [2] (🗟..🗠) PAGE WITH CIRCLED T...
|
||||
| 0xF0 0x9F 0x97 0xA1 #E0.7 [1] (🗡️) dagger
|
||||
| 0xF0 0x9F 0x97 0xA2 #E0.0 [1] (🗢) LIPS
|
||||
| 0xF0 0x9F 0x97 0xA3 #E0.7 [1] (🗣️) speaking head
|
||||
| 0xF0 0x9F 0x97 0xA4..0xA7 #E0.0 [4] (🗤..🗧) THREE RAYS ABOVE..T...
|
||||
| 0xF0 0x9F 0x97 0xA8 #E2.0 [1] (🗨️) left speech bubble
|
||||
| 0xF0 0x9F 0x97 0xA9..0xAE #E0.0 [6] (🗩..🗮) RIGHT SPEECH BUBBLE...
|
||||
| 0xF0 0x9F 0x97 0xAF #E0.7 [1] (🗯️) right anger bubble
|
||||
| 0xF0 0x9F 0x97 0xB0..0xB2 #E0.0 [3] (🗰..🗲) MOOD BUBBLE..LIGHTN...
|
||||
| 0xF0 0x9F 0x97 0xB3 #E0.7 [1] (🗳️) ballot box with ballot
|
||||
| 0xF0 0x9F 0x97 0xB4..0xB9 #E0.0 [6] (🗴..🗹) BALLOT SCRIPT X..BA...
|
||||
| 0xF0 0x9F 0x97 0xBA #E0.7 [1] (🗺️) world map
|
||||
| 0xF0 0x9F 0x97 0xBB..0xBF #E0.6 [5] (🗻..🗿) mount fuji..moai
|
||||
| 0xF0 0x9F 0x98 0x80 #E1.0 [1] (😀) grinning face
|
||||
| 0xF0 0x9F 0x98 0x81..0x86 #E0.6 [6] (😁..😆) beaming face with s...
|
||||
| 0xF0 0x9F 0x98 0x87..0x88 #E1.0 [2] (😇..😈) smiling face with h...
|
||||
| 0xF0 0x9F 0x98 0x89..0x8D #E0.6 [5] (😉..😍) winking face..smili...
|
||||
| 0xF0 0x9F 0x98 0x8E #E1.0 [1] (😎) smiling face with sung...
|
||||
| 0xF0 0x9F 0x98 0x8F #E0.6 [1] (😏) smirking face
|
||||
| 0xF0 0x9F 0x98 0x90 #E0.7 [1] (😐) neutral face
|
||||
| 0xF0 0x9F 0x98 0x91 #E1.0 [1] (😑) expressionless face
|
||||
| 0xF0 0x9F 0x98 0x92..0x94 #E0.6 [3] (😒..😔) unamused face..pens...
|
||||
| 0xF0 0x9F 0x98 0x95 #E1.0 [1] (😕) confused face
|
||||
| 0xF0 0x9F 0x98 0x96 #E0.6 [1] (😖) confounded face
|
||||
| 0xF0 0x9F 0x98 0x97 #E1.0 [1] (😗) kissing face
|
||||
| 0xF0 0x9F 0x98 0x98 #E0.6 [1] (😘) face blowing a kiss
|
||||
| 0xF0 0x9F 0x98 0x99 #E1.0 [1] (😙) kissing face with smil...
|
||||
| 0xF0 0x9F 0x98 0x9A #E0.6 [1] (😚) kissing face with clos...
|
||||
| 0xF0 0x9F 0x98 0x9B #E1.0 [1] (😛) face with tongue
|
||||
| 0xF0 0x9F 0x98 0x9C..0x9E #E0.6 [3] (😜..😞) winking face with t...
|
||||
| 0xF0 0x9F 0x98 0x9F #E1.0 [1] (😟) worried face
|
||||
| 0xF0 0x9F 0x98 0xA0..0xA5 #E0.6 [6] (😠..😥) angry face..sad but...
|
||||
| 0xF0 0x9F 0x98 0xA6..0xA7 #E1.0 [2] (😦..😧) frowning face with ...
|
||||
| 0xF0 0x9F 0x98 0xA8..0xAB #E0.6 [4] (😨..😫) fearful face..tired...
|
||||
| 0xF0 0x9F 0x98 0xAC #E1.0 [1] (😬) grimacing face
|
||||
| 0xF0 0x9F 0x98 0xAD #E0.6 [1] (😭) loudly crying face
|
||||
| 0xF0 0x9F 0x98 0xAE..0xAF #E1.0 [2] (😮..😯) face with open mout...
|
||||
| 0xF0 0x9F 0x98 0xB0..0xB3 #E0.6 [4] (😰..😳) anxious face with s...
|
||||
| 0xF0 0x9F 0x98 0xB4 #E1.0 [1] (😴) sleeping face
|
||||
| 0xF0 0x9F 0x98 0xB5 #E0.6 [1] (😵) dizzy face
|
||||
| 0xF0 0x9F 0x98 0xB6 #E1.0 [1] (😶) face without mouth
|
||||
| 0xF0 0x9F 0x98 0xB7..0xFF #E0.6 [10] (😷..🙀) face with medical m...
|
||||
| 0xF0 0x9F 0x99 0x00..0x80 #
|
||||
| 0xF0 0x9F 0x99 0x81..0x84 #E1.0 [4] (🙁..🙄) slightly frowning f...
|
||||
| 0xF0 0x9F 0x99 0x85..0x8F #E0.6 [11] (🙅..🙏) person gesturing NO...
|
||||
| 0xF0 0x9F 0x9A 0x80 #E0.6 [1] (🚀) rocket
|
||||
| 0xF0 0x9F 0x9A 0x81..0x82 #E1.0 [2] (🚁..🚂) helicopter..locomotive
|
||||
| 0xF0 0x9F 0x9A 0x83..0x85 #E0.6 [3] (🚃..🚅) railway car..bullet...
|
||||
| 0xF0 0x9F 0x9A 0x86 #E1.0 [1] (🚆) train
|
||||
| 0xF0 0x9F 0x9A 0x87 #E0.6 [1] (🚇) metro
|
||||
| 0xF0 0x9F 0x9A 0x88 #E1.0 [1] (🚈) light rail
|
||||
| 0xF0 0x9F 0x9A 0x89 #E0.6 [1] (🚉) station
|
||||
| 0xF0 0x9F 0x9A 0x8A..0x8B #E1.0 [2] (🚊..🚋) tram..tram car
|
||||
| 0xF0 0x9F 0x9A 0x8C #E0.6 [1] (🚌) bus
|
||||
| 0xF0 0x9F 0x9A 0x8D #E0.7 [1] (🚍) oncoming bus
|
||||
| 0xF0 0x9F 0x9A 0x8E #E1.0 [1] (🚎) trolleybus
|
||||
| 0xF0 0x9F 0x9A 0x8F #E0.6 [1] (🚏) bus stop
|
||||
| 0xF0 0x9F 0x9A 0x90 #E1.0 [1] (🚐) minibus
|
||||
| 0xF0 0x9F 0x9A 0x91..0x93 #E0.6 [3] (🚑..🚓) ambulance..police car
|
||||
| 0xF0 0x9F 0x9A 0x94 #E0.7 [1] (🚔) oncoming police car
|
||||
| 0xF0 0x9F 0x9A 0x95 #E0.6 [1] (🚕) taxi
|
||||
| 0xF0 0x9F 0x9A 0x96 #E1.0 [1] (🚖) oncoming taxi
|
||||
| 0xF0 0x9F 0x9A 0x97 #E0.6 [1] (🚗) automobile
|
||||
| 0xF0 0x9F 0x9A 0x98 #E0.7 [1] (🚘) oncoming automobile
|
||||
| 0xF0 0x9F 0x9A 0x99..0x9A #E0.6 [2] (🚙..🚚) sport utility vehic...
|
||||
| 0xF0 0x9F 0x9A 0x9B..0xA1 #E1.0 [7] (🚛..🚡) articulated lorry.....
|
||||
| 0xF0 0x9F 0x9A 0xA2 #E0.6 [1] (🚢) ship
|
||||
| 0xF0 0x9F 0x9A 0xA3 #E1.0 [1] (🚣) person rowing boat
|
||||
| 0xF0 0x9F 0x9A 0xA4..0xA5 #E0.6 [2] (🚤..🚥) speedboat..horizont...
|
||||
| 0xF0 0x9F 0x9A 0xA6 #E1.0 [1] (🚦) vertical traffic light
|
||||
| 0xF0 0x9F 0x9A 0xA7..0xAD #E0.6 [7] (🚧..🚭) construction..no sm...
|
||||
| 0xF0 0x9F 0x9A 0xAE..0xB1 #E1.0 [4] (🚮..🚱) litter in bin sign....
|
||||
| 0xF0 0x9F 0x9A 0xB2 #E0.6 [1] (🚲) bicycle
|
||||
| 0xF0 0x9F 0x9A 0xB3..0xB5 #E1.0 [3] (🚳..🚵) no bicycles..person...
|
||||
| 0xF0 0x9F 0x9A 0xB6 #E0.6 [1] (🚶) person walking
|
||||
| 0xF0 0x9F 0x9A 0xB7..0xB8 #E1.0 [2] (🚷..🚸) no pedestrians..chi...
|
||||
| 0xF0 0x9F 0x9A 0xB9..0xBE #E0.6 [6] (🚹..🚾) men’s room..water c...
|
||||
| 0xF0 0x9F 0x9A 0xBF #E1.0 [1] (🚿) shower
|
||||
| 0xF0 0x9F 0x9B 0x80 #E0.6 [1] (🛀) person taking bath
|
||||
| 0xF0 0x9F 0x9B 0x81..0x85 #E1.0 [5] (🛁..🛅) bathtub..left luggage
|
||||
| 0xF0 0x9F 0x9B 0x86..0x8A #E0.0 [5] (🛆..🛊) TRIANGLE WITH ROUND...
|
||||
| 0xF0 0x9F 0x9B 0x8B #E0.7 [1] (🛋️) couch and lamp
|
||||
| 0xF0 0x9F 0x9B 0x8C #E1.0 [1] (🛌) person in bed
|
||||
| 0xF0 0x9F 0x9B 0x8D..0x8F #E0.7 [3] (🛍️..🛏️) shopping bags..bed
|
||||
| 0xF0 0x9F 0x9B 0x90 #E1.0 [1] (🛐) place of worship
|
||||
| 0xF0 0x9F 0x9B 0x91..0x92 #E3.0 [2] (🛑..🛒) stop sign..shopping...
|
||||
| 0xF0 0x9F 0x9B 0x93..0x94 #E0.0 [2] (🛓..🛔) STUPA..PAGODA
|
||||
| 0xF0 0x9F 0x9B 0x95 #E12.0 [1] (🛕) hindu temple
|
||||
| 0xF0 0x9F 0x9B 0x96..0x97 #E13.0 [2] (🛖..🛗) hut..elevator
|
||||
| 0xF0 0x9F 0x9B 0x98..0x9F #E0.0 [8] (..🛟) <reserved-1F6D8>..<...
|
||||
| 0xF0 0x9F 0x9B 0xA0..0xA5 #E0.7 [6] (🛠️..🛥️) hammer and wrench...
|
||||
| 0xF0 0x9F 0x9B 0xA6..0xA8 #E0.0 [3] (🛦..🛨) UP-POINTING MILITAR...
|
||||
| 0xF0 0x9F 0x9B 0xA9 #E0.7 [1] (🛩️) small airplane
|
||||
| 0xF0 0x9F 0x9B 0xAA #E0.0 [1] (🛪) NORTHEAST-POINTING AIR...
|
||||
| 0xF0 0x9F 0x9B 0xAB..0xAC #E1.0 [2] (🛫..🛬) airplane departure....
|
||||
| 0xF0 0x9F 0x9B 0xAD..0xAF #E0.0 [3] (..) <reserved-1F6ED>..<...
|
||||
| 0xF0 0x9F 0x9B 0xB0 #E0.7 [1] (🛰️) satellite
|
||||
| 0xF0 0x9F 0x9B 0xB1..0xB2 #E0.0 [2] (🛱..🛲) ONCOMING FIRE ENGIN...
|
||||
| 0xF0 0x9F 0x9B 0xB3 #E0.7 [1] (🛳️) passenger ship
|
||||
| 0xF0 0x9F 0x9B 0xB4..0xB6 #E3.0 [3] (🛴..🛶) kick scooter..canoe
|
||||
| 0xF0 0x9F 0x9B 0xB7..0xB8 #E5.0 [2] (🛷..🛸) sled..flying saucer
|
||||
| 0xF0 0x9F 0x9B 0xB9 #E11.0 [1] (🛹) skateboard
|
||||
| 0xF0 0x9F 0x9B 0xBA #E12.0 [1] (🛺) auto rickshaw
|
||||
| 0xF0 0x9F 0x9B 0xBB..0xBC #E13.0 [2] (🛻..🛼) pickup truck..rolle...
|
||||
| 0xF0 0x9F 0x9B 0xBD..0xBF #E0.0 [3] (..) <reserved-1F6FD>..<...
|
||||
| 0xF0 0x9F 0x9D 0xB4..0xBF #E0.0 [12] (🝴..🝿) <reserved-1F774>..<...
|
||||
| 0xF0 0x9F 0x9F 0x95..0x9F #E0.0 [11] (🟕..) CIRCLED TRIANGLE..<...
|
||||
| 0xF0 0x9F 0x9F 0xA0..0xAB #E12.0 [12] (🟠..🟫) orange circle..brow...
|
||||
| 0xF0 0x9F 0x9F 0xAC..0xBF #E0.0 [20] (..) <reserved-1F7EC>..<...
|
||||
| 0xF0 0x9F 0xA0 0x8C..0x8F #E0.0 [4] (..) <reserved-1F80C>..<...
|
||||
| 0xF0 0x9F 0xA1 0x88..0x8F #E0.0 [8] (..) <reserved-1F848>..<...
|
||||
| 0xF0 0x9F 0xA1 0x9A..0x9F #E0.0 [6] (..) <reserved-1F85A>..<...
|
||||
| 0xF0 0x9F 0xA2 0x88..0x8F #E0.0 [8] (..) <reserved-1F888>..<...
|
||||
| 0xF0 0x9F 0xA2 0xAE..0xFF #E0.0 [82] (..) <reserved-1F8AE>..<...
|
||||
| 0xF0 0x9F 0xA3 0x00..0xBF #
|
||||
| 0xF0 0x9F 0xA4 0x8C #E13.0 [1] (🤌) pinched fingers
|
||||
| 0xF0 0x9F 0xA4 0x8D..0x8F #E12.0 [3] (🤍..🤏) white heart..pinchi...
|
||||
| 0xF0 0x9F 0xA4 0x90..0x98 #E1.0 [9] (🤐..🤘) zipper-mouth face.....
|
||||
| 0xF0 0x9F 0xA4 0x99..0x9E #E3.0 [6] (🤙..🤞) call me hand..cross...
|
||||
| 0xF0 0x9F 0xA4 0x9F #E5.0 [1] (🤟) love-you gesture
|
||||
| 0xF0 0x9F 0xA4 0xA0..0xA7 #E3.0 [8] (🤠..🤧) cowboy hat face..sn...
|
||||
| 0xF0 0x9F 0xA4 0xA8..0xAF #E5.0 [8] (🤨..🤯) face with raised ey...
|
||||
| 0xF0 0x9F 0xA4 0xB0 #E3.0 [1] (🤰) pregnant woman
|
||||
| 0xF0 0x9F 0xA4 0xB1..0xB2 #E5.0 [2] (🤱..🤲) breast-feeding..pal...
|
||||
| 0xF0 0x9F 0xA4 0xB3..0xBA #E3.0 [8] (🤳..🤺) selfie..person fencing
|
||||
| 0xF0 0x9F 0xA4 0xBC..0xBE #E3.0 [3] (🤼..🤾) people wrestling..p...
|
||||
| 0xF0 0x9F 0xA4 0xBF #E12.0 [1] (🤿) diving mask
|
||||
| 0xF0 0x9F 0xA5 0x80..0x85 #E3.0 [6] (🥀..🥅) wilted flower..goal...
|
||||
| 0xF0 0x9F 0xA5 0x87..0x8B #E3.0 [5] (🥇..🥋) 1st place medal..ma...
|
||||
| 0xF0 0x9F 0xA5 0x8C #E5.0 [1] (🥌) curling stone
|
||||
| 0xF0 0x9F 0xA5 0x8D..0x8F #E11.0 [3] (🥍..🥏) lacrosse..flying disc
|
||||
| 0xF0 0x9F 0xA5 0x90..0x9E #E3.0 [15] (🥐..🥞) croissant..pancakes
|
||||
| 0xF0 0x9F 0xA5 0x9F..0xAB #E5.0 [13] (🥟..🥫) dumpling..canned food
|
||||
| 0xF0 0x9F 0xA5 0xAC..0xB0 #E11.0 [5] (🥬..🥰) leafy green..smilin...
|
||||
| 0xF0 0x9F 0xA5 0xB1 #E12.0 [1] (🥱) yawning face
|
||||
| 0xF0 0x9F 0xA5 0xB2 #E13.0 [1] (🥲) smiling face with tear
|
||||
| 0xF0 0x9F 0xA5 0xB3..0xB6 #E11.0 [4] (🥳..🥶) partying face..cold...
|
||||
| 0xF0 0x9F 0xA5 0xB7..0xB8 #E13.0 [2] (🥷..🥸) ninja..disguised face
|
||||
| 0xF0 0x9F 0xA5 0xB9 #E0.0 [1] (🥹) <reserved-1F979>
|
||||
| 0xF0 0x9F 0xA5 0xBA #E11.0 [1] (🥺) pleading face
|
||||
| 0xF0 0x9F 0xA5 0xBB #E12.0 [1] (🥻) sari
|
||||
| 0xF0 0x9F 0xA5 0xBC..0xBF #E11.0 [4] (🥼..🥿) lab coat..flat shoe
|
||||
| 0xF0 0x9F 0xA6 0x80..0x84 #E1.0 [5] (🦀..🦄) crab..unicorn
|
||||
| 0xF0 0x9F 0xA6 0x85..0x91 #E3.0 [13] (🦅..🦑) eagle..squid
|
||||
| 0xF0 0x9F 0xA6 0x92..0x97 #E5.0 [6] (🦒..🦗) giraffe..cricket
|
||||
| 0xF0 0x9F 0xA6 0x98..0xA2 #E11.0 [11] (🦘..🦢) kangaroo..swan
|
||||
| 0xF0 0x9F 0xA6 0xA3..0xA4 #E13.0 [2] (🦣..🦤) mammoth..dodo
|
||||
| 0xF0 0x9F 0xA6 0xA5..0xAA #E12.0 [6] (🦥..🦪) sloth..oyster
|
||||
| 0xF0 0x9F 0xA6 0xAB..0xAD #E13.0 [3] (🦫..🦭) beaver..seal
|
||||
| 0xF0 0x9F 0xA6 0xAE..0xAF #E12.0 [2] (🦮..🦯) guide dog..white cane
|
||||
| 0xF0 0x9F 0xA6 0xB0..0xB9 #E11.0 [10] (🦰..🦹) red hair..supervillain
|
||||
| 0xF0 0x9F 0xA6 0xBA..0xBF #E12.0 [6] (🦺..🦿) safety vest..mechan...
|
||||
| 0xF0 0x9F 0xA7 0x80 #E1.0 [1] (🧀) cheese wedge
|
||||
| 0xF0 0x9F 0xA7 0x81..0x82 #E11.0 [2] (🧁..🧂) cupcake..salt
|
||||
| 0xF0 0x9F 0xA7 0x83..0x8A #E12.0 [8] (🧃..🧊) beverage box..ice
|
||||
| 0xF0 0x9F 0xA7 0x8B #E13.0 [1] (🧋) bubble tea
|
||||
| 0xF0 0x9F 0xA7 0x8C #E0.0 [1] (🧌) <reserved-1F9CC>
|
||||
| 0xF0 0x9F 0xA7 0x8D..0x8F #E12.0 [3] (🧍..🧏) person standing..de...
|
||||
| 0xF0 0x9F 0xA7 0x90..0xA6 #E5.0 [23] (🧐..🧦) face with monocle.....
|
||||
| 0xF0 0x9F 0xA7 0xA7..0xBF #E11.0 [25] (🧧..🧿) red envelope..nazar...
|
||||
| 0xF0 0x9F 0xA8 0x80..0xFF #E0.0 [112] (🨀..) NEUTRAL CHESS KING....
|
||||
| 0xF0 0x9F 0xA9 0x00..0xAF #
|
||||
| 0xF0 0x9F 0xA9 0xB0..0xB3 #E12.0 [4] (🩰..🩳) ballet shoes..shorts
|
||||
| 0xF0 0x9F 0xA9 0xB4 #E13.0 [1] (🩴) thong sandal
|
||||
| 0xF0 0x9F 0xA9 0xB5..0xB7 #E0.0 [3] (🩵..🩷) <reserved-1FA75>..<...
|
||||
| 0xF0 0x9F 0xA9 0xB8..0xBA #E12.0 [3] (🩸..🩺) drop of blood..stet...
|
||||
| 0xF0 0x9F 0xA9 0xBB..0xBF #E0.0 [5] (🩻..) <reserved-1FA7B>..<...
|
||||
| 0xF0 0x9F 0xAA 0x80..0x82 #E12.0 [3] (🪀..🪂) yo-yo..parachute
|
||||
| 0xF0 0x9F 0xAA 0x83..0x86 #E13.0 [4] (🪃..🪆) boomerang..nesting ...
|
||||
| 0xF0 0x9F 0xAA 0x87..0x8F #E0.0 [9] (🪇..) <reserved-1FA87>..<...
|
||||
| 0xF0 0x9F 0xAA 0x90..0x95 #E12.0 [6] (🪐..🪕) ringed planet..banjo
|
||||
| 0xF0 0x9F 0xAA 0x96..0xA8 #E13.0 [19] (🪖..🪨) military helmet..rock
|
||||
| 0xF0 0x9F 0xAA 0xA9..0xAF #E0.0 [7] (🪩..🪯) <reserved-1FAA9>..<...
|
||||
| 0xF0 0x9F 0xAA 0xB0..0xB6 #E13.0 [7] (🪰..🪶) fly..feather
|
||||
| 0xF0 0x9F 0xAA 0xB7..0xBF #E0.0 [9] (🪷..🪿) <reserved-1FAB7>..<...
|
||||
| 0xF0 0x9F 0xAB 0x80..0x82 #E13.0 [3] (🫀..🫂) anatomical heart..p...
|
||||
| 0xF0 0x9F 0xAB 0x83..0x8F #E0.0 [13] (🫃..🫏) <reserved-1FAC3>..<...
|
||||
| 0xF0 0x9F 0xAB 0x90..0x96 #E13.0 [7] (🫐..🫖) blueberries..teapot
|
||||
| 0xF0 0x9F 0xAB 0x97..0xBF #E0.0 [41] (🫗..) <reserved-1FAD7>..<...
|
||||
| 0xF0 0x9F 0xB0 0x80..0xFF #E0.0[1022] (..) <reserved-1FC...
|
||||
| 0xF0 0x9F 0xB1..0xBE 0x00..0xFF #
|
||||
| 0xF0 0x9F 0xBF 0x00..0xBD #
|
||||
;
|
||||
|
||||
}%%
|
8
vendor/github.com/apparentlymart/go-textseg/v13/textseg/generate.go
generated
vendored
@@ -1,8 +0,0 @@
package textseg

//go:generate go run make_tables.go -output tables.go
//go:generate go run make_test_tables.go -output tables_test.go
//go:generate ruby unicode2ragel.rb --url=https://www.unicode.org/Public/13.0.0/ucd/auxiliary/GraphemeBreakProperty.txt -m GraphemeCluster -p "Prepend,CR,LF,Control,Extend,Regional_Indicator,SpacingMark,L,V,T,LV,LVT,ZWJ" -o grapheme_clusters_table.rl
//go:generate ruby unicode2ragel.rb --url=https://www.unicode.org/Public/13.0.0/ucd/emoji/emoji-data.txt -m Emoji -p "Extended_Pictographic" -o emoji_table.rl
//go:generate ragel -Z grapheme_clusters.rl
//go:generate gofmt -w grapheme_clusters.go
4138
vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.go
generated
vendored
File diff suppressed because it is too large
133
vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters.rl
generated
vendored
@@ -1,133 +0,0 @@
package textseg

import (
	"errors"
	"unicode/utf8"
)

// Generated from grapheme_clusters.rl. DO NOT EDIT
%%{
	# (except you are actually in grapheme_clusters.rl here, so edit away!)

	machine graphclust;
	write data;
}%%

var Error = errors.New("invalid UTF8 text")

// ScanGraphemeClusters is a split function for bufio.Scanner that splits
// on grapheme cluster boundaries.
func ScanGraphemeClusters(data []byte, atEOF bool) (int, []byte, error) {
	if len(data) == 0 {
		return 0, nil, nil
	}

	// Ragel state
	cs := 0         // Current State
	p := 0          // "Pointer" into data
	pe := len(data) // End-of-data "pointer"
	ts := 0
	te := 0
	act := 0
	eof := pe

	// Make Go compiler happy
	_ = ts
	_ = te
	_ = act
	_ = eof

	startPos := 0
	endPos := 0

	%%{
		include GraphemeCluster "grapheme_clusters_table.rl";
		include Emoji "emoji_table.rl";

		action start {
			startPos = p
		}

		action end {
			endPos = p
		}

		action emit {
			return endPos+1, data[startPos:endPos+1], nil
		}

		ZWJGlue = ZWJ (Extended_Pictographic Extend*)?;
		AnyExtender = Extend | ZWJGlue | SpacingMark;
		Extension = AnyExtender*;
		ReplacementChar = (0xEF 0xBF 0xBD);

		CRLFSeq = CR LF;
		ControlSeq = Control | ReplacementChar;
		HangulSeq = (
			L+ (((LV? V+ | LVT) T*)?|LV?) |
			LV V* T* |
			V+ T* |
			LVT T* |
			T+
		) Extension;
		EmojiSeq = Extended_Pictographic Extend* Extension;
		ZWJSeq = ZWJ (ZWJ | Extend | SpacingMark)*;
		EmojiFlagSeq = Regional_Indicator Regional_Indicator? Extension;

		UTF8Cont = 0x80 .. 0xBF;
		AnyUTF8 = (
			0x00..0x7F |
			0xC0..0xDF . UTF8Cont |
			0xE0..0xEF . UTF8Cont . UTF8Cont |
			0xF0..0xF7 . UTF8Cont . UTF8Cont . UTF8Cont
		);

		# OtherSeq is any character that isn't at the start of one of the extended sequences above, followed by extension
		OtherSeq = (AnyUTF8 - (CR|LF|Control|ReplacementChar|L|LV|V|LVT|T|Extended_Pictographic|ZWJ|Regional_Indicator|Prepend)) (Extend | ZWJ | SpacingMark)*;

		# PrependSeq is prepend followed by any of the other patterns above, except control characters which explicitly break
		PrependSeq = Prepend+ (HangulSeq|EmojiSeq|ZWJSeq|EmojiFlagSeq|OtherSeq)?;

		CRLFTok = CRLFSeq >start @end;
		ControlTok = ControlSeq >start @end;
		HangulTok = HangulSeq >start @end;
		EmojiTok = EmojiSeq >start @end;
		ZWJTok = ZWJSeq >start @end;
		EmojiFlagTok = EmojiFlagSeq >start @end;
		OtherTok = OtherSeq >start @end;
		PrependTok = PrependSeq >start @end;

		main := |*
			CRLFTok => emit;
			ControlTok => emit;
			HangulTok => emit;
			EmojiTok => emit;
			ZWJTok => emit;
			EmojiFlagTok => emit;
			PrependTok => emit;
			OtherTok => emit;

			# any single valid UTF-8 character would also be valid per spec,
			# but we'll handle that separately after the loop so we can deal
			# with requesting more bytes if we're not at EOF.
		*|;

		write init;
		write exec;
	}%%

	// If we fall out here then we were unable to complete a sequence.
	// If we weren't able to complete a sequence then either we've
	// reached the end of a partial buffer (so there's more data to come)
	// or we have an isolated symbol that would normally be part of a
	// grapheme cluster but has appeared in isolation here.

	if !atEOF {
		// Request more
		return 0, nil, nil
	}

	// Just take the first UTF-8 sequence and return that.
	_, seqLen := utf8.DecodeRune(data)
	return seqLen, data[:seqLen], nil
}
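The `ScanGraphemeClusters` function removed above is a standard `bufio.SplitFunc`, so it plugs straight into `bufio.Scanner`. A minimal sketch of how a consumer would drive it (illustrative only, not part of this diff; it assumes the v13 module is on the import path):

```go
package main

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/apparentlymart/go-textseg/v13/textseg"
)

func main() {
	// "e\u0301" is 'e' plus a combining accent: two runes but one grapheme
	// cluster. The split function keeps them together in a single token.
	sc := bufio.NewScanner(strings.NewReader("e\u0301x"))
	sc.Split(textseg.ScanGraphemeClusters)
	for sc.Scan() {
		fmt.Printf("%q\n", sc.Text())
	}
}
```

Running this prints two tokens, `"é"` and `"x"`, where a plain rune loop would have produced three.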
1609
vendor/github.com/apparentlymart/go-textseg/v13/textseg/grapheme_clusters_table.rl
generated
vendored
File diff suppressed because it is too large
5833
vendor/github.com/apparentlymart/go-textseg/v13/textseg/tables.go
generated
vendored
File diff suppressed because it is too large
335
vendor/github.com/apparentlymart/go-textseg/v13/textseg/unicode2ragel.rb
generated
vendored
@@ -1,335 +0,0 @@
#!/usr/bin/env ruby
#
# This script has been updated to accept more command-line arguments:
#
#    -u, --url        URL to process
#    -m, --machine    Machine name
#    -p, --properties Properties to add to the machine
#    -o, --output     Write output to file
#
# Updated by: Marty Schoch <marty.schoch@gmail.com>
#
# This script uses the unicode spec to generate a Ragel state machine
# that recognizes unicode alphanumeric characters. It generates 5
# character classes: uupper, ulower, ualpha, udigit, and ualnum.
# Currently supported encodings are UTF-8 [default] and UCS-4.
#
# Usage: unicode2ragel.rb [options]
#    -e, --encoding [ucs4 | utf8]     Data encoding
#    -h, --help                       Show this message
#
# This script was originally written as part of the Ferret search
# engine library.
#
# Author: Rakan El-Khalil <rakan@well.com>

require 'optparse'
require 'open-uri'

ENCODINGS = [ :utf8, :ucs4 ]
ALPHTYPES = { :utf8 => "byte", :ucs4 => "rune" }
DEFAULT_CHART_URL = "http://www.unicode.org/Public/5.1.0/ucd/DerivedCoreProperties.txt"
DEFAULT_MACHINE_NAME= "WChar"

###
# Display vars & default option

TOTAL_WIDTH = 80
RANGE_WIDTH = 23
@encoding = :utf8
@chart_url = DEFAULT_CHART_URL
machine_name = DEFAULT_MACHINE_NAME
properties = []
@output = $stdout

###
# Option parsing

cli_opts = OptionParser.new do |opts|
  opts.on("-e", "--encoding [ucs4 | utf8]", "Data encoding") do |o|
    @encoding = o.downcase.to_sym
  end
  opts.on("-h", "--help", "Show this message") do
    puts opts
    exit
  end
  opts.on("-u", "--url URL", "URL to process") do |o|
    @chart_url = o
  end
  opts.on("-m", "--machine MACHINE_NAME", "Machine name") do |o|
    machine_name = o
  end
  opts.on("-p", "--properties x,y,z", Array, "Properties to add to machine") do |o|
    properties = o
  end
  opts.on("-o", "--output FILE", "output file") do |o|
    @output = File.new(o, "w+")
  end
end

cli_opts.parse(ARGV)
unless ENCODINGS.member? @encoding
  puts "Invalid encoding: #{@encoding}"
  puts cli_opts
  exit
end

##
# Downloads the document at url and yields every alpha line's hex
# range and description.

def each_alpha( url, property )
  URI.open( url ) do |file|
    file.each_line do |line|
      next if line =~ /^#/;
      next if line !~ /; #{property} *#/;

      range, description = line.split(/;/)
      range.strip!
      description.gsub!(/.*#/, '').strip!

      if range =~ /\.\./
           start, stop = range.split '..'
      else start = stop = range
      end

      yield start.hex .. stop.hex, description
    end
  end
end

###
# Formats to hex at minimum width

def to_hex( n )
  r = "%0X" % n
  r = "0#{r}" unless (r.length % 2).zero?
  r
end

###
# UCS4 is just a straight hex conversion of the unicode codepoint.

def to_ucs4( range )
  rangestr  = "0x" + to_hex(range.begin)
  rangestr << "..0x" + to_hex(range.end) if range.begin != range.end
  [ rangestr ]
end

##
# 0x00 - 0x7f         -> 0zzzzzzz[7]
# 0x80 - 0x7ff        -> 110yyyyy[5] 10zzzzzz[6]
# 0x800 - 0xffff      -> 1110xxxx[4] 10yyyyyy[6] 10zzzzzz[6]
# 0x010000 - 0x10ffff -> 11110www[3] 10xxxxxx[6] 10yyyyyy[6] 10zzzzzz[6]

UTF8_BOUNDARIES = [0x7f, 0x7ff, 0xffff, 0x10ffff]

def to_utf8_enc( n )
  r = 0
  if n <= 0x7f
    r = n
  elsif n <= 0x7ff
    y = 0xc0 | (n >> 6)
    z = 0x80 | (n & 0x3f)
    r = y << 8 | z
  elsif n <= 0xffff
    x = 0xe0 | (n >> 12)
    y = 0x80 | (n >>  6) & 0x3f
    z = 0x80 |  n        & 0x3f
    r = x << 16 | y << 8 | z
  elsif n <= 0x10ffff
    w = 0xf0 | (n >> 18)
    x = 0x80 | (n >> 12) & 0x3f
    y = 0x80 | (n >>  6) & 0x3f
    z = 0x80 |  n        & 0x3f
    r = w << 24 | x << 16 | y << 8 | z
  end

  to_hex(r)
end

def from_utf8_enc( n )
  n = n.hex
  r = 0
  if n <= 0x7f
    r = n
  elsif n <= 0xdfff
    y = (n >> 8) & 0x1f
    z =  n       & 0x3f
    r = y << 6 | z
  elsif n <= 0xefffff
    x = (n >> 16) & 0x0f
    y = (n >>  8) & 0x3f
    z =  n        & 0x3f
    r = x << 10 | y << 6 | z
  elsif n <= 0xf7ffffff
    w = (n >> 24) & 0x07
    x = (n >> 16) & 0x3f
    y = (n >>  8) & 0x3f
    z =  n        & 0x3f
    r = w << 18 | x << 12 | y << 6 | z
  end

  r
end

###
# Given a range, splits it up into ranges that can be continuously
# encoded into utf8. Eg: 0x00 .. 0xff => [0x00..0x7f, 0x80..0xff]
# This is not strictly needed since the current [5.1] unicode standard
# doesn't have ranges that straddle utf8 boundaries. This is included
# for completeness as there is no telling if that will ever change.

def utf8_ranges( range )
  ranges = []
  UTF8_BOUNDARIES.each do |max|
    if range.begin <= max
      if range.end <= max
        ranges << range
        return ranges
      end

      ranges << (range.begin .. max)
      range = (max + 1) .. range.end
    end
  end
  ranges
end

def build_range( start, stop )
  size = start.size/2
  left = size - 1
  return [""] if size < 1

  a = start[0..1]
  b = stop[0..1]

  ###
  # Shared prefix

  if a == b
    return build_range(start[2..-1], stop[2..-1]).map do |elt|
      "0x#{a} " + elt
    end
  end

  ###
  # Unshared prefix, end of run

  return ["0x#{a}..0x#{b} "] if left.zero?

  ###
  # Unshared prefix, not end of run
  # Range can be 0x123456..0x56789A
  # Which is equivalent to:
  #   0x123456 .. 0x12FFFF
  #   0x130000 .. 0x55FFFF
  #   0x560000 .. 0x56789A

  ret = []
  ret << build_range(start, a + "FF" * left)

  ###
  # Only generate middle range if need be.

  if a.hex+1 != b.hex
    max = to_hex(b.hex - 1)
    max = "FF" if b == "FF"
    ret << "0x#{to_hex(a.hex+1)}..0x#{max} " + "0x00..0xFF " * left
  end

  ###
  # Don't generate last range if it is covered by first range

  ret << build_range(b + "00" * left, stop) unless b == "FF"
  ret.flatten!
end

def to_utf8( range )
  utf8_ranges( range ).map do |r|
    begin_enc = to_utf8_enc(r.begin)
    end_enc = to_utf8_enc(r.end)
    build_range begin_enc, end_enc
  end.flatten!
end

##
# Perform a 3-way comparison of the number of codepoints advertised by
# the unicode spec for the given range, the originally parsed range,
# and the resulting utf8 encoded range.

def count_codepoints( code )
  code.split(' ').inject(1) do |acc, elt|
    if elt =~ /0x(.+)\.\.0x(.+)/
      if @encoding == :utf8
        acc * (from_utf8_enc($2) - from_utf8_enc($1) + 1)
      else
        acc * ($2.hex - $1.hex + 1)
      end
    else
      acc
    end
  end
end

def is_valid?( range, desc, codes )
  spec_count  = 1
  spec_count  = $1.to_i if desc =~ /\[(\d+)\]/
  range_count = range.end - range.begin + 1

  sum = codes.inject(0) { |acc, elt| acc + count_codepoints(elt) }
  sum == spec_count and sum == range_count
end

##
# Generate the state machine to stdout

def generate_machine( name, property )
  pipe = " "
  @output.puts "    #{name} = "
  each_alpha( @chart_url, property ) do |range, desc|

    codes = (@encoding == :ucs4) ? to_ucs4(range) : to_utf8(range)

    #raise "Invalid encoding of range #{range}: #{codes.inspect}" unless
    #   is_valid? range, desc, codes

    range_width = codes.map { |a| a.size }.max
    range_width = RANGE_WIDTH if range_width < RANGE_WIDTH

    desc_width  = TOTAL_WIDTH - RANGE_WIDTH - 11
    desc_width -= (range_width - RANGE_WIDTH) if range_width > RANGE_WIDTH

    if desc.size > desc_width
      desc = desc[0..desc_width - 4] + "..."
    end

    codes.each_with_index do |r, idx|
      desc = "" unless idx.zero?
      code = "%-#{range_width}s" % r
      @output.puts "      #{pipe} #{code} ##{desc}"
      pipe = "|"
    end
  end
  @output.puts "      ;"
  @output.puts ""
end

@output.puts <<EOF
# The following Ragel file was autogenerated with #{$0}
# from: #{@chart_url}
#
# It defines #{properties}.
#
# To use this, make sure that your alphtype is set to #{ALPHTYPES[@encoding]},
# and that your input is in #{@encoding}.

%%{
    machine #{machine_name};

EOF

properties.each { |x| generate_machine( x, x ) }

@output.puts <<EOF
}%%
EOF
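The `to_utf8_enc` helper above hand-rolls the UTF-8 bit layout documented in its comment (1-4 bytes, boundaries 0x7F/0x7FF/0xFFFF/0x10FFFF). A hedged Go equivalent of the same packing, cross-checked against the standard library (names like `encodeUTF8` are mine, not the script's):

```go
package main

import (
	"fmt"
	"unicode/utf8"
)

// encodeUTF8 mirrors the script's to_utf8_enc: it packs a code point into
// 1-4 bytes using the boundaries 0x7F, 0x7FF, 0xFFFF, 0x10FFFF.
func encodeUTF8(n rune) []byte {
	switch {
	case n <= 0x7F:
		return []byte{byte(n)}
	case n <= 0x7FF:
		return []byte{0xC0 | byte(n>>6), 0x80 | byte(n&0x3F)}
	case n <= 0xFFFF:
		return []byte{0xE0 | byte(n>>12), 0x80 | byte(n>>6&0x3F), 0x80 | byte(n&0x3F)}
	default:
		return []byte{0xF0 | byte(n>>18), 0x80 | byte(n>>12&0x3F), 0x80 | byte(n>>6&0x3F), 0x80 | byte(n&0x3F)}
	}
}

func main() {
	for _, r := range []rune{'A', 'é', '⛵', '🀄'} {
		buf := make([]byte, 4)
		n := utf8.EncodeRune(buf, r)
		// The manual encoding and the stdlib encoding should agree byte-for-byte.
		fmt.Printf("%U manual=% X stdlib=% X\n", r, encodeUTF8(r), buf[:n])
	}
}
```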
19
vendor/github.com/apparentlymart/go-textseg/v13/textseg/utf8_seqs.go
generated
vendored
@@ -1,19 +0,0 @@
package textseg

import "unicode/utf8"

// ScanUTF8Sequences is a split function for bufio.Scanner that splits
// on UTF8 sequence boundaries.
//
// This is included largely for completeness, since this behavior is already
// built in to Go when ranging over a string.
func ScanUTF8Sequences(data []byte, atEOF bool) (int, []byte, error) {
	if len(data) == 0 {
		return 0, nil, nil
	}
	r, seqLen := utf8.DecodeRune(data)
	if r == utf8.RuneError && !atEOF {
		return 0, nil, nil
	}
	return seqLen, data[:seqLen], nil
}
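As the doc comment notes, splitting on UTF-8 sequence boundaries is what Go's range-over-string already does; a tiny sketch of that equivalence (illustrative, not part of the vendored code):

```go
package main

import "fmt"

func main() {
	// Ranging over a string yields one code point per iteration, which is
	// exactly what ScanUTF8Sequences reproduces for a bufio.Scanner.
	for i, r := range "⛵x" {
		fmt.Printf("byte offset %d: %q\n", i, r)
	}
}
```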
8
vendor/github.com/cespare/xxhash/v2/.travis.yml
generated
vendored
Normal file
@@ -0,0 +1,8 @@
language: go
go:
  - "1.x"
  - master
env:
  - TAGS=""
  - TAGS="-tags purego"
script: go test $TAGS -v ./...
6
vendor/github.com/cespare/xxhash/v2/README.md
generated
vendored
@@ -1,7 +1,7 @@
# xxhash

[](https://pkg.go.dev/github.com/cespare/xxhash/v2)
[](https://github.com/cespare/xxhash/actions/workflows/test.yml)
[](https://godoc.org/github.com/cespare/xxhash)
[](https://travis-ci.org/cespare/xxhash)

xxhash is a Go implementation of the 64-bit
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
@@ -64,6 +64,4 @@ $ go test -benchtime 10s -bench '/xxhash,direct,bytes'

- [InfluxDB](https://github.com/influxdata/influxdb)
- [Prometheus](https://github.com/prometheus/prometheus)
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
- [FreeCache](https://github.com/coocood/freecache)
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
1
vendor/github.com/cespare/xxhash/v2/xxhash.go
generated
vendored
@@ -193,6 +193,7 @@ func (d *Digest) UnmarshalBinary(b []byte) error {
	b, d.v4 = consumeUint64(b)
	b, d.total = consumeUint64(b)
	copy(d.mem[:], b)
	b = b[len(d.mem):]
	d.n = int(d.total % uint64(len(d.mem)))
	return nil
}
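This hunk touches how `UnmarshalBinary` restores the digest's buffered state (`d.mem`/`d.n`) from the serialized running total. A small round-trip sketch of the marshal/unmarshal pair on `*Digest` (hypothetical usage, not part of this diff; error handling elided):

```go
package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	d := xxhash.New()
	d.WriteString("hello, ")

	// Persist the partially-fed digest and restore it into a fresh one,
	// then keep writing. The final sum equals hashing the whole string.
	state, _ := d.MarshalBinary()
	d2 := xxhash.New()
	_ = d2.UnmarshalBinary(state)
	d2.WriteString("world")

	fmt.Printf("%#016x\n", d2.Sum64())
}
```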
62
vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s
generated
vendored
@@ -6,7 +6,7 @@

// Register allocation:
// AX	h
// SI	pointer to advance through b
// CX	pointer to advance through b
// DX	n
// BX	loop end
// R8	v1, k1
@@ -16,39 +16,39 @@
// R12	tmp
// R13	prime1v
// R14	prime2v
// DI	prime4v
// R15	prime4v

// round reads from and advances the buffer pointer in SI.
// round reads from and advances the buffer pointer in CX.
// It assumes that R13 has prime1v and R14 has prime2v.
#define round(r) \
	MOVQ  (SI), R12 \
	ADDQ  $8, SI    \
	MOVQ  (CX), R12 \
	ADDQ  $8, CX    \
	IMULQ R14, R12  \
	ADDQ  R12, r    \
	ROLQ  $31, r    \
	IMULQ R13, r

// mergeRound applies a merge round on the two registers acc and val.
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
// It assumes that R13 has prime1v, R14 has prime2v, and R15 has prime4v.
#define mergeRound(acc, val) \
	IMULQ R14, val \
	ROLQ  $31, val \
	IMULQ R13, val \
	XORQ  val, acc \
	IMULQ R13, acc \
	ADDQ  DI, acc
	ADDQ  R15, acc

// func Sum64(b []byte) uint64
TEXT ·Sum64(SB), NOSPLIT, $0-32
	// Load fixed primes.
	MOVQ ·prime1v(SB), R13
	MOVQ ·prime2v(SB), R14
	MOVQ ·prime4v(SB), DI
	MOVQ ·prime4v(SB), R15

	// Load slice.
	MOVQ b_base+0(FP), SI
	MOVQ b_base+0(FP), CX
	MOVQ b_len+8(FP), DX
	LEAQ (SI)(DX*1), BX
	LEAQ (CX)(DX*1), BX

	// The first loop limit will be len(b)-32.
	SUBQ $32, BX
@@ -65,14 +65,14 @@ TEXT ·Sum64(SB), NOSPLIT, $0-32
	XORQ R11, R11
	SUBQ R13, R11

	// Loop until SI > BX.
	// Loop until CX > BX.
blockLoop:
	round(R8)
	round(R9)
	round(R10)
	round(R11)

	CMPQ SI, BX
	CMPQ CX, BX
	JLE  blockLoop

	MOVQ R8, AX
@@ -100,16 +100,16 @@ noBlocks:
afterBlocks:
	ADDQ DX, AX

	// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
	// Right now BX has len(b)-32, and we want to loop until CX > len(b)-8.
	ADDQ $24, BX

	CMPQ SI, BX
	CMPQ CX, BX
	JG   fourByte

wordLoop:
	// Calculate k1.
	MOVQ  (SI), R8
	ADDQ  $8, SI
	MOVQ  (CX), R8
	ADDQ  $8, CX
	IMULQ R14, R8
	ROLQ  $31, R8
	IMULQ R13, R8
@@ -117,18 +117,18 @@ wordLoop:
	XORQ  R8, AX
	ROLQ  $27, AX
	IMULQ R13, AX
	ADDQ  DI, AX
	ADDQ  R15, AX

	CMPQ SI, BX
	CMPQ CX, BX
	JLE  wordLoop

fourByte:
	ADDQ $4, BX
	CMPQ SI, BX
	CMPQ CX, BX
	JG   singles

	MOVL  (SI), R8
	ADDQ  $4, SI
	MOVL  (CX), R8
	ADDQ  $4, CX
	IMULQ R13, R8
	XORQ  R8, AX

@@ -138,19 +138,19 @@ fourByte:

singles:
	ADDQ $4, BX
	CMPQ SI, BX
	CMPQ CX, BX
	JGE  finalize

singlesLoop:
	MOVBQZX (SI), R12
	ADDQ    $1, SI
	MOVBQZX (CX), R12
	ADDQ    $1, CX
	IMULQ   ·prime5v(SB), R12
	XORQ    R12, AX

	ROLQ  $11, AX
	IMULQ R13, AX

	CMPQ SI, BX
	CMPQ CX, BX
	JL   singlesLoop

finalize:
@@ -179,9 +179,9 @@ TEXT ·writeBlocks(SB), NOSPLIT, $0-40
	MOVQ ·prime2v(SB), R14

	// Load slice.
	MOVQ b_base+8(FP), SI
	MOVQ b_base+8(FP), CX
	MOVQ b_len+16(FP), DX
	LEAQ (SI)(DX*1), BX
	LEAQ (CX)(DX*1), BX
	SUBQ $32, BX

	// Load vN from d.
@@ -199,7 +199,7 @@ blockLoop:
	round(R10)
	round(R11)

	CMPQ SI, BX
	CMPQ CX, BX
	JLE  blockLoop

	// Copy vN back to d.
@@ -208,8 +208,8 @@ blockLoop:
	MOVQ R10, 16(AX)
	MOVQ R11, 24(AX)

	// The number of bytes written is SI minus the old base pointer.
	SUBQ b_base+8(FP), SI
	MOVQ SI, ret+32(FP)
	// The number of bytes written is CX minus the old base pointer.
	SUBQ b_base+8(FP), CX
	MOVQ CX, ret+32(FP)

	RET
55
vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go
generated
vendored
@@ -6,52 +6,41 @@
package xxhash

import (
	"reflect"
	"unsafe"
)

// Notes:
//
// See https://groups.google.com/d/msg/golang-nuts/dcjzJy-bSpw/tcZYBzQqAQAJ
// for some discussion about these unsafe conversions.
//
// In the future it's possible that compiler optimizations will make these
// XxxString functions unnecessary by realizing that calls such as
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
// If that happens, even if we keep these functions they can be replaced with
// the trivial safe code.

// NOTE: The usual way of doing an unsafe string-to-[]byte conversion is:
// unsafe operations unnecessary: https://golang.org/issue/2205.
//
//   var b []byte
//   bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
//   bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
//   bh.Len = len(s)
//   bh.Cap = len(s)
//
// Unfortunately, as of Go 1.15.3 the inliner's cost model assigns a high enough
// weight to this sequence of expressions that any function that uses it will
// not be inlined. Instead, the functions below use a different unsafe
// conversion designed to minimize the inliner weight and allow both to be
// inlined. There is also a test (TestInlining) which verifies that these are
// inlined.
//
// See https://github.com/golang/go/issues/42739 for discussion.
// Both of these wrapper functions still incur function call overhead since they
// will not be inlined. We could write Go/asm copies of Sum64 and Digest.Write
// for strings to squeeze out a bit more speed. Mid-stack inlining should
// eventually fix this.

// Sum64String computes the 64-bit xxHash digest of s.
// It may be faster than Sum64([]byte(s)) by avoiding a copy.
func Sum64String(s string) uint64 {
	b := *(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)}))
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return Sum64(b)
}

// WriteString adds more data to d. It always returns len(s), nil.
// It may be faster than Write([]byte(s)) by avoiding a copy.
func (d *Digest) WriteString(s string) (n int, err error) {
	d.Write(*(*[]byte)(unsafe.Pointer(&sliceHeader{s, len(s)})))
	// d.Write always returns len(s), nil.
	// Ignoring the return output and returning these fixed values buys a
	// savings of 6 in the inliner's cost model.
	return len(s), nil
}

// sliceHeader is similar to reflect.SliceHeader, but it assumes that the layout
// of the first two words is the same as the layout of a string.
type sliceHeader struct {
	s   string
	cap int
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return d.Write(b)
}
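The conversion this hunk reverts to can be read in isolation: it aliases a string's backing array as a `[]byte` without copying, via `reflect.SliceHeader`. A standalone sketch of the same trick (illustrative only; the alias must never be written through, since strings are immutable):

```go
package main

import (
	"fmt"
	"reflect"
	"unsafe"
)

// bytesOf returns a []byte view of s without copying, using the same
// reflect.SliceHeader technique as the hunk above. The caller must never
// mutate the result: the bytes alias the (immutable) string data.
func bytesOf(s string) []byte {
	var b []byte
	bh := (*reflect.SliceHeader)(unsafe.Pointer(&b))
	bh.Data = (*reflect.StringHeader)(unsafe.Pointer(&s)).Data
	bh.Len = len(s)
	bh.Cap = len(s)
	return b
}

func main() {
	b := bytesOf("zero-copy")
	fmt.Println(len(b), cap(b), string(b[:4]))
}
```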
2
vendor/github.com/compose-spec/compose-go/loader/full-example.yml
generated
vendored
@@ -268,7 +268,7 @@ services:
      - .:/code
      - ./static:/var/www/html
      # User-relative path
      - ~/configs:/etc/configs:ro
      - ~/configs:/etc/configs/:ro
      # Named volume
      - datavolume:/var/lib/mysql
      - type: bind
54
vendor/github.com/compose-spec/compose-go/loader/loader.go
generated
vendored
@@ -145,7 +145,7 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
||||
op(opts)
|
||||
}
|
||||
|
||||
var configs []*types.Config
|
||||
configs := []*types.Config{}
|
||||
for i, file := range configDetails.ConfigFiles {
|
||||
configDict := file.Config
|
||||
if configDict == nil {
|
||||
@@ -222,14 +222,14 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
|
||||
}
|
||||
|
||||
func parseConfig(b []byte, opts *Options) (map[string]interface{}, error) {
|
||||
yml, err := ParseYAML(b)
|
||||
yaml, err := ParseYAML(b)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !opts.SkipInterpolation {
|
||||
return interp.Interpolate(yml, *opts.Interpolate)
|
||||
return interp.Interpolate(yaml, *opts.Interpolate)
|
||||
}
|
||||
return yml, err
|
||||
return yaml, err
|
||||
}
|
||||
|
||||
func groupXFieldsIntoExtensions(dict map[string]interface{}) map[string]interface{} {
|
||||
@@ -342,8 +342,8 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec
|
||||
reflect.TypeOf(types.UlimitsConfig{}): transformUlimits,
|
||||
reflect.TypeOf(types.UnitBytes(0)): transformSize,
|
||||
reflect.TypeOf([]types.ServicePortConfig{}): transformServicePort,
|
||||
reflect.TypeOf(types.ServiceSecretConfig{}): transformFileReferenceConfig,
|
||||
reflect.TypeOf(types.ServiceConfigObjConfig{}): transformFileReferenceConfig,
|
||||
reflect.TypeOf(types.ServiceSecretConfig{}): transformStringSourceMap,
|
||||
reflect.TypeOf(types.ServiceConfigObjConfig{}): transformStringSourceMap,
|
||||
reflect.TypeOf(types.StringOrNumberList{}): transformStringOrNumberList,
|
||||
reflect.TypeOf(map[string]*types.ServiceNetworkConfig{}): transformServiceNetworkMap,
|
||||
reflect.TypeOf(types.Mapping{}): transformMappingOrListFunc("=", false),
|
||||
@@ -372,7 +372,7 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec
|
||||
}
|
||||
}
|
||||
|
||||
// keys need to be converted to strings for jsonschema
|
||||
// keys needs to be converted to strings for jsonschema
|
||||
func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interface{}, error) {
|
||||
if mapping, ok := value.(map[interface{}]interface{}); ok {
|
||||
dict := make(map[string]interface{})
|
||||
@@ -396,7 +396,7 @@ func convertToStringKeysRecursive(value interface{}, keyPrefix string) (interfac
|
||||
return dict, nil
|
||||
}
|
||||
if list, ok := value.([]interface{}); ok {
|
||||
var convertedList []interface{}
|
||||
convertedList := []interface{}{}
|
||||
for index, entry := range list {
|
||||
newKeyPrefix := fmt.Sprintf("%s[%d]", keyPrefix, index)
|
||||
convertedEntry, err := convertToStringKeysRecursive(entry, newKeyPrefix)
|
||||
@@ -532,7 +532,7 @@ func LoadService(name string, serviceDict map[string]interface{}, workingDir str
|
||||
}
|
||||
|
||||
for i, volume := range serviceConfig.Volumes {
|
||||
if volume.Type != types.VolumeTypeBind {
|
||||
if volume.Type != "bind" {
|
||||
continue
|
||||
}
|
||||
|
||||
@@ -552,14 +552,14 @@ func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, l
|
||||
environment := types.MappingWithEquals{}
|
||||
|
||||
if len(serviceConfig.EnvFile) > 0 {
|
||||
for _, envFile := range serviceConfig.EnvFile {
|
||||
filePath := absPath(workingDir, envFile)
|
||||
for _, file := range serviceConfig.EnvFile {
|
||||
filePath := absPath(workingDir, file)
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer file.Close()
|
||||
fileVars, err := godotenv.ParseWithLookup(file, godotenv.LookupFn(lookupEnv))
|
||||
fileVars, err := godotenv.Parse(file)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -797,7 +797,7 @@ var transformServicePort TransformerFunc = func(data interface{}) (interface{},
|
||||
// We process the list instead of individual items here.
|
||||
// The reason is that one entry might be mapped to multiple ServicePortConfig.
|
||||
// Therefore we take an input of a list and return an output of a list.
|
||||
var ports []interface{}
|
||||
ports := []interface{}{}
|
||||
for _, entry := range entries {
|
||||
switch value := entry.(type) {
|
||||
case int:
|
||||
@@ -852,27 +852,17 @@ var transformServiceDeviceRequest TransformerFunc = func(data interface{}) (inte
|
||||
}
|
||||
}
|
||||
|
||||
var transformFileReferenceConfig TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
var transformStringSourceMap TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
return map[string]interface{}{"source": value}, nil
|
||||
case map[string]interface{}:
|
||||
if target, ok := value["target"]; ok {
|
||||
value["target"] = cleanTarget(target.(string))
|
||||
}
|
||||
return groupXFieldsIntoExtensions(value), nil
|
||||
return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for secret", value)
|
||||
}
|
||||
}
|
||||
|
||||
func cleanTarget(target string) string {
|
||||
if target == "" {
|
||||
return ""
|
||||
}
|
||||
return path.Clean(target)
|
||||
}
|
||||
|
||||
var transformBuildConfig TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
@@ -916,15 +906,9 @@ var transformExtendsConfig TransformerFunc = func(data interface{}) (interface{}
|
||||
var transformServiceVolumeConfig TransformerFunc = func(data interface{}) (interface{}, error) {
|
||||
switch value := data.(type) {
|
||||
case string:
|
||||
volume, err := ParseVolume(value)
|
||||
volume.Target = cleanTarget(volume.Target)
|
||||
return volume, err
|
||||
return ParseVolume(value)
|
||||
case map[string]interface{}:
|
||||
data := groupXFieldsIntoExtensions(data.(map[string]interface{}))
|
||||
if target, ok := data["target"]; ok {
|
||||
data["target"] = cleanTarget(target.(string))
|
||||
}
|
||||
return data, nil
|
||||
return groupXFieldsIntoExtensions(data.(map[string]interface{})), nil
|
||||
default:
|
||||
return data, errors.Errorf("invalid type %T for service volume", value)
|
||||
}
|
||||
@@ -987,7 +971,7 @@ func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool
	switch value := mappingOrList.(type) {
	case map[string]interface{}:
		return toMapStringString(value, allowNil)
	case []interface{}:
	case ([]interface{}):
		result := make(map[string]interface{})
		for _, value := range value {
			parts := strings.SplitN(value.(string), sep, 2)
@@ -1070,7 +1054,7 @@ func toString(value interface{}, allowNil bool) interface{} {
}

func toStringList(value map[string]interface{}, separator string, allowNil bool) []string {
	var output []string
	output := []string{}
	for key, value := range value {
		if value == nil && !allowNil {
			continue
25
vendor/github.com/compose-spec/compose-go/loader/merge.go
generated
vendored
@@ -114,11 +114,6 @@ func _merge(baseService *types.ServiceConfig, overrideService *types.ServiceConf
	if overrideService.Entrypoint != nil {
		baseService.Entrypoint = overrideService.Entrypoint
	}
	if baseService.Environment != nil {
		baseService.Environment.OverrideBy(overrideService.Environment)
	} else {
		baseService.Environment = overrideService.Environment
	}
	return baseService, nil
}

@@ -184,7 +179,7 @@ func toServiceVolumeConfigsMap(s interface{}) (map[interface{}]interface{}, erro
}

func toServiceSecretConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
	var s []types.ServiceSecretConfig
	s := []types.ServiceSecretConfig{}
	for _, v := range m {
		s = append(s, v.(types.ServiceSecretConfig))
	}
@@ -194,7 +189,7 @@ func toServiceSecretConfigsSlice(dst reflect.Value, m map[interface{}]interface{
}

func toSServiceConfigObjConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
	var s []types.ServiceConfigObjConfig
	s := []types.ServiceConfigObjConfig{}
	for _, v := range m {
		s = append(s, v.(types.ServiceConfigObjConfig))
	}
@@ -204,7 +199,7 @@ func toSServiceConfigObjConfigsSlice(dst reflect.Value, m map[interface{}]interf
}

func toServicePortConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
	var s []types.ServicePortConfig
	s := []types.ServicePortConfig{}
	for _, v := range m {
		s = append(s, v.(types.ServicePortConfig))
	}
@@ -225,7 +220,7 @@ func toServicePortConfigsSlice(dst reflect.Value, m map[interface{}]interface{})
}

func toServiceVolumeConfigsSlice(dst reflect.Value, m map[interface{}]interface{}) error {
	var s []types.ServiceVolumeConfig
	s := []types.ServiceVolumeConfig{}
	for _, v := range m {
		s = append(s, v.(types.ServiceVolumeConfig))
	}
@@ -234,7 +229,7 @@ func toServiceVolumeConfigsSlice(dst reflect.Value, m map[interface{}]interface{
	return nil
}

type toMapFn func(s interface{}) (map[interface{}]interface{}, error)
type tomapFn func(s interface{}) (map[interface{}]interface{}, error)
type writeValueFromMapFn func(reflect.Value, map[interface{}]interface{}) error

func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src reflect.Value) error {
@@ -250,13 +245,13 @@ func safelyMerge(mergeFn func(dst, src reflect.Value) error) func(dst, src refle
	}
}

func mergeSlice(toMap toMapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error {
func mergeSlice(tomap tomapFn, writeValue writeValueFromMapFn) func(dst, src reflect.Value) error {
	return func(dst, src reflect.Value) error {
		dstMap, err := sliceToMap(toMap, dst)
		dstMap, err := sliceToMap(tomap, dst)
		if err != nil {
			return err
		}
		srcMap, err := sliceToMap(toMap, src)
		srcMap, err := sliceToMap(tomap, src)
		if err != nil {
			return err
		}
@@ -267,12 +262,12 @@ func mergeSlice(toMap toMapFn, writeValue writeValueFromMapFn) func(dst, src ref
	}
}

func sliceToMap(toMap toMapFn, v reflect.Value) (map[interface{}]interface{}, error) {
func sliceToMap(tomap tomapFn, v reflect.Value) (map[interface{}]interface{}, error) {
	// check if valid
	if !v.IsValid() {
		return nil, errors.Errorf("invalid value : %+v", v)
	}
	return toMap(v.Interface())
	return tomap(v.Interface())
}

func mergeLoggingConfig(dst, src reflect.Value) error {
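The mergeSlice/sliceToMap pair above implements a generic pattern: convert both slices to maps keyed by a stable identity, overlay the override onto the base, then write the merged map back to a slice. A self-contained sketch of that pattern (the type and field names are illustrative, not compose-go's):

package main

import "fmt"

type port struct {
	Target    int
	Published int
}

// key gives each element a stable identity for merging.
func key(p port) int { return p.Target }

// mergePorts overlays override onto base: entries with the same Target
// are replaced, new ones are appended.
func mergePorts(base, override []port) []port {
	merged := map[int]port{}
	for _, p := range base {
		merged[key(p)] = p
	}
	for _, p := range override {
		merged[key(p)] = p
	}
	out := make([]port, 0, len(merged))
	for _, p := range merged {
		out = append(out, p)
	}
	return out
}

func main() {
	base := []port{{Target: 80, Published: 8080}}
	override := []port{{Target: 80, Published: 9090}, {Target: 443, Published: 8443}}
	// 80 is overridden to 9090, 443 is added; map iteration order is not guaranteed.
	fmt.Println(mergePorts(base, override))
}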
6
vendor/github.com/compose-spec/compose-go/loader/volume.go
generated
vendored
@@ -36,11 +36,11 @@ func ParseVolume(spec string) (types.ServiceVolumeConfig, error) {
		return volume, errors.New("invalid empty volume spec")
	case 1, 2:
		volume.Target = spec
		volume.Type = types.VolumeTypeVolume
		volume.Type = string(types.VolumeTypeVolume)
		return volume, nil
	}

	var buffer []rune
	buffer := []rune{}
	for _, char := range spec + string(endOfSpec) {
		switch {
		case isWindowsDrive(buffer, char):
@@ -50,7 +50,7 @@ func ParseVolume(spec string) (types.ServiceVolumeConfig, error) {
				populateType(&volume)
				return volume, errors.Wrapf(err, "invalid spec: %s", spec)
			}
			buffer = nil
			buffer = []rune{}
		default:
			buffer = append(buffer, char)
		}
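ParseVolume turns Compose short volume syntax into a structured config, with populateType inferring bind mount vs named volume from the source. A rough usage sketch, assuming the exported loader API shown above; the fields printed are an assumption about types.ServiceVolumeConfig, not taken from this diff:

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/loader"
)

func main() {
	// Short bind-mount syntax: source, target, and a mode flag.
	v, err := loader.ParseVolume("./data:/var/lib/app:ro")
	if err != nil {
		panic(err)
	}
	// Assumed fields: a path-like source should make populateType pick a bind mount.
	fmt.Println(v.Type, v.Source, v.Target, v.ReadOnly) // bind ./data /var/lib/app true
}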
32
vendor/github.com/compose-spec/compose-go/template/template.go
generated
vendored
@@ -26,7 +26,7 @@ import (

var delimiter = "\\$"
var substitutionNamed = "[_a-z][_a-z0-9]*"
var substitutionBraced = "[_a-z][_a-z0-9]*(?::?[-?](.*}|[^}]*))?"
var substitutionBraced = "[_a-z][_a-z0-9]*(?::?[-?][^}]*)?"

var patternString = fmt.Sprintf(
	"%s(?i:(?P<escaped>%s)|(?P<named>%s)|{(?P<braced>%s)}|(?P<invalid>))",
@@ -35,6 +35,14 @@ var patternString = fmt.Sprintf(

var defaultPattern = regexp.MustCompile(patternString)

// DefaultSubstituteFuncs contains the default SubstituteFunc used by the docker cli
var DefaultSubstituteFuncs = []SubstituteFunc{
	softDefault,
	hardDefault,
	requiredNonEmpty,
	required,
}

// InvalidTemplateError is returned when a variable template is not in a valid
// format
type InvalidTemplateError struct {
@@ -59,14 +67,6 @@ type SubstituteFunc func(string, Mapping) (string, bool, error)
// SubstituteWith substitute variables in the string with their values.
// It accepts additional substitute function.
func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
	if len(subsFuncs) == 0 {
		subsFuncs = []SubstituteFunc{
			softDefault,
			hardDefault,
			requiredNonEmpty,
			required,
		}
	}
	var err error
	result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
		matches := pattern.FindStringSubmatch(substring)
@@ -116,7 +116,7 @@ func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, su

// Substitute variables in the string with their values
func Substitute(template string, mapping Mapping) (string, error) {
	return SubstituteWith(template, mapping, defaultPattern)
	return SubstituteWith(template, mapping, defaultPattern, DefaultSubstituteFuncs...)
}

// ExtractVariables returns a map of all the variables defined in the specified
@@ -215,10 +215,6 @@ func softDefault(substitution string, mapping Mapping) (string, bool, error) {
		return "", false, nil
	}
	name, defaultValue := partition(substitution, sep)
	defaultValue, err := Substitute(defaultValue, mapping)
	if err != nil {
		return "", false, err
	}
	value, ok := mapping(name)
	if !ok || value == "" {
		return defaultValue, true, nil
@@ -233,10 +229,6 @@ func hardDefault(substitution string, mapping Mapping) (string, bool, error) {
		return "", false, nil
	}
	name, defaultValue := partition(substitution, sep)
	defaultValue, err := Substitute(defaultValue, mapping)
	if err != nil {
		return "", false, err
	}
	value, ok := mapping(name)
	if !ok {
		return defaultValue, true, nil
@@ -257,10 +249,6 @@ func withRequired(substitution string, mapping Mapping, sep string, valid func(s
		return "", false, nil
	}
	name, errorMessage := partition(substitution, sep)
	errorMessage, err := Substitute(errorMessage, mapping)
	if err != nil {
		return "", false, err
	}
	value, ok := mapping(name)
	if !ok || !valid(value) {
		return "", true, &InvalidTemplateError{
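The substitute funcs in play here implement the usual shell-style forms: softDefault handles ${VAR:-default} (unset or empty), hardDefault handles ${VAR-default} (unset only), and the required variants (${VAR:?msg} / ${VAR?msg}) return an InvalidTemplateError. A minimal sketch of the resulting behavior, assuming the vendored compose-go template API shown above:

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/template"
)

func main() {
	env := map[string]string{"HOST": "db.internal"}
	// Mapping resolves a variable name to a value plus an "is set" flag.
	mapping := func(name string) (string, bool) {
		v, ok := env[name]
		return v, ok
	}

	// softDefault: ${PORT:-5432} falls back because PORT is unset.
	out, err := template.Substitute("postgres://${HOST}:${PORT:-5432}", mapping)
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // postgres://db.internal:5432
}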
17
vendor/github.com/compose-spec/compose-go/types/project.go
generated
vendored
@@ -46,7 +46,7 @@ type Project struct {

// ServiceNames return names for all services in this Compose config
func (p Project) ServiceNames() []string {
	var names []string
	names := []string{}
	for _, s := range p.Services {
		names = append(names, s.Name)
	}
@@ -56,7 +56,7 @@ func (p Project) ServiceNames() []string {

// VolumeNames return names for all volumes in this Compose config
func (p Project) VolumeNames() []string {
	var names []string
	names := []string{}
	for k := range p.Volumes {
		names = append(names, k)
	}
@@ -66,7 +66,7 @@ func (p Project) VolumeNames() []string {

// NetworkNames return names for all volumes in this Compose config
func (p Project) NetworkNames() []string {
	var names []string
	names := []string{}
	for k := range p.Networks {
		names = append(names, k)
	}
@@ -76,7 +76,7 @@ func (p Project) NetworkNames() []string {

// SecretNames return names for all secrets in this Compose config
func (p Project) SecretNames() []string {
	var names []string
	names := []string{}
	for k := range p.Secrets {
		names = append(names, k)
	}
@@ -86,7 +86,7 @@ func (p Project) SecretNames() []string {

// ConfigNames return names for all configs in this Compose config
func (p Project) ConfigNames() []string {
	var names []string
	names := []string{}
	for k := range p.Configs {
		names = append(names, k)
	}
@@ -179,12 +179,12 @@ func (p *Project) RelativePath(path string) string {
}

// HasProfile return true if service has no profile declared or has at least one profile matching
func (s ServiceConfig) HasProfile(profiles []string) bool {
	if len(s.Profiles) == 0 {
func (service ServiceConfig) HasProfile(profiles []string) bool {
	if len(service.Profiles) == 0 {
		return true
	}
	for _, p := range profiles {
		for _, sp := range s.Profiles {
		for _, sp := range service.Profiles {
			if sp == p {
				return true
			}
@@ -327,6 +327,7 @@ func (p *Project) ResolveImages(resolver func(named reference.Named) (digest.Dig
		if err != nil {
			return err
		}

		named, err = reference.WithDigest(named, digest)
		if err != nil {
			return err
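HasProfile, whose receiver is renamed in the hunk above, gates services on active profiles: a service that declares no profiles is always in scope, while one with profiles needs at least one match. A small sketch, assuming the vendored types package and only the fields visible in this diff:

package main

import (
	"fmt"

	"github.com/compose-spec/compose-go/types"
)

func main() {
	debug := types.ServiceConfig{Name: "debugger", Profiles: []string{"debug"}}
	web := types.ServiceConfig{Name: "web"} // no profiles declared

	fmt.Println(web.HasProfile(nil))                 // true: always active
	fmt.Println(debug.HasProfile(nil))               // false: needs an enabled profile
	fmt.Println(debug.HasProfile([]string{"debug"})) // true: profile matches
}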
16
vendor/github.com/compose-spec/compose-go/types/types.go
generated
vendored
@@ -33,7 +33,7 @@ func (d Duration) String() string {
	return time.Duration(d).String()
}

// ConvertDurationPtr converts a type defined Duration pointer to a time.Duration pointer with the same value.
// ConvertDurationPtr converts a typedefined Duration pointer to a time.Duration pointer with the same value.
func ConvertDurationPtr(d *Duration) *time.Duration {
	if d == nil {
		return nil
@@ -121,7 +121,7 @@ type ServiceConfig struct {
	Extends       ExtendsConfig `yaml:"extends,omitempty" json:"extends,omitempty"`
	ExternalLinks []string      `mapstructure:"external_links" yaml:"external_links,omitempty" json:"external_links,omitempty"`
	ExtraHosts    HostsList     `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
	GroupAdd      []string      `mapstructure:"group_add" yaml:"group_add,omitempty" json:"group_add,omitempty"`
	GroupAdd      []string      `mapstructure:"group_app" yaml:"group_add,omitempty" json:"group_add,omitempty"`
	Hostname      string        `yaml:",omitempty" json:"hostname,omitempty"`
	HealthCheck   *HealthCheckConfig `yaml:",omitempty" json:"healthcheck,omitempty"`
	Image         string        `yaml:",omitempty" json:"image,omitempty"`
@@ -208,7 +208,7 @@ const (
	PullPolicyNever = "never"
	//PullPolicyIfNotPresent pull missing images
	PullPolicyIfNotPresent = "if_not_present"
	//PullPolicyMissing pull missing images
	//PullPolicyIfNotPresent pull missing images
	PullPolicyMissing = "missing"
	//PullPolicyBuild force building images
	PullPolicyBuild = "build"
@@ -611,7 +611,7 @@ func ParsePortConfig(value string) ([]ServicePortConfig, error) {
}

func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) ([]ServicePortConfig, error) {
	var portConfigs []ServicePortConfig
	portConfigs := []ServicePortConfig{}
	for _, binding := range portBindings[port] {
		startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort)

@@ -647,13 +647,13 @@ type ServiceVolumeConfig struct {
}

const (
	// VolumeTypeBind is the type for mounting host dir
	// TypeBind is the type for mounting host dir
	VolumeTypeBind = "bind"
	// VolumeTypeVolume is the type for remote storage volumes
	// TypeVolume is the type for remote storage volumes
	VolumeTypeVolume = "volume"
	// VolumeTypeTmpfs is the type for mounting tmpfs
	// TypeTmpfs is the type for mounting tmpfs
	VolumeTypeTmpfs = "tmpfs"
	// VolumeTypeNamedPipe is the type for mounting Windows named pipes
	// TypeNamedPipe is the type for mounting Windows named pipes
	VolumeTypeNamedPipe = "npipe"
)

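The mapstructure tag difference above (group_add vs group_app) matters because the loader decodes the raw YAML map into ServiceConfig by tag name, so a misspelled tag silently drops the key. A hedged illustration using github.com/mitchellh/mapstructure directly, as a simplified stand-in for compose-go's decode step, with an illustrative struct:

package main

import (
	"fmt"

	"github.com/mitchellh/mapstructure"
)

type svc struct {
	GroupAdd []string `mapstructure:"group_add"`
}

func main() {
	input := map[string]interface{}{"group_add": []string{"audio"}}
	var out svc
	if err := mapstructure.Decode(input, &out); err != nil {
		panic(err)
	}
	fmt.Println(out.GroupAdd) // [audio]
	// With the tag misspelled as "group_app", the same input would leave
	// GroupAdd empty, silently dropping the setting.
}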
10
vendor/github.com/compose-spec/godotenv/godotenv.go
generated
vendored
@@ -266,12 +266,12 @@ func parseLineWithLookup(line string, envMap map[string]string, lookupFn LookupF
	firstColon := strings.Index(line, ":")
	splitString := strings.SplitN(line, "=", 2)
	if firstColon != -1 && (firstColon < firstEquals || firstEquals == -1) {
		// This is a yaml-style line
		//this is a yaml-style line
		splitString = strings.SplitN(line, ":", 2)
	}

	if len(splitString) != 2 {
		err = errors.New("can't separate key from value")
		err = errors.New("Can't separate key from value")
		return
	}
	key = exportRegex.ReplaceAllString(splitString[0], "$1")
@@ -341,15 +341,15 @@ func expandVariables(v string, envMap map[string]string, lookupFn LookupFn) stri
		if submatch[1] == "\\" || submatch[2] == "(" {
			return submatch[0][1:]
		} else if submatch[4] != "" {
			// first check if we have defined this already earlier
			//first check if we have defined this already earlier
			if envMap[submatch[4]] != "" {
				return envMap[submatch[4]]
			}
			if lookupFn == nil {
				return ""
			}
			// if we have not defined it, check the lookup function provided
			// by the user
			//if we have not defined it, check the lookup function provided
			//by the user
			s2, ok := lookupFn(submatch[4])
			if ok {
				return s2
28
vendor/github.com/compose-spec/godotenv/parser.go
generated
vendored
@@ -127,21 +127,15 @@ loop:

// extractVarValue extracts variable value and returns rest of slice
func extractVarValue(src []byte, envMap map[string]string, lookupFn LookupFn) (value string, rest []byte, err error) {
	quote, isQuoted := hasQuotePrefix(src)
	if !isQuoted {
		// unquoted value - read until new line
		end := bytes.IndexFunc(src, isNewLine)
		var rest []byte
		var value string
		if end < 0 {
			value := strings.TrimRightFunc(string(src), unicode.IsSpace)
			rest = nil
			return expandVariables(value, envMap, lookupFn), rest, nil
	quote, hasPrefix := hasQuotePrefix(src)
	if !hasPrefix {
		// unquoted value - read until whitespace
		end := bytes.IndexFunc(src, unicode.IsSpace)
		if end == -1 {
			return expandVariables(string(src), envMap, lookupFn), nil, nil
		}

		value = strings.TrimRightFunc(string(src[0:end]), unicode.IsSpace)
		rest = src[end:]
		return expandVariables(value, envMap, lookupFn), rest, nil
		return expandVariables(string(src[0:end]), envMap, lookupFn), src[end:], nil
	}

	// lookup quoted string terminator
@@ -198,7 +192,7 @@ func indexOfNonSpaceChar(src []byte) int {
}

// hasQuotePrefix reports whether charset starts with single or double quote and returns quote character
func hasQuotePrefix(src []byte) (quote byte, isQuoted bool) {
func hasQuotePrefix(src []byte) (prefix byte, isQuored bool) {
	if len(src) == 0 {
		return 0, false
	}
@@ -227,9 +221,3 @@ func isSpace(r rune) bool {
	}
	return false
}


// isNewLine reports whether the rune is a new line character
func isNewLine(r rune) bool {
	return r == '\n'
}
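The two extractVarValue versions above differ in where an unquoted value ends: the newline-terminated scan keeps inner spaces, while the whitespace-terminated scan stops at the first space. A self-contained comparison built from the same stdlib calls the diff uses:

package main

import (
	"bytes"
	"fmt"
	"unicode"
)

func isNewLine(r rune) bool { return r == '\n' }

func main() {
	src := []byte("hello world\nNEXT=1")

	// Newline-terminated scan (one version above): value keeps inner spaces.
	if end := bytes.IndexFunc(src, isNewLine); end >= 0 {
		fmt.Printf("%q\n", string(src[:end])) // "hello world"
	}

	// Whitespace-terminated scan (the other version): value stops at the first space.
	if end := bytes.IndexFunc(src, unicode.IsSpace); end >= 0 {
		fmt.Printf("%q\n", string(src[:end])) // "hello"
	}
}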
17
vendor/github.com/containerd/containerd/api/services/content/v1/doc.go
generated
vendored
@@ -1,17 +0,0 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package content
54
vendor/github.com/containerd/containerd/archive/compression/compression.go
generated
vendored
@@ -21,16 +21,15 @@ import (
	"bytes"
	"compress/gzip"
	"context"
	"encoding/binary"
	"fmt"
	"io"
	"os"
	"os/exec"
	"strconv"
	"sync"

	"github.com/containerd/containerd/log"
	"github.com/klauspost/compress/zstd"
	exec "golang.org/x/sys/execabs"
)

type (
@@ -126,52 +125,17 @@ func (r *bufferedReader) Peek(n int) ([]byte, error) {
	return r.buf.Peek(n)
}

const (
	zstdMagicSkippableStart = 0x184D2A50
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

var (
	gzipMagic = []byte{0x1F, 0x8B, 0x08}
	zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
)

type matcher = func([]byte) bool

func magicNumberMatcher(m []byte) matcher {
	return func(source []byte) bool {
		return bytes.HasPrefix(source, m)
	}
}

// zstdMatcher detects zstd compression algorithm.
// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.
func zstdMatcher() matcher {
	return func(source []byte) bool {
		if bytes.HasPrefix(source, zstdMagic) {
			// Zstandard frame
			return true
		}
		// skippable frame
		if len(source) < 8 {
			return false
		}
		// magic number from 0x184D2A50 to 0x184D2A5F.
		if binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart {
			return true
		}
		return false
	}
}

// DetectCompression detects the compression algorithm of the source.
func DetectCompression(source []byte) Compression {
	for compression, fn := range map[Compression]matcher{
		Gzip: magicNumberMatcher(gzipMagic),
		Zstd: zstdMatcher(),
	for compression, m := range map[Compression][]byte{
		Gzip: {0x1F, 0x8B, 0x08},
		Zstd: {0x28, 0xb5, 0x2f, 0xfd},
	} {
		if fn(source) {
		if len(source) < len(m) {
			// Len too short
			continue
		}
		if bytes.Equal(m, source[:len(m)]) {
			return compression
		}
	}
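The zstdMatcher in the hunk above recognizes both regular Zstandard frames (magic 0x28 0xb5 0x2f 0xfd) and skippable frames, whose little-endian magic falls in 0x184D2A50..0x184D2A5F; the plain prefix table on the other side of the diff only catches the former. A runnable condensation of the matcher logic shown above:

package main

import (
	"bytes"
	"encoding/binary"
	"fmt"
)

const (
	zstdMagicSkippableStart = 0x184D2A50
	zstdMagicSkippableMask  = 0xFFFFFFF0
)

var zstdMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}

func isZstd(source []byte) bool {
	if bytes.HasPrefix(source, zstdMagic) {
		return true // regular Zstandard frame
	}
	if len(source) < 8 {
		return false
	}
	// skippable frames use magic numbers 0x184D2A50..0x184D2A5F
	return binary.LittleEndian.Uint32(source[:4])&zstdMagicSkippableMask == zstdMagicSkippableStart
}

func main() {
	// Little-endian encoding of 0x184D2A5A, followed by a zero frame size.
	skippable := []byte{0x5A, 0x2A, 0x4D, 0x18, 0, 0, 0, 0}
	fmt.Println(isZstd(skippable)) // true: a prefix check alone would miss this
}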
47
vendor/github.com/containerd/containerd/content/helpers.go
generated
vendored
@@ -18,9 +18,8 @@ package content

import (
	"context"
	"errors"
	"fmt"
	"io"
	"io/ioutil"
	"math/rand"
	"sync"
	"time"
@@ -28,6 +27,7 @@ import (
	"github.com/containerd/containerd/errdefs"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

var bufPool = sync.Pool{
@@ -77,7 +77,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc o
	cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
	if err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return fmt.Errorf("failed to open writer: %w", err)
			return errors.Wrap(err, "failed to open writer")
		}

		return nil // all ready present
@@ -134,28 +134,23 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
	ws, err := cw.Status()
	if err != nil {
		return fmt.Errorf("failed to get status: %w", err)
		return errors.Wrap(err, "failed to get status")
	}

	if ws.Offset > 0 {
		r, err = seekReader(r, ws.Offset, size)
		if err != nil {
			return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
			return errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
		}
	}

	copied, err := copyWithBuffer(cw, r)
	if err != nil {
		return fmt.Errorf("failed to copy: %w", err)
	}
	if size != 0 && copied < size-ws.Offset {
		// Short writes would return its own error, this indicates a read failure
		return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
	if _, err := copyWithBuffer(cw, r); err != nil {
		return errors.Wrap(err, "failed to copy")
	}

	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
		}
	}

@@ -170,15 +165,8 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
		return err
	}

	copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
	if err != nil {
		return fmt.Errorf("failed to copy: %w", err)
	}
	if copied < n {
		// Short writes would return its own error, this indicates a read failure
		return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
	}
	return nil
	_, err = copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
	return err
}

// CopyReader copies to a writer from a given reader, returning
@@ -190,13 +178,13 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
func CopyReader(cw Writer, r io.Reader) (int64, error) {
	ws, err := cw.Status()
	if err != nil {
		return 0, fmt.Errorf("failed to get status: %w", err)
		return 0, errors.Wrap(err, "failed to get status")
	}

	if ws.Offset > 0 {
		r, err = seekReader(r, ws.Offset, 0)
		if err != nil {
			return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
			return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
		}
	}

@@ -212,10 +200,7 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
	if ok {
		nn, err := seeker.Seek(offset, io.SeekStart)
		if nn != offset {
			if err == nil {
				err = fmt.Errorf("unexpected seek location without seek error")
			}
			return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err)
			return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
		}

		if err != nil {
@@ -233,12 +218,12 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
	}

	// well then, let's just discard up to the offset
	n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset))
	n, err := copyWithBuffer(ioutil.Discard, io.LimitReader(r, offset))
	if err != nil {
		return nil, fmt.Errorf("failed to discard to offset: %w", err)
		return nil, errors.Wrap(err, "failed to discard to offset")
	}
	if n != offset {
		return nil, errors.New("unable to discard to offset")
		return nil, errors.Errorf("unable to discard to offset")
	}

	return r, nil
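This file swaps between fmt.Errorf with the %w verb and github.com/pkg/errors wrapping. For callers the two styles are largely interchangeable: both produce chains that the stdlib errors.Is/errors.As can walk, since pkg/errors has implemented Unwrap since v0.9.0. A small sketch of that equivalence (the sentinel and messages are illustrative):

package main

import (
	stderrors "errors"
	"fmt"

	pkgerrors "github.com/pkg/errors"
)

var errNotFound = stderrors.New("not found")

func main() {
	a := fmt.Errorf("failed to open writer: %w", errNotFound)
	b := pkgerrors.Wrap(errNotFound, "failed to open writer")

	fmt.Println(stderrors.Is(a, errNotFound)) // true: %w feeds stdlib unwrapping
	fmt.Println(stderrors.Is(b, errNotFound)) // true: pkg/errors exposes Unwrap too
}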
10
vendor/github.com/containerd/containerd/content/local/locks.go
generated
vendored
@@ -17,11 +17,11 @@
package local

import (
	"fmt"
	"sync"
	"time"

	"github.com/containerd/containerd/errdefs"
	"github.com/pkg/errors"
)

// Handles locking references
@@ -41,13 +41,7 @@ func tryLock(ref string) error {
	defer locksMu.Unlock()

	if v, ok := locks[ref]; ok {
		// Returning the duration may help developers distinguish dead locks (long duration) from
		// lock contentions (short duration).
		now := time.Now()
		return fmt.Errorf(
			"ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since,
			errdefs.ErrUnavailable,
		)
		return errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked since %s", ref, v.since)
	}

	locks[ref] = &lock{time.Now()}
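The richer tryLock error above reports how long a ref has been held, which, as the deleted comment notes, helps distinguish deadlocks (long hold) from ordinary contention (short hold). A self-contained sketch of the pattern, simplified in that it stores a bare timestamp rather than the lock struct the real code uses:

package main

import (
	"fmt"
	"sync"
	"time"
)

var (
	mu    sync.Mutex
	locks = map[string]time.Time{}
)

func tryLock(ref string) error {
	mu.Lock()
	defer mu.Unlock()
	if since, ok := locks[ref]; ok {
		// A long hold time suggests a deadlock; a short one, ordinary contention.
		return fmt.Errorf("ref %s locked for %s (since %s)", ref, time.Since(since), since)
	}
	locks[ref] = time.Now()
	return nil
}

func main() {
	fmt.Println(tryLock("sha256:abc")) // <nil>
	fmt.Println(tryLock("sha256:abc")) // ref sha256:abc locked for ...
}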
7
vendor/github.com/containerd/containerd/content/local/readerat.go
generated
vendored
@@ -17,9 +17,10 @@
package local

import (
	"fmt"
	"os"

	"github.com/pkg/errors"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
)
@@ -39,7 +40,7 @@ func OpenReader(p string) (content.ReaderAt, error) {
			return nil, err
		}

		return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
		return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
	}

	fp, err := os.Open(p)
@@ -48,7 +49,7 @@ func OpenReader(p string) (content.ReaderAt, error) {
			return nil, err
		}

		return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
		return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
	}

	return sizeReaderAt{size: fi.Size(), fp: fp}, nil
Some files were not shown because too many files have changed in this diff.