Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-15 16:25:54 +08:00)

Compare commits: v0.20.0-rc ... v0.21.0-rc (114 commits)
Commit SHAs:

d9f8c73375, af5d0d4ab5, 20256b6999, c09d38af8a, 9430ed6752, 3b31a33d59,
5e5568f3cd, 9ac9a4170b, 8d1ba91dcb, 0df7a75961, de5fbc38b8, ef73c64d2c,
1784f84561, 6a6fa4f422, 2389d457a4, 3f82aadc6e, 79e3f12305, 1dc5f0751b,
7ba4da0800, a64e628774, 1c4b1a376c, e1f690abfc, 03569c2188, 350d3f0f4b,
dc27815236, 1089ff7341, 7433d37183, f9a76355b5, cfeea34b2d, ba2d3692a6,
853b593a4d, efb300e613, cee7b344da, 67dbde6970, 295653dabb, f5802119c5,
40b9ac1ec5, f11496448a, c8c9c72ca6, 9fe8139022, b3e8c62635, b8e9c28315,
3ae9970da5, 1d219100fc, 464f9278d1, 7216086b8c, b195b80ddf, 70a5e266d1,
689bea7963, 5176c38115, ec440c4574, 0a4eb7ec76, f710c93157, d1a0a1497c,
c880ecd513, d557da1935, 417af36abc, e236b86297, 633e8a0881, 5e1ea62f92,
4b90b84995, abc85c38f8, ccca7c795a, 04aab6958c, 9d640f0e33, b76fdcaf8d,
d693e18c04, b066ee1110, cf8bf9e104, 3bd54b19aa, 934841f329, b2ababc7b6,
0ccdb7e248, cacb4fb9b3, df80bd72c6, bb4bef2f04, a11507344a, 17af006857,
11c84973ef, cc4a291f6a, aa1fbc0421, b2bbb337e4, 012df71b63, a26bb271ab,
3e0682f039, 3aed658dc4, b4a0dee723, 6904512813, d41e335466, 0954dcb5fd,
38f64bf709, c1d3955fbe, d0b63e60e2, e141c8fa71, 2ee156236b, 1335264c9d,
e74185aa6d, 0224773102, 8c27b5c545, f7594d484b, f118749cdc, 0d92ad713c,
a18ff4d5ef, b035a04aaa, 6220e0aae8, d9abc78e8f, 3313026961, 06912aa24c,
cde0e9814d, 2e6e146087, af3cbe6cec, 1ef9e67cbb, 75204426bd, 6f5486e718
.github/labeler.yml (vendored): 5 changed lines
@@ -96,6 +96,11 @@ area/hack:
  - changed-files:
    - any-glob-to-any-file: 'hack/**'

# Add 'area/history' label to changes in history command
area/history:
  - changed-files:
    - any-glob-to-any-file: 'commands/history/**'

# Add 'area/tests' label to changes in test files
area/tests:
  - changed-files:
.github/workflows/build.yml (vendored): 11 changed lines
@@ -54,9 +54,9 @@ jobs:
          - master
          - latest
          - buildx-stable-1
          - v0.19.0
          - v0.18.2
          - v0.17.2
          - v0.16.0
        worker:
          - docker-container
          - remote
@@ -174,6 +174,11 @@ jobs:
    env:
      SKIP_INTEGRATION_TESTS: 1
    steps:
      -
        name: Setup Git config
        run: |
          git config --global core.autocrlf false
          git config --global core.eol lf
      -
        name: Checkout
        uses: actions/checkout@v4
@@ -264,8 +269,10 @@ jobs:
        name: Install vagrant
        run: |
          set -x
          wget -O - https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
          echo "deb [arch=$(dpkg --print-architecture) signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list
          sudo apt-get update
          sudo apt-get install -y libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt ruby-libvirt
          sudo apt-get install -y libvirt-dev libvirt-daemon libvirt-daemon-system vagrant vagrant-libvirt ruby-libvirt
          sudo systemctl enable --now libvirtd
          sudo chmod a+rw /var/run/libvirt/libvirt-sock
          vagrant plugin install vagrant-libvirt
.github/workflows/docs-upstream.yml (vendored): 2 changed lines
@@ -65,7 +65,7 @@ jobs:
          retention-days: 1

  validate:
    uses: docker/docs/.github/workflows/validate-upstream.yml@6b73b05acb21edf7995cc5b3c6672d8e314cee7a # pin for artifact v4 support: https://github.com/docker/docs/pull/19220
    uses: docker/docs/.github/workflows/validate-upstream.yml@main
    needs:
      - docs-yaml
    with:
.github/workflows/e2e.yml (vendored): 3 changed lines
@@ -215,6 +215,9 @@ jobs:
      -
        name: Checkout
        uses: actions/checkout@v4
      -
        name: Expose GitHub Runtime
        uses: crazy-max/ghaction-github-runtime@v3
      -
        name: Environment variables
        if: matrix.envs != ''
@@ -43,6 +43,9 @@ linters-settings:
        # buildkit errdefs package (or vice-versa).
        - pkg: "github.com/containerd/errdefs"
          alias: "cerrdefs"
        # Use a consistent alias to prevent confusion with "github.com/moby/buildkit/client"
        - pkg: "github.com/docker/docker/client"
          alias: "dockerclient"
        - pkg: "github.com/opencontainers/image-spec/specs-go/v1"
          alias: "ocispecs"
        - pkg: "github.com/opencontainers/go-digest"
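For reference, the importas rules above correspond to import aliases of the following form in Go sources (illustrative only, built from the package paths and aliases listed in the config):

	import (
		cerrdefs "github.com/containerd/errdefs"
		dockerclient "github.com/docker/docker/client"
		ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
	)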
@@ -5,12 +5,12 @@ ARG ALPINE_VERSION=3.21
ARG XX_VERSION=1.6.1

# for testing
ARG DOCKER_VERSION=27.4.1
ARG DOCKER_VERSION=28.0.0-rc.1
ARG DOCKER_VERSION_ALT_26=26.1.3
ARG DOCKER_CLI_VERSION=${DOCKER_VERSION}
ARG GOTESTSUM_VERSION=v1.12.0
ARG REGISTRY_VERSION=2.8.3
ARG BUILDKIT_VERSION=v0.18.2
ARG BUILDKIT_VERSION=v0.19.0
ARG UNDOCK_VERSION=0.9.0

FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
bake/bake.go: 52 changed lines
@@ -27,9 +27,7 @@ import (
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/client/llb"
	"github.com/moby/buildkit/session/auth/authprovider"
	"github.com/moby/buildkit/util/entitlements"
	"github.com/pkg/errors"
	"github.com/tonistiigi/go-csvvalue"
	"github.com/zclconf/go-cty/cty"
	"github.com/zclconf/go-cty/cty/convert"
)

@@ -556,6 +554,8 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)

		o := t[kk[1]]

		// IMPORTANT: if you add more fields here, do not forget to update
		// docs/bake-reference.md and https://docs.docker.com/build/bake/overrides/
		switch keys[1] {
		case "output", "cache-to", "cache-from", "tags", "platform", "secrets", "ssh", "attest", "entitlements", "network":
			if len(parts) == 2 {

@@ -861,6 +861,8 @@ func (t *Target) Merge(t2 *Target) {
}

func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementConf) error {
	// IMPORTANT: if you add more fields here, do not forget to update
	// docs/bake-reference.md and https://docs.docker.com/build/bake/overrides/
	for key, o := range overrides {
		value := o.Value
		keys := strings.SplitN(key, ".", 2)

@@ -896,7 +898,7 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
		case "tags":
			t.Tags = o.ArrValue
		case "cache-from":
			cacheFrom, err := parseCacheArrValues(o.ArrValue)
			cacheFrom, err := buildflags.ParseCacheEntry(o.ArrValue)
			if err != nil {
				return err
			}

@@ -909,7 +911,7 @@ func (t *Target) AddOverrides(overrides map[string]Override, ent *EntitlementCon
			}
		}
		case "cache-to":
			cacheTo, err := parseCacheArrValues(o.ArrValue)
			cacheTo, err := buildflags.ParseCacheEntry(o.ArrValue)
			if err != nil {
				return err
			}

@@ -1127,7 +1129,9 @@ func (t *Target) GetName(ectx *hcl.EvalContext, block *hcl.Block, loadDeps func(
func TargetsToBuildOpt(m map[string]*Target, inp *Input) (map[string]build.Options, error) {
	// make sure local credentials are loaded multiple times for different targets
	dockerConfig := config.LoadDefaultConfigFile(os.Stderr)
	authProvider := authprovider.NewDockerAuthProvider(dockerConfig, nil)
	authProvider := authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
		ConfigFile: dockerConfig,
	})

	m2 := make(map[string]build.Options, len(m))
	for k, v := range m {

@@ -1429,9 +1433,7 @@ func toBuildOpt(t *Target, inp *Input) (*build.Options, error) {
	}
	bo.Ulimits = ulimits

	for _, ent := range t.Entitlements {
		bo.Allow = append(bo.Allow, entitlements.Entitlement(ent))
	}
	bo.Allow = append(bo.Allow, t.Entitlements...)

	return bo, nil
}
@@ -1581,37 +1583,3 @@ func parseArrValue[T any, PT arrValue[T]](s []string) ([]*T, error) {
	}
	return outputs, nil
}

func parseCacheArrValues(s []string) (buildflags.CacheOptions, error) {
	var outs buildflags.CacheOptions
	for _, in := range s {
		if in == "" {
			continue
		}

		if !strings.Contains(in, "=") {
			// This is ref only format. Each field in the CSV is its own entry.
			fields, err := csvvalue.Fields(in, nil)
			if err != nil {
				return nil, err
			}

			for _, field := range fields {
				out := buildflags.CacheOptionsEntry{}
				if err := out.UnmarshalText([]byte(field)); err != nil {
					return nil, err
				}
				outs = append(outs, &out)
			}
			continue
		}

		// Normal entry.
		out := buildflags.CacheOptionsEntry{}
		if err := out.UnmarshalText([]byte(in)); err != nil {
			return nil, err
		}
		outs = append(outs, &out)
	}
	return outs, nil
}
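The removed parseCacheArrValues helper above is superseded by buildflags.ParseCacheEntry. A minimal sketch of the replacement call, assuming only what this diff shows (the import path from the test file below, a []string input, and a CacheOptions result whose entries carry Type and Attrs); the input values are illustrative:

	package main

	import (
		"fmt"

		"github.com/docker/buildx/util/buildflags"
	)

	func main() {
		// "ref1,ref2" is the ref-only CSV form: each field becomes its own
		// type=registry entry (see TestRefOnlyCacheOptions further down).
		cacheFrom, err := buildflags.ParseCacheEntry([]string{"ref1,ref2", "type=local,src=path/to/cache"})
		if err != nil {
			panic(err)
		}
		for _, e := range cacheFrom {
			fmt.Println(e.Type, e.Attrs)
		}
	}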
@@ -9,6 +9,7 @@ import (
	"strings"
	"testing"

	"github.com/docker/buildx/util/buildflags"
	"github.com/moby/buildkit/util/entitlements"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"

@@ -1759,6 +1760,27 @@ func TestAnnotations(t *testing.T) {
	require.Equal(t, "bar", bo["app"].Exports[0].Attrs["annotation-manifest[linux/amd64].foo"])
}

func TestRefOnlyCacheOptions(t *testing.T) {
	fp := File{
		Name: "docker-bake.hcl",
		Data: []byte(
			`target "app" {
				output = ["type=image,name=foo"]
				cache-from = ["ref1,ref2"]
			}`),
	}
	ctx := context.TODO()
	m, _, err := ReadTargets(ctx, []File{fp}, []string{"app"}, nil, nil, &EntitlementConf{})
	require.NoError(t, err)

	require.Len(t, m, 1)
	require.Contains(t, m, "app")
	require.Equal(t, buildflags.CacheOptions{
		{Type: "registry", Attrs: map[string]string{"ref": "ref1"}},
		{Type: "registry", Attrs: map[string]string{"ref": "ref2"}},
	}, m["app"].CacheFrom)
}

func TestHCLEntitlements(t *testing.T) {
	fp := File{
		Name: "docker-bake.hcl",

@@ -1784,8 +1806,8 @@ func TestHCLEntitlements(t *testing.T) {
	require.Equal(t, "network.host", m["app"].Entitlements[1])

	require.Len(t, bo["app"].Allow, 2)
	require.Equal(t, entitlements.EntitlementSecurityInsecure, bo["app"].Allow[0])
	require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[1])
	require.Equal(t, entitlements.EntitlementSecurityInsecure.String(), bo["app"].Allow[0])
	require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[1])
}

func TestEntitlementsForNetHostCompose(t *testing.T) {

@@ -1824,7 +1846,7 @@ func TestEntitlementsForNetHostCompose(t *testing.T) {
	require.Equal(t, "host", *m["app"].NetworkMode)

	require.Len(t, bo["app"].Allow, 1)
	require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
	require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[0])
	require.Equal(t, "host", bo["app"].NetworkMode)
}

@@ -1855,7 +1877,7 @@ func TestEntitlementsForNetHost(t *testing.T) {
	require.Equal(t, "host", *m["app"].NetworkMode)

	require.Len(t, bo["app"].Allow, 1)
	require.Equal(t, entitlements.EntitlementNetworkHost, bo["app"].Allow[0])
	require.Equal(t, entitlements.EntitlementNetworkHost.String(), bo["app"].Allow[0])
	require.Equal(t, "host", bo["app"].NetworkMode)
}
@@ -145,12 +145,12 @@ func ParseCompose(cfgs []composetypes.ConfigFile, envs map[string]string) (*Conf
			labels[k] = &v
		}

		cacheFrom, err := parseCacheArrValues(s.Build.CacheFrom)
		cacheFrom, err := buildflags.ParseCacheEntry(s.Build.CacheFrom)
		if err != nil {
			return nil, err
		}

		cacheTo, err := parseCacheArrValues(s.Build.CacheTo)
		cacheTo, err := buildflags.ParseCacheEntry(s.Build.CacheTo)
		if err != nil {
			return nil, err
		}

@@ -349,14 +349,14 @@ func (t *Target) composeExtTarget(exts map[string]interface{}) error {
		t.Tags = dedupSlice(append(t.Tags, xb.Tags...))
	}
	if len(xb.CacheFrom) > 0 {
		cacheFrom, err := parseCacheArrValues(xb.CacheFrom)
		cacheFrom, err := buildflags.ParseCacheEntry(xb.CacheFrom)
		if err != nil {
			return err
		}
		t.CacheFrom = t.CacheFrom.Merge(cacheFrom)
	}
	if len(xb.CacheTo) > 0 {
		cacheTo, err := parseCacheArrValues(xb.CacheTo)
		cacheTo, err := buildflags.ParseCacheEntry(xb.CacheTo)
		if err != nil {
			return err
		}
@@ -20,6 +20,7 @@ import (
	"github.com/moby/buildkit/util/entitlements"
	"github.com/pkg/errors"
	"github.com/sirupsen/logrus"
	"github.com/tonistiigi/go-csvvalue"
)

type EntitlementKey string

@@ -27,6 +28,7 @@ type EntitlementKey string
const (
	EntitlementKeyNetworkHost      EntitlementKey = "network.host"
	EntitlementKeySecurityInsecure EntitlementKey = "security.insecure"
	EntitlementKeyDevice           EntitlementKey = "device"
	EntitlementKeyFSRead           EntitlementKey = "fs.read"
	EntitlementKeyFSWrite          EntitlementKey = "fs.write"
	EntitlementKeyFS               EntitlementKey = "fs"

@@ -39,6 +41,7 @@ const (
type EntitlementConf struct {
	NetworkHost      bool
	SecurityInsecure bool
	Devices          *EntitlementsDevicesConf
	FSRead           []string
	FSWrite          []string
	ImagePush        []string

@@ -46,6 +49,11 @@ type EntitlementConf struct {
	SSH bool
}

type EntitlementsDevicesConf struct {
	All     bool
	Devices map[string]struct{}
}

func ParseEntitlements(in []string) (EntitlementConf, error) {
	var conf EntitlementConf
	for _, e := range in {

@@ -59,6 +67,22 @@ func ParseEntitlements(in []string) (EntitlementConf, error) {
		default:
			k, v, _ := strings.Cut(e, "=")
			switch k {
			case string(EntitlementKeyDevice):
				if v == "" {
					conf.Devices = &EntitlementsDevicesConf{All: true}
					continue
				}
				fields, err := csvvalue.Fields(v, nil)
				if err != nil {
					return EntitlementConf{}, errors.Wrapf(err, "failed to parse device entitlement %q", v)
				}
				if conf.Devices == nil {
					conf.Devices = &EntitlementsDevicesConf{}
				}
				if conf.Devices.Devices == nil {
					conf.Devices.Devices = make(map[string]struct{}, 0)
				}
				conf.Devices.Devices[fields[0]] = struct{}{}
			case string(EntitlementKeyFSRead):
				conf.FSRead = append(conf.FSRead, v)
			case string(EntitlementKeyFSWrite):
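A minimal usage sketch of the new device entitlement parsing added above, calling ParseEntitlements as declared in this diff (from the same package); the device name is a hypothetical CDI identifier:

	conf, err := ParseEntitlements([]string{
		"network.host",
		"security.insecure",
		"device",                // all CDI devices: Devices.All is set
		"device=vendor.com/gpu", // single named device, added to Devices.Devices
	})
	if err != nil {
		// handle the parse error
	}
	_ = conf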
@@ -95,12 +119,34 @@ func (c EntitlementConf) Validate(m map[string]build.Options) (EntitlementConf,

func (c EntitlementConf) check(bo build.Options, expected *EntitlementConf) error {
	for _, e := range bo.Allow {
		k, rest, _ := strings.Cut(e, "=")
		switch k {
		case entitlements.EntitlementDevice.String():
			if rest == "" {
				if c.Devices == nil || !c.Devices.All {
					expected.Devices = &EntitlementsDevicesConf{All: true}
				}
				continue
			}
			fields, err := csvvalue.Fields(rest, nil)
			if err != nil {
				return errors.Wrapf(err, "failed to parse device entitlement %q", rest)
			}
			if expected.Devices == nil {
				expected.Devices = &EntitlementsDevicesConf{}
			}
			if expected.Devices.Devices == nil {
				expected.Devices.Devices = make(map[string]struct{}, 0)
			}
			expected.Devices.Devices[fields[0]] = struct{}{}
		}

		switch e {
		case entitlements.EntitlementNetworkHost:
		case entitlements.EntitlementNetworkHost.String():
			if !c.NetworkHost {
				expected.NetworkHost = true
			}
		case entitlements.EntitlementSecurityInsecure:
		case entitlements.EntitlementSecurityInsecure.String():
			if !c.SecurityInsecure {
				expected.SecurityInsecure = true
			}

@@ -187,6 +233,18 @@ func (c EntitlementConf) Prompt(ctx context.Context, isRemote bool, out io.Write
		flags = append(flags, string(EntitlementKeySecurityInsecure))
	}

	if c.Devices != nil {
		if c.Devices.All {
			msgs = append(msgs, " - Access to CDI devices")
			flags = append(flags, string(EntitlementKeyDevice))
		} else {
			for d := range c.Devices.Devices {
				msgs = append(msgs, fmt.Sprintf(" - Access to device %s", d))
				flags = append(flags, string(EntitlementKeyDevice)+"="+d)
			}
		}
	}

	if c.SSH {
		msgsFS = append(msgsFS, " - Forwarding default SSH agent socket")
		flagsFS = append(flagsFS, string(EntitlementKeySSH))
@@ -208,8 +208,8 @@ func TestValidateEntitlements(t *testing.T) {
		{
			name: "NetworkHostMissing",
			opt: build.Options{
				Allow: []entitlements.Entitlement{
					entitlements.EntitlementNetworkHost,
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
				},
			},
			expected: EntitlementConf{

@@ -223,8 +223,8 @@ func TestValidateEntitlements(t *testing.T) {
				NetworkHost: true,
			},
			opt: build.Options{
				Allow: []entitlements.Entitlement{
					entitlements.EntitlementNetworkHost,
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
				},
			},
			expected: EntitlementConf{

@@ -234,9 +234,9 @@ func TestValidateEntitlements(t *testing.T) {
		{
			name: "SecurityAndNetworkHostMissing",
			opt: build.Options{
				Allow: []entitlements.Entitlement{
					entitlements.EntitlementNetworkHost,
					entitlements.EntitlementSecurityInsecure,
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
					entitlements.EntitlementSecurityInsecure.String(),
				},
			},
			expected: EntitlementConf{

@@ -251,9 +251,9 @@ func TestValidateEntitlements(t *testing.T) {
				NetworkHost: true,
			},
			opt: build.Options{
				Allow: []entitlements.Entitlement{
					entitlements.EntitlementNetworkHost,
					entitlements.EntitlementSecurityInsecure,
				Allow: []string{
					entitlements.EntitlementNetworkHost.String(),
					entitlements.EntitlementSecurityInsecure.String(),
				},
			},
			expected: EntitlementConf{
@@ -2,8 +2,10 @@ package bake

import (
	"reflect"
	"regexp"
	"testing"

	hcl "github.com/hashicorp/hcl/v2"
	"github.com/stretchr/testify/require"
)

@@ -647,7 +649,7 @@ func TestHCLAttrsCapsuleType(t *testing.T) {
	require.Equal(t, []string{"default", "key=path/to/key"}, stringify(c.Targets[0].SSH))
}

func TestHCLAttrsCapsuleTypeVars(t *testing.T) {
func TestHCLAttrsCapsuleType_ObjectVars(t *testing.T) {
	dt := []byte(`
	variable "foo" {
		default = "bar"

@@ -716,6 +718,52 @@ func TestHCLAttrsCapsuleTypeVars(t *testing.T) {
	require.Equal(t, []string{"id=oci,src=/local/secret"}, stringify(web.Secrets))
}

func TestHCLAttrsCapsuleType_MissingVars(t *testing.T) {
	dt := []byte(`
	target "app" {
		attest = [
			"type=sbom,disabled=${SBOM}",
		]

		cache-from = [
			{ type = "registry", ref = "user/app:${FOO1}" },
			"type=local,src=path/to/cache:${FOO2}",
		]

		cache-to = [
			{ type = "local", dest = "path/to/${BAR}" },
		]

		output = [
			{ type = "oci", dest = "../${OUTPUT}.tar" },
		]

		secret = [
			{ id = "mysecret", src = "/local/${SECRET}" },
		]

		ssh = [
			{ id = "key", paths = ["path/to/${SSH_KEY}"] },
		]
	}
	`)

	var diags hcl.Diagnostics
	_, err := ParseFile(dt, "docker-bake.hcl")
	require.ErrorAs(t, err, &diags)

	re := regexp.MustCompile(`There is no variable named "([\w\d_]+)"`)
	var actual []string
	for _, diag := range diags {
		if m := re.FindStringSubmatch(diag.Error()); m != nil {
			actual = append(actual, m[1])
		}
	}
	require.ElementsMatch(t,
		[]string{"SBOM", "FOO1", "FOO2", "BAR", "OUTPUT", "SECRET", "SSH_KEY"},
		actual)
}

func TestHCLMultiFileAttrs(t *testing.T) {
	dt := []byte(`
	variable "FOO" {
@@ -40,7 +40,6 @@ import (
	"github.com/moby/buildkit/solver/errdefs"
	"github.com/moby/buildkit/solver/pb"
	spb "github.com/moby/buildkit/sourcepolicy/pb"
	"github.com/moby/buildkit/util/entitlements"
	"github.com/moby/buildkit/util/progress/progresswriter"
	"github.com/moby/buildkit/util/tracing"
	"github.com/opencontainers/go-digest"

@@ -63,7 +62,7 @@ type Options struct {
	Inputs Inputs

	Ref       string
	Allow     []entitlements.Entitlement
	Allow     []string
	Attests   map[string]*string
	BuildArgs map[string]string
	CacheFrom []client.CacheOptionsEntry
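The Options.Allow field above changes from []entitlements.Entitlement to []string, which is why callers and tests elsewhere in this diff now convert the entitlement constants with String(). A minimal sketch of building the field under that change, using only identifiers that appear in this diff:

	opt := build.Options{
		Allow: []string{
			entitlements.EntitlementNetworkHost.String(),
			entitlements.EntitlementSecurityInsecure.String(),
		},
	}
	_ = opt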
@@ -835,7 +834,7 @@ func remoteDigestWithMoby(ctx context.Context, d *driver.DriverHandle, name stri
	if err != nil {
		return "", err
	}
	img, _, err := api.ImageInspectWithRaw(ctx, name)
	img, err := api.ImageInspect(ctx, name)
	if err != nil {
		return "", err
	}
@@ -318,7 +318,7 @@ func toSolveOpt(ctx context.Context, node builder.Node, multiDriver bool, opt *O
	switch opt.NetworkMode {
	case "host":
		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
		so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost)
		so.AllowedEntitlements = append(so.AllowedEntitlements, entitlements.EntitlementNetworkHost.String())
	case "none":
		so.FrontendAttrs["force-network-mode"] = opt.NetworkMode
	case "", "default":
@@ -32,10 +32,11 @@ type Node struct {
	Err error

	// worker settings
	IDs       []string
	Platforms []ocispecs.Platform
	GCPolicy  []client.PruneInfo
	Labels    map[string]string
	IDs        []string
	Platforms  []ocispecs.Platform
	GCPolicy   []client.PruneInfo
	Labels     map[string]string
	CDIDevices []client.CDIDevice
}

// Nodes returns nodes for this builder.

@@ -259,6 +260,7 @@ func (n *Node) loadData(ctx context.Context, clientOpt ...client.ClientOpt) erro
			n.GCPolicy = w.GCPolicy
			n.Labels = w.Labels
		}
		n.CDIDevices = w.CDIDevices
	}
	sort.Strings(n.IDs)
	n.Platforms = platformutil.Dedupe(n.Platforms)
@@ -4,6 +4,7 @@ import (
	"context"
	"fmt"
	"os"
	"path/filepath"

	"github.com/docker/buildx/commands"
	controllererrors "github.com/docker/buildx/controller/errdefs"

@@ -41,7 +42,8 @@ func runStandalone(cmd *command.DockerCli) error {
	}
	defer flushMetrics(cmd)

	rootCmd := commands.NewRootCmd(os.Args[0], false, cmd)
	executable := os.Args[0]
	rootCmd := commands.NewRootCmd(filepath.Base(executable), false, cmd)
	return rootCmd.Execute()
}
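Why filepath.Base matters in the change above: when the binary is invoked standalone through a full path, os.Args[0] carries that whole path, and only its final element should be used as the root command name. A tiny illustration (the path is hypothetical):

	executable := "/usr/local/lib/docker/cli-plugins/docker-buildx" // example os.Args[0]
	fmt.Println(filepath.Base(executable))                          // prints "docker-buildx"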
@@ -271,8 +271,10 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
	if err != nil {
		return err
	}
	if err := exp.Prompt(ctx, url != "", &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
		return err
	if progressMode != progressui.RawJSONMode {
		if err := exp.Prompt(ctx, url != "", &syncWriter{w: dockerCli.Err(), wait: printer.Wait}); err != nil {
			return err
		}
	}
	if printer.IsDone() {
		// init new printer as old one was stopped to show the prompt
@@ -41,7 +41,7 @@ import (
	"github.com/docker/cli/cli/command"
	dockeropts "github.com/docker/cli/opts"
	"github.com/docker/docker/api/types/versions"
	"github.com/docker/docker/pkg/ioutils"
	"github.com/docker/docker/pkg/atomicwriter"
	"github.com/moby/buildkit/client"
	"github.com/moby/buildkit/exporter/containerimage/exptypes"
	"github.com/moby/buildkit/frontend/subrequests"

@@ -183,14 +183,17 @@ func (o *buildOptions) toControllerOptions() (*controllerapi.BuildOptions, error
		}
	}

	opts.CacheFrom, err = buildflags.ParseCacheEntry(o.cacheFrom)
	cacheFrom, err := buildflags.ParseCacheEntry(o.cacheFrom)
	if err != nil {
		return nil, err
	}
	opts.CacheTo, err = buildflags.ParseCacheEntry(o.cacheTo)
	opts.CacheFrom = cacheFrom.ToPB()

	cacheTo, err := buildflags.ParseCacheEntry(o.cacheTo)
	if err != nil {
		return nil, err
	}
	opts.CacheTo = cacheTo.ToPB()

	opts.Secrets, err = buildflags.ParseSecretSpecs(o.secrets)
	if err != nil {

@@ -463,7 +466,7 @@ func runControllerBuild(ctx context.Context, dockerCli command.Cli, opts *contro
	if err != nil {
		var be *controllererrors.BuildError
		if errors.As(err, &be) {
			ref = be.Ref
			ref = be.SessionID
			retErr = err
			// We can proceed to monitor
		} else {

@@ -590,7 +593,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions, debugConfig *debug.D

	flags.StringSliceVar(&options.extraHosts, "add-host", []string{}, `Add a custom host-to-IP mapping (format: "host:ip")`)

	flags.StringSliceVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)
	flags.StringArrayVar(&options.allow, "allow", []string{}, `Allow extra privileged entitlement (e.g., "network.host", "security.insecure")`)

	flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
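The flag change above swaps pflag's StringSliceVar for StringArrayVar for --allow. A short sketch of the behavioral difference this relies on (a general pflag property, not something stated in this diff): slice flags split each value on commas, while array flags keep each flag occurrence verbatim, which matters once entitlement values can themselves contain commas.

	package main

	import (
		"fmt"

		"github.com/spf13/pflag"
	)

	func main() {
		var sliceVals, arrayVals []string
		fs := pflag.NewFlagSet("demo", pflag.ContinueOnError)
		fs.StringSliceVar(&sliceVals, "slice", nil, "values split on commas")
		fs.StringArrayVar(&arrayVals, "array", nil, "values kept verbatim")
		_ = fs.Parse([]string{"--slice", "a,b", "--array", "a,b"})
		fmt.Println(sliceVals) // [a b]
		fmt.Println(arrayVals) // [a,b]
	}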
@@ -742,7 +745,7 @@ func writeMetadataFile(filename string, dt interface{}) error {
	if err != nil {
		return err
	}
	return ioutils.AtomicWriteFile(filename, b, 0644)
	return atomicwriter.WriteFile(filename, b, 0644)
}

func decodeExporterResponse(exporterResponse map[string]string) map[string]interface{} {
commands/history/inspect.go (new file): 900 lines
@@ -0,0 +1,900 @@
|
||||
package history
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"cmp"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/localstate"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/buildx/util/confutil"
|
||||
"github.com/docker/buildx/util/desktop"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/formatter"
|
||||
"github.com/docker/cli/cli/debug"
|
||||
slsa "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
|
||||
slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/solver/errdefs"
|
||||
provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
|
||||
"github.com/moby/buildkit/util/grpcerrors"
|
||||
"github.com/moby/buildkit/util/stack"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"github.com/tonistiigi/go-csvvalue"
|
||||
spb "google.golang.org/genproto/googleapis/rpc/status"
|
||||
"google.golang.org/grpc/codes"
|
||||
"google.golang.org/grpc/status"
|
||||
proto "google.golang.org/protobuf/proto"
|
||||
)
|
||||
|
||||
type statusT string
|
||||
|
||||
const (
|
||||
statusComplete statusT = "completed"
|
||||
statusRunning statusT = "running"
|
||||
statusError statusT = "failed"
|
||||
statusCanceled statusT = "canceled"
|
||||
)
|
||||
|
||||
type inspectOptions struct {
|
||||
builder string
|
||||
ref string
|
||||
format string
|
||||
}
|
||||
|
||||
type inspectOutput struct {
|
||||
Name string `json:",omitempty"`
|
||||
Ref string
|
||||
|
||||
Context string `json:",omitempty"`
|
||||
Dockerfile string `json:",omitempty"`
|
||||
VCSRepository string `json:",omitempty"`
|
||||
VCSRevision string `json:",omitempty"`
|
||||
Target string `json:",omitempty"`
|
||||
Platform []string `json:",omitempty"`
|
||||
KeepGitDir bool `json:",omitempty"`
|
||||
|
||||
NamedContexts []keyValueOutput `json:",omitempty"`
|
||||
|
||||
StartedAt *time.Time `json:",omitempty"`
|
||||
CompletedAt *time.Time `json:",omitempty"`
|
||||
Duration time.Duration `json:",omitempty"`
|
||||
Status statusT `json:",omitempty"`
|
||||
Error *errorOutput `json:",omitempty"`
|
||||
|
||||
NumCompletedSteps int32
|
||||
NumTotalSteps int32
|
||||
NumCachedSteps int32
|
||||
|
||||
BuildArgs []keyValueOutput `json:",omitempty"`
|
||||
Labels []keyValueOutput `json:",omitempty"`
|
||||
|
||||
Config configOutput `json:",omitempty"`
|
||||
|
||||
Materials []materialOutput `json:",omitempty"`
|
||||
Attachments []attachmentOutput `json:",omitempty"`
|
||||
|
||||
Errors []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type configOutput struct {
|
||||
Network string `json:",omitempty"`
|
||||
ExtraHosts []string `json:",omitempty"`
|
||||
Hostname string `json:",omitempty"`
|
||||
CgroupParent string `json:",omitempty"`
|
||||
ImageResolveMode string `json:",omitempty"`
|
||||
MultiPlatform bool `json:",omitempty"`
|
||||
NoCache bool `json:",omitempty"`
|
||||
NoCacheFilter []string `json:",omitempty"`
|
||||
|
||||
ShmSize string `json:",omitempty"`
|
||||
Ulimit string `json:",omitempty"`
|
||||
CacheMountNS string `json:",omitempty"`
|
||||
DockerfileCheckConfig string `json:",omitempty"`
|
||||
SourceDateEpoch string `json:",omitempty"`
|
||||
SandboxHostname string `json:",omitempty"`
|
||||
|
||||
RestRaw []keyValueOutput `json:",omitempty"`
|
||||
}
|
||||
|
||||
type materialOutput struct {
|
||||
URI string `json:",omitempty"`
|
||||
Digests []string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type attachmentOutput struct {
|
||||
Digest string `json:",omitempty"`
|
||||
Platform string `json:",omitempty"`
|
||||
Type string `json:",omitempty"`
|
||||
}
|
||||
|
||||
type errorOutput struct {
|
||||
Code int `json:",omitempty"`
|
||||
Message string `json:",omitempty"`
|
||||
Name string `json:",omitempty"`
|
||||
Logs []string `json:",omitempty"`
|
||||
Sources []byte `json:",omitempty"`
|
||||
Stack []byte `json:",omitempty"`
|
||||
}
|
||||
|
||||
type keyValueOutput struct {
|
||||
Name string `json:",omitempty"`
|
||||
Value string `json:",omitempty"`
|
||||
}
|
||||
|
||||
func readAttr[T any](attrs map[string]string, k string, dest *T, f func(v string) (T, bool)) {
|
||||
if sv, ok := attrs[k]; ok {
|
||||
if f != nil {
|
||||
v, ok := f(sv)
|
||||
if ok {
|
||||
*dest = v
|
||||
}
|
||||
}
|
||||
if d, ok := any(dest).(*string); ok {
|
||||
*d = sv
|
||||
}
|
||||
}
|
||||
delete(attrs, k)
|
||||
}
|
||||
|
||||
func runInspect(ctx context.Context, dockerCli command.Cli, opts inspectOptions) error {
|
||||
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := b.LoadNodes(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.Err != nil {
|
||||
return node.Err
|
||||
}
|
||||
}
|
||||
|
||||
recs, err := queryRecords(ctx, opts.ref, nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(recs) == 0 {
|
||||
if opts.ref == "" {
|
||||
return errors.New("no records found")
|
||||
}
|
||||
return errors.Errorf("no record found for ref %q", opts.ref)
|
||||
}
|
||||
|
||||
if opts.ref == "" {
|
||||
slices.SortFunc(recs, func(a, b historyRecord) int {
|
||||
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
|
||||
})
|
||||
}
|
||||
|
||||
rec := &recs[0]
|
||||
|
||||
c, err := rec.node.Driver.Client(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store := proxy.NewContentStore(c.ContentClient())
|
||||
|
||||
var defaultPlatform string
|
||||
workers, err := c.ListWorkers(ctx)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "failed to list workers")
|
||||
}
|
||||
workers0:
|
||||
for _, w := range workers {
|
||||
for _, p := range w.Platforms {
|
||||
defaultPlatform = platforms.FormatAll(platforms.Normalize(p))
|
||||
break workers0
|
||||
}
|
||||
}
|
||||
|
||||
ls, err := localstate.New(confutil.NewConfig(dockerCli))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
|
||||
|
||||
attrs := rec.FrontendAttrs
|
||||
delete(attrs, "frontend.caps")
|
||||
|
||||
var out inspectOutput
|
||||
|
||||
var context string
|
||||
var dockerfile string
|
||||
if st != nil {
|
||||
context = st.LocalPath
|
||||
dockerfile = st.DockerfilePath
|
||||
wd, _ := os.Getwd()
|
||||
|
||||
if dockerfile != "" && dockerfile != "-" {
|
||||
if rel, err := filepath.Rel(context, dockerfile); err == nil {
|
||||
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
|
||||
dockerfile = rel
|
||||
}
|
||||
}
|
||||
}
|
||||
if context != "" {
|
||||
if rel, err := filepath.Rel(wd, context); err == nil {
|
||||
if !strings.HasPrefix(rel, ".."+string(filepath.Separator)) {
|
||||
context = rel
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if v, ok := attrs["context"]; ok && context == "" {
|
||||
delete(attrs, "context")
|
||||
context = v
|
||||
}
|
||||
if dockerfile == "" {
|
||||
if v, ok := attrs["filename"]; ok {
|
||||
dockerfile = v
|
||||
if dfdir, ok := attrs["vcs:localdir:dockerfile"]; ok {
|
||||
dockerfile = filepath.Join(dfdir, dockerfile)
|
||||
}
|
||||
}
|
||||
}
|
||||
delete(attrs, "filename")
|
||||
|
||||
out.Name = buildName(rec.FrontendAttrs, st)
|
||||
out.Ref = rec.Ref
|
||||
|
||||
out.Context = context
|
||||
out.Dockerfile = dockerfile
|
||||
|
||||
if _, ok := attrs["context"]; !ok {
|
||||
if src, ok := attrs["vcs:source"]; ok {
|
||||
out.VCSRepository = src
|
||||
}
|
||||
if rev, ok := attrs["vcs:revision"]; ok {
|
||||
out.VCSRevision = rev
|
||||
}
|
||||
}
|
||||
|
||||
readAttr(attrs, "target", &out.Target, nil)
|
||||
|
||||
readAttr(attrs, "platform", &out.Platform, func(v string) ([]string, bool) {
|
||||
return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
|
||||
var pp []string
|
||||
for _, v := range strings.Split(v, ",") {
|
||||
p, err := platforms.Parse(v)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
pp = append(pp, platforms.FormatAll(platforms.Normalize(p)))
|
||||
}
|
||||
if len(pp) == 0 {
|
||||
pp = append(pp, defaultPlatform)
|
||||
}
|
||||
return pp, nil
|
||||
})
|
||||
})
|
||||
|
||||
readAttr(attrs, "build-arg:BUILDKIT_CONTEXT_KEEP_GIT_DIR", &out.KeepGitDir, func(v string) (bool, bool) {
|
||||
return tryParseValue(v, &out.Errors, strconv.ParseBool)
|
||||
})
|
||||
|
||||
out.NamedContexts = readKeyValues(attrs, "context:")
|
||||
|
||||
if rec.CreatedAt != nil {
|
||||
tm := rec.CreatedAt.AsTime().Local()
|
||||
out.StartedAt = &tm
|
||||
}
|
||||
out.Status = statusRunning
|
||||
|
||||
if rec.CompletedAt != nil {
|
||||
tm := rec.CompletedAt.AsTime().Local()
|
||||
out.CompletedAt = &tm
|
||||
out.Status = statusComplete
|
||||
}
|
||||
|
||||
if rec.Error != nil || rec.ExternalError != nil {
|
||||
out.Error = &errorOutput{}
|
||||
if rec.Error != nil {
|
||||
if codes.Code(rec.Error.Code) == codes.Canceled {
|
||||
out.Status = statusCanceled
|
||||
} else {
|
||||
out.Status = statusError
|
||||
}
|
||||
out.Error.Code = int(codes.Code(rec.Error.Code))
|
||||
out.Error.Message = rec.Error.Message
|
||||
}
|
||||
if rec.ExternalError != nil {
|
||||
dt, err := content.ReadBlob(ctx, store, ociDesc(rec.ExternalError))
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to read external error %s", rec.ExternalError.Digest)
|
||||
}
|
||||
var st spb.Status
|
||||
if err := proto.Unmarshal(dt, &st); err != nil {
|
||||
return errors.Wrapf(err, "failed to unmarshal external error %s", rec.ExternalError.Digest)
|
||||
}
|
||||
retErr := grpcerrors.FromGRPC(status.ErrorProto(&st))
|
||||
var errsources bytes.Buffer
|
||||
for _, s := range errdefs.Sources(retErr) {
|
||||
s.Print(&errsources)
|
||||
errsources.WriteString("\n")
|
||||
}
|
||||
out.Error.Sources = errsources.Bytes()
|
||||
var ve *errdefs.VertexError
|
||||
if errors.As(retErr, &ve) {
|
||||
dgst, err := digest.Parse(ve.Vertex.Digest)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse vertex digest %s", ve.Vertex.Digest)
|
||||
}
|
||||
name, logs, err := loadVertexLogs(ctx, c, rec.Ref, dgst, 16)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to load vertex logs %s", dgst)
|
||||
}
|
||||
out.Error.Name = name
|
||||
out.Error.Logs = logs
|
||||
}
|
||||
out.Error.Stack = []byte(fmt.Sprintf("%+v", stack.Formatter(retErr)))
|
||||
}
|
||||
}
|
||||
|
||||
if out.StartedAt != nil {
|
||||
if out.CompletedAt != nil {
|
||||
out.Duration = out.CompletedAt.Sub(*out.StartedAt)
|
||||
} else {
|
||||
out.Duration = rec.currentTimestamp.Sub(*out.StartedAt)
|
||||
}
|
||||
}
|
||||
|
||||
out.NumCompletedSteps = rec.NumCompletedSteps
|
||||
out.NumTotalSteps = rec.NumTotalSteps
|
||||
out.NumCachedSteps = rec.NumCachedSteps
|
||||
|
||||
out.BuildArgs = readKeyValues(attrs, "build-arg:")
|
||||
out.Labels = readKeyValues(attrs, "label:")
|
||||
|
||||
readAttr(attrs, "force-network-mode", &out.Config.Network, nil)
|
||||
readAttr(attrs, "hostname", &out.Config.Hostname, nil)
|
||||
readAttr(attrs, "cgroup-parent", &out.Config.CgroupParent, nil)
|
||||
readAttr(attrs, "image-resolve-mode", &out.Config.ImageResolveMode, nil)
|
||||
readAttr(attrs, "build-arg:BUILDKIT_MULTI_PLATFORM", &out.Config.MultiPlatform, func(v string) (bool, bool) {
|
||||
return tryParseValue(v, &out.Errors, strconv.ParseBool)
|
||||
})
|
||||
readAttr(attrs, "multi-platform", &out.Config.MultiPlatform, func(v string) (bool, bool) {
|
||||
return tryParseValue(v, &out.Errors, strconv.ParseBool)
|
||||
})
|
||||
readAttr(attrs, "no-cache", &out.Config.NoCache, func(v string) (bool, bool) {
|
||||
if v == "" {
|
||||
return true, true
|
||||
}
|
||||
return false, false
|
||||
})
|
||||
readAttr(attrs, "no-cache", &out.Config.NoCacheFilter, func(v string) ([]string, bool) {
|
||||
if v == "" {
|
||||
return nil, false
|
||||
}
|
||||
return strings.Split(v, ","), true
|
||||
})
|
||||
|
||||
readAttr(attrs, "add-hosts", &out.Config.ExtraHosts, func(v string) ([]string, bool) {
|
||||
return tryParseValue(v, &out.Errors, func(v string) ([]string, error) {
|
||||
fields, err := csvvalue.Fields(v, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return fields, nil
|
||||
})
|
||||
})
|
||||
|
||||
readAttr(attrs, "shm-size", &out.Config.ShmSize, nil)
|
||||
readAttr(attrs, "ulimit", &out.Config.Ulimit, nil)
|
||||
readAttr(attrs, "build-arg:BUILDKIT_CACHE_MOUNT_NS", &out.Config.CacheMountNS, nil)
|
||||
readAttr(attrs, "build-arg:BUILDKIT_DOCKERFILE_CHECK", &out.Config.DockerfileCheckConfig, nil)
|
||||
readAttr(attrs, "build-arg:SOURCE_DATE_EPOCH", &out.Config.SourceDateEpoch, nil)
|
||||
readAttr(attrs, "build-arg:SANDBOX_HOSTNAME", &out.Config.SandboxHostname, nil)
|
||||
|
||||
var unusedAttrs []keyValueOutput
|
||||
for k := range attrs {
|
||||
if strings.HasPrefix(k, "vcs:") || strings.HasPrefix(k, "build-arg:") || strings.HasPrefix(k, "label:") || strings.HasPrefix(k, "context:") || strings.HasPrefix(k, "attest:") {
|
||||
continue
|
||||
}
|
||||
unusedAttrs = append(unusedAttrs, keyValueOutput{
|
||||
Name: k,
|
||||
Value: attrs[k],
|
||||
})
|
||||
}
|
||||
slices.SortFunc(unusedAttrs, func(a, b keyValueOutput) int {
|
||||
return cmp.Compare(a.Name, b.Name)
|
||||
})
|
||||
out.Config.RestRaw = unusedAttrs
|
||||
|
||||
attachments, err := allAttachments(ctx, store, *rec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
provIndex := slices.IndexFunc(attachments, func(a attachment) bool {
|
||||
return descrType(a.descr) == slsa02.PredicateSLSAProvenance
|
||||
})
|
||||
if provIndex != -1 {
|
||||
prov := attachments[provIndex]
|
||||
dt, err := content.ReadBlob(ctx, store, prov.descr)
|
||||
if err != nil {
|
||||
return errors.Errorf("failed to read provenance %s: %v", prov.descr.Digest, err)
|
||||
}
|
||||
var pred provenancetypes.ProvenancePredicate
|
||||
if err := json.Unmarshal(dt, &pred); err != nil {
|
||||
return errors.Errorf("failed to unmarshal provenance %s: %v", prov.descr.Digest, err)
|
||||
}
|
||||
for _, m := range pred.Materials {
|
||||
out.Materials = append(out.Materials, materialOutput{
|
||||
URI: m.URI,
|
||||
Digests: digestSetToDigests(m.Digest),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if len(attachments) > 0 {
|
||||
for _, a := range attachments {
|
||||
p := ""
|
||||
if a.platform != nil {
|
||||
p = platforms.FormatAll(*a.platform)
|
||||
}
|
||||
out.Attachments = append(out.Attachments, attachmentOutput{
|
||||
Digest: a.descr.Digest.String(),
|
||||
Platform: p,
|
||||
Type: descrType(a.descr),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
if opts.format == formatter.JSONFormatKey {
|
||||
enc := json.NewEncoder(dockerCli.Out())
|
||||
enc.SetIndent("", " ")
|
||||
return enc.Encode(out)
|
||||
} else if opts.format != formatter.PrettyFormatKey {
|
||||
tmpl, err := template.New("inspect").Parse(opts.format)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to parse format template")
|
||||
}
|
||||
var buf bytes.Buffer
|
||||
if err := tmpl.Execute(&buf, out); err != nil {
|
||||
return errors.Wrapf(err, "failed to execute format template")
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out(), buf.String())
|
||||
return nil
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||
|
||||
if out.Name != "" {
|
||||
fmt.Fprintf(tw, "Name:\t%s\n", out.Name)
|
||||
}
|
||||
if opts.ref == "" && out.Ref != "" {
|
||||
fmt.Fprintf(tw, "Ref:\t%s\n", out.Ref)
|
||||
}
|
||||
if out.Context != "" {
|
||||
fmt.Fprintf(tw, "Context:\t%s\n", out.Context)
|
||||
}
|
||||
if out.Dockerfile != "" {
|
||||
fmt.Fprintf(tw, "Dockerfile:\t%s\n", out.Dockerfile)
|
||||
}
|
||||
if out.VCSRepository != "" {
|
||||
fmt.Fprintf(tw, "VCS Repository:\t%s\n", out.VCSRepository)
|
||||
}
|
||||
if out.VCSRevision != "" {
|
||||
fmt.Fprintf(tw, "VCS Revision:\t%s\n", out.VCSRevision)
|
||||
}
|
||||
|
||||
if out.Target != "" {
|
||||
fmt.Fprintf(tw, "Target:\t%s\n", out.Target)
|
||||
}
|
||||
|
||||
if len(out.Platform) > 0 {
|
||||
fmt.Fprintf(tw, "Platforms:\t%s\n", strings.Join(out.Platform, ", "))
|
||||
}
|
||||
|
||||
if out.KeepGitDir {
|
||||
fmt.Fprintf(tw, "Keep Git Dir:\t%s\n", strconv.FormatBool(out.KeepGitDir))
|
||||
}
|
||||
|
||||
tw.Flush()
|
||||
|
||||
fmt.Fprintln(dockerCli.Out())
|
||||
|
||||
printTable(dockerCli.Out(), out.NamedContexts, "Named Context")
|
||||
|
||||
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||
|
||||
fmt.Fprintf(tw, "Started:\t%s\n", out.StartedAt.Format("2006-01-02 15:04:05"))
|
||||
var statusStr string
|
||||
if out.Status == statusRunning {
|
||||
statusStr = " (running)"
|
||||
}
|
||||
fmt.Fprintf(tw, "Duration:\t%s%s\n", formatDuration(out.Duration), statusStr)
|
||||
|
||||
if out.Status == statusError {
|
||||
fmt.Fprintf(tw, "Error:\t%s %s\n", codes.Code(rec.Error.Code).String(), rec.Error.Message)
|
||||
} else if out.Status == statusCanceled {
|
||||
fmt.Fprintf(tw, "Status:\tCanceled\n")
|
||||
}
|
||||
|
||||
fmt.Fprintf(tw, "Build Steps:\t%d/%d (%.0f%% cached)\n", out.NumCompletedSteps, out.NumTotalSteps, float64(out.NumCachedSteps)/float64(out.NumTotalSteps)*100)
|
||||
tw.Flush()
|
||||
|
||||
fmt.Fprintln(dockerCli.Out())
|
||||
|
||||
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||
|
||||
if out.Config.Network != "" {
|
||||
fmt.Fprintf(tw, "Network:\t%s\n", out.Config.Network)
|
||||
}
|
||||
if out.Config.Hostname != "" {
|
||||
fmt.Fprintf(tw, "Hostname:\t%s\n", out.Config.Hostname)
|
||||
}
|
||||
if len(out.Config.ExtraHosts) > 0 {
|
||||
fmt.Fprintf(tw, "Extra Hosts:\t%s\n", strings.Join(out.Config.ExtraHosts, ", "))
|
||||
}
|
||||
if out.Config.CgroupParent != "" {
|
||||
fmt.Fprintf(tw, "Cgroup Parent:\t%s\n", out.Config.CgroupParent)
|
||||
}
|
||||
if out.Config.ImageResolveMode != "" {
|
||||
fmt.Fprintf(tw, "Image Resolve Mode:\t%s\n", out.Config.ImageResolveMode)
|
||||
}
|
||||
if out.Config.MultiPlatform {
|
||||
fmt.Fprintf(tw, "Multi-Platform:\t%s\n", strconv.FormatBool(out.Config.MultiPlatform))
|
||||
}
|
||||
if out.Config.NoCache {
|
||||
fmt.Fprintf(tw, "No Cache:\t%s\n", strconv.FormatBool(out.Config.NoCache))
|
||||
}
|
||||
if len(out.Config.NoCacheFilter) > 0 {
|
||||
fmt.Fprintf(tw, "No Cache Filter:\t%s\n", strings.Join(out.Config.NoCacheFilter, ", "))
|
||||
}
|
||||
|
||||
if out.Config.ShmSize != "" {
|
||||
fmt.Fprintf(tw, "Shm Size:\t%s\n", out.Config.ShmSize)
|
||||
}
|
||||
if out.Config.Ulimit != "" {
|
||||
fmt.Fprintf(tw, "Resource Limits:\t%s\n", out.Config.Ulimit)
|
||||
}
|
||||
if out.Config.CacheMountNS != "" {
|
||||
fmt.Fprintf(tw, "Cache Mount Namespace:\t%s\n", out.Config.CacheMountNS)
|
||||
}
|
||||
if out.Config.DockerfileCheckConfig != "" {
|
||||
fmt.Fprintf(tw, "Dockerfile Check Config:\t%s\n", out.Config.DockerfileCheckConfig)
|
||||
}
|
||||
if out.Config.SourceDateEpoch != "" {
|
||||
fmt.Fprintf(tw, "Source Date Epoch:\t%s\n", out.Config.SourceDateEpoch)
|
||||
}
|
||||
if out.Config.SandboxHostname != "" {
|
||||
fmt.Fprintf(tw, "Sandbox Hostname:\t%s\n", out.Config.SandboxHostname)
|
||||
}
|
||||
|
||||
for _, kv := range out.Config.RestRaw {
|
||||
fmt.Fprintf(tw, "%s:\t%s\n", kv.Name, kv.Value)
|
||||
}
|
||||
|
||||
tw.Flush()
|
||||
|
||||
fmt.Fprintln(dockerCli.Out())
|
||||
|
||||
printTable(dockerCli.Out(), out.BuildArgs, "Build Arg")
|
||||
printTable(dockerCli.Out(), out.Labels, "Label")
|
||||
|
||||
if len(out.Materials) > 0 {
|
||||
fmt.Fprintln(dockerCli.Out(), "Materials:")
|
||||
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||
fmt.Fprintf(tw, "URI\tDIGEST\n")
|
||||
for _, m := range out.Materials {
|
||||
fmt.Fprintf(tw, "%s\t%s\n", m.URI, strings.Join(m.Digests, ", "))
|
||||
}
|
||||
tw.Flush()
|
||||
fmt.Fprintln(dockerCli.Out())
|
||||
}
|
||||
|
||||
if len(out.Attachments) > 0 {
|
||||
fmt.Fprintf(tw, "Attachments:\n")
|
||||
tw = tabwriter.NewWriter(dockerCli.Out(), 1, 8, 1, '\t', 0)
|
||||
fmt.Fprintf(tw, "DIGEST\tPLATFORM\tTYPE\n")
|
||||
for _, a := range out.Attachments {
|
||||
fmt.Fprintf(tw, "%s\t%s\t%s\n", a.Digest, a.Platform, a.Type)
|
||||
}
|
||||
tw.Flush()
|
||||
fmt.Fprintln(dockerCli.Out())
|
||||
}
|
||||
|
||||
if out.Error != nil {
|
||||
if out.Error.Sources != nil {
|
||||
fmt.Fprint(dockerCli.Out(), string(out.Error.Sources))
|
||||
}
|
||||
if len(out.Error.Logs) > 0 {
|
||||
fmt.Fprintln(dockerCli.Out(), "Logs:")
|
||||
fmt.Fprintf(dockerCli.Out(), "> => %s:\n", out.Error.Name)
|
||||
for _, l := range out.Error.Logs {
|
||||
fmt.Fprintln(dockerCli.Out(), "> "+l)
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Out())
|
||||
}
|
||||
if len(out.Error.Stack) > 0 {
|
||||
if debug.IsEnabled() {
|
||||
fmt.Fprintf(dockerCli.Out(), "\n%s\n", out.Error.Stack)
|
||||
} else {
|
||||
fmt.Fprintf(dockerCli.Out(), "Enable --debug to see stack traces for error\n")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(dockerCli.Out(), "Print build logs: docker buildx history logs %s\n", rec.Ref)
|
||||
|
||||
fmt.Fprintf(dockerCli.Out(), "View build in Docker Desktop: %s\n", desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref)))
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func inspectCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||
var options inspectOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "inspect [OPTIONS] [REF]",
|
||||
Short: "Inspect a build",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
options.ref = args[0]
|
||||
}
|
||||
options.builder = *rootOpts.Builder
|
||||
return runInspect(cmd.Context(), dockerCli, options)
|
||||
},
|
||||
ValidArgsFunction: completion.Disable,
|
||||
}
|
||||
|
||||
cmd.AddCommand(
|
||||
attachmentCmd(dockerCli, rootOpts),
|
||||
)
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.StringVar(&options.format, "format", formatter.PrettyFormatKey, "Format the output")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func loadVertexLogs(ctx context.Context, c *client.Client, ref string, dgst digest.Digest, limit int) (string, []string, error) {
|
||||
st, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
|
||||
Ref: ref,
|
||||
})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
var name string
|
||||
var logs []string
|
||||
lastState := map[int]int{}
|
||||
|
||||
loop0:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
st.CloseSend()
|
||||
return "", nil, context.Cause(ctx)
|
||||
default:
|
||||
ev, err := st.Recv()
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break loop0
|
||||
}
|
||||
return "", nil, err
|
||||
}
|
||||
ss := client.NewSolveStatus(ev)
|
||||
for _, v := range ss.Vertexes {
|
||||
if v.Digest == dgst {
|
||||
name = v.Name
|
||||
break
|
||||
}
|
||||
}
|
||||
for _, l := range ss.Logs {
|
||||
if l.Vertex == dgst {
|
||||
parts := bytes.Split(l.Data, []byte("\n"))
|
||||
for i, p := range parts {
|
||||
var wrote bool
|
||||
if i == 0 {
|
||||
idx, ok := lastState[l.Stream]
|
||||
if ok && idx != -1 {
|
||||
logs[idx] = logs[idx] + string(p)
|
||||
wrote = true
|
||||
}
|
||||
}
|
||||
if !wrote {
|
||||
if len(p) > 0 {
|
||||
logs = append(logs, string(p))
|
||||
}
|
||||
lastState[l.Stream] = len(logs) - 1
|
||||
}
|
||||
if i == len(parts)-1 && len(p) == 0 {
|
||||
lastState[l.Stream] = -1
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if limit > 0 && len(logs) > limit {
|
||||
logs = logs[len(logs)-limit:]
|
||||
}
|
||||
|
||||
return name, logs, nil
|
||||
}
|
||||
|
||||
type attachment struct {
|
||||
platform *ocispecs.Platform
|
||||
descr ocispecs.Descriptor
|
||||
}
|
||||
|
||||
func allAttachments(ctx context.Context, store content.Store, rec historyRecord) ([]attachment, error) {
|
||||
var attachments []attachment
|
||||
|
||||
if rec.Result != nil {
|
||||
for _, a := range rec.Result.Attestations {
|
||||
attachments = append(attachments, attachment{
|
||||
descr: ociDesc(a),
|
||||
})
|
||||
}
|
||||
for _, r := range rec.Result.Results {
|
||||
attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), nil)...)
|
||||
}
|
||||
}
|
||||
|
||||
for key, ri := range rec.Results {
|
||||
p, err := platforms.Parse(key)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
for _, a := range ri.Attestations {
|
||||
attachments = append(attachments, attachment{
|
||||
platform: &p,
|
||||
descr: ociDesc(a),
|
||||
})
|
||||
}
|
||||
for _, r := range ri.Results {
|
||||
attachments = append(attachments, walkAttachments(ctx, store, ociDesc(r), &p)...)
|
||||
}
|
||||
}
|
||||
|
||||
slices.SortFunc(attachments, func(a, b attachment) int {
|
||||
pCmp := 0
|
||||
if a.platform == nil && b.platform != nil {
|
||||
return -1
|
||||
} else if a.platform != nil && b.platform == nil {
|
||||
return 1
|
||||
} else if a.platform != nil && b.platform != nil {
|
||||
pCmp = cmp.Compare(platforms.FormatAll(*a.platform), platforms.FormatAll(*b.platform))
|
||||
}
|
||||
return cmp.Or(
|
||||
pCmp,
|
||||
cmp.Compare(descrType(a.descr), descrType(b.descr)),
|
||||
)
|
||||
})
|
||||
|
||||
return attachments, nil
|
||||
}
|
||||
|
||||
func walkAttachments(ctx context.Context, store content.Store, desc ocispecs.Descriptor, platform *ocispecs.Platform) []attachment {
|
||||
_, err := store.Info(ctx, desc.Digest)
|
||||
if err != nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
var out []attachment
|
||||
|
||||
if desc.Annotations["vnd.docker.reference.type"] != "attestation-manifest" {
|
||||
out = append(out, attachment{platform: platform, descr: desc})
|
||||
}
|
||||
|
||||
if desc.MediaType != ocispecs.MediaTypeImageIndex && desc.MediaType != images.MediaTypeDockerSchema2ManifestList {
|
||||
return out
|
||||
}
|
||||
|
||||
dt, err := content.ReadBlob(ctx, store, desc)
|
||||
if err != nil {
|
||||
return out
|
||||
}
|
||||
|
||||
var idx ocispecs.Index
|
||||
if err := json.Unmarshal(dt, &idx); err != nil {
|
||||
return out
|
||||
}
|
||||
|
||||
for _, d := range idx.Manifests {
|
||||
p := platform
|
||||
if d.Platform != nil {
|
||||
p = d.Platform
|
||||
}
|
||||
out = append(out, walkAttachments(ctx, store, d, p)...)
|
||||
}
|
||||
|
||||
return out
|
||||
}
|
||||
|
||||
func ociDesc(in *controlapi.Descriptor) ocispecs.Descriptor {
|
||||
return ocispecs.Descriptor{
|
||||
MediaType: in.MediaType,
|
||||
Digest: digest.Digest(in.Digest),
|
||||
Size: in.Size,
|
||||
Annotations: in.Annotations,
|
||||
}
|
||||
}
|
||||
func descrType(desc ocispecs.Descriptor) string {
|
||||
if typ, ok := desc.Annotations["in-toto.io/predicate-type"]; ok {
|
||||
return typ
|
||||
}
|
||||
return desc.MediaType
|
||||
}
|
||||
|
||||
func tryParseValue[T any](s string, errs *[]string, f func(string) (T, error)) (T, bool) {
|
||||
v, err := f(s)
|
||||
if err != nil {
|
||||
errStr := fmt.Sprintf("failed to parse %s: (%v)", s, err)
|
||||
*errs = append(*errs, errStr)
|
||||
}
|
||||
return v, true
|
||||
}
|
||||
|
||||
func printTable(w io.Writer, kvs []keyValueOutput, title string) {
|
||||
if len(kvs) == 0 {
|
||||
return
|
||||
}
|
||||
|
||||
tw := tabwriter.NewWriter(w, 1, 8, 1, '\t', 0)
|
||||
fmt.Fprintf(tw, "%s\tVALUE\n", strings.ToUpper(title))
|
||||
for _, k := range kvs {
|
||||
fmt.Fprintf(tw, "%s\t%s\n", k.Name, k.Value)
|
||||
}
|
||||
tw.Flush()
|
||||
fmt.Fprintln(w)
|
||||
}
|
||||
|
||||
func readKeyValues(attrs map[string]string, prefix string) []keyValueOutput {
|
||||
var out []keyValueOutput
|
||||
for k, v := range attrs {
|
||||
if strings.HasPrefix(k, prefix) {
|
||||
out = append(out, keyValueOutput{
|
||||
Name: strings.TrimPrefix(k, prefix),
|
||||
Value: v,
|
||||
})
|
||||
}
|
||||
}
|
||||
if len(out) == 0 {
|
||||
return nil
|
||||
}
|
||||
slices.SortFunc(out, func(a, b keyValueOutput) int {
|
||||
return cmp.Compare(a.Name, b.Name)
|
||||
})
|
||||
return out
|
||||
}
|
||||
|
||||
func digestSetToDigests(ds slsa.DigestSet) []string {
|
||||
var out []string
|
||||
for k, v := range ds {
|
||||
out = append(out, fmt.Sprintf("%s:%s", k, v))
|
||||
}
|
||||
return out
|
||||
}
|
152
commands/history/inspect_attachment.go
Normal file
152
commands/history/inspect_attachment.go
Normal file
@@ -0,0 +1,152 @@
|
||||
package history
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"slices"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||
"github.com/containerd/platforms"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/cli/cli/command"
|
||||
intoto "github.com/in-toto/in-toto-golang/in_toto"
|
||||
slsa02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type attachmentOptions struct {
|
||||
builder string
|
||||
typ string
|
||||
platform string
|
||||
ref string
|
||||
digest digest.Digest
|
||||
}
|
||||
|
||||
func runAttachment(ctx context.Context, dockerCli command.Cli, opts attachmentOptions) error {
|
||||
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := b.LoadNodes(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.Err != nil {
|
||||
return node.Err
|
||||
}
|
||||
}
|
||||
|
||||
recs, err := queryRecords(ctx, opts.ref, nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(recs) == 0 {
|
||||
if opts.ref == "" {
|
||||
return errors.New("no records found")
|
||||
}
|
||||
return errors.Errorf("no record found for ref %q", opts.ref)
|
||||
}
|
||||
|
||||
if opts.ref == "" {
|
||||
slices.SortFunc(recs, func(a, b historyRecord) int {
|
||||
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
|
||||
})
|
||||
}
|
||||
|
||||
rec := &recs[0]
|
||||
|
||||
c, err := rec.node.Driver.Client(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
store := proxy.NewContentStore(c.ContentClient())
|
||||
|
||||
if opts.digest != "" {
|
||||
ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{Digest: opts.digest})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
|
||||
return err
|
||||
}
|
||||
|
||||
attachments, err := allAttachments(ctx, store, *rec)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
typ := opts.typ
|
||||
switch typ {
|
||||
case "index":
|
||||
typ = ocispecs.MediaTypeImageIndex
|
||||
case "manifest":
|
||||
typ = ocispecs.MediaTypeImageManifest
|
||||
case "image":
|
||||
typ = ocispecs.MediaTypeImageConfig
|
||||
case "provenance":
|
||||
typ = slsa02.PredicateSLSAProvenance
|
||||
case "sbom":
|
||||
typ = intoto.PredicateSPDX
|
||||
}
|
||||
|
||||
for _, a := range attachments {
|
||||
if opts.platform != "" && (a.platform == nil || platforms.FormatAll(*a.platform) != opts.platform) {
|
||||
continue
|
||||
}
|
||||
if typ != "" && descrType(a.descr) != typ {
|
||||
continue
|
||||
}
|
||||
ra, err := store.ReaderAt(ctx, a.descr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
_, err = io.Copy(dockerCli.Out(), io.NewSectionReader(ra, 0, ra.Size()))
|
||||
return err
|
||||
}
|
||||
|
||||
return errors.Errorf("no matching attachment found for ref %q", opts.ref)
|
||||
}
|
||||
|
||||
func attachmentCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||
var options attachmentOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "attachment [OPTIONS] REF [DIGEST]",
|
||||
Short: "Inspect a build attachment",
|
||||
Args: cobra.RangeArgs(1, 2),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
options.ref = args[0]
|
||||
}
|
||||
if len(args) > 1 {
|
||||
dgst, err := digest.Parse(args[1])
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "invalid digest %q", args[1])
|
||||
}
|
||||
options.digest = dgst
|
||||
}
|
||||
|
||||
if options.digest == "" && options.platform == "" && options.typ == "" {
|
||||
return errors.New("at least one of --type, --platform or DIGEST must be specified")
|
||||
}
|
||||
|
||||
options.builder = *rootOpts.Builder
|
||||
return runAttachment(cmd.Context(), dockerCli, options)
|
||||
},
|
||||
ValidArgsFunction: completion.Disable,
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.StringVar(&options.typ, "type", "", "Type of attachment")
|
||||
flags.StringVar(&options.platform, "platform", "", "Platform of attachment")
|
||||
|
||||
return cmd
|
||||
}
|
124
commands/history/logs.go
Normal file
124
commands/history/logs.go
Normal file
@@ -0,0 +1,124 @@
|
||||
package history
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"slices"
|
||||
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/cli/cli/command"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/moby/buildkit/client"
|
||||
"github.com/moby/buildkit/util/progress/progressui"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type logsOptions struct {
|
||||
builder string
|
||||
ref string
|
||||
progress string
|
||||
}
|
||||
|
||||
func runLogs(ctx context.Context, dockerCli command.Cli, opts logsOptions) error {
|
||||
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := b.LoadNodes(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.Err != nil {
|
||||
return node.Err
|
||||
}
|
||||
}
|
||||
|
||||
recs, err := queryRecords(ctx, opts.ref, nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(recs) == 0 {
|
||||
if opts.ref == "" {
|
||||
return errors.New("no records found")
|
||||
}
|
||||
return errors.Errorf("no record found for ref %q", opts.ref)
|
||||
}
|
||||
|
||||
if opts.ref == "" {
|
||||
slices.SortFunc(recs, func(a, b historyRecord) int {
|
||||
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
|
||||
})
|
||||
}
|
||||
|
||||
rec := &recs[0]
|
||||
c, err := rec.node.Driver.Client(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
cl, err := c.ControlClient().Status(ctx, &controlapi.StatusRequest{
|
||||
Ref: rec.Ref,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var mode progressui.DisplayMode = progressui.DisplayMode(opts.progress)
|
||||
if mode == progressui.AutoMode {
|
||||
mode = progressui.PlainMode
|
||||
}
|
||||
printer, err := progress.NewPrinter(context.TODO(), os.Stderr, mode)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
loop0:
|
||||
for {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
cl.CloseSend()
|
||||
return context.Cause(ctx)
|
||||
default:
|
||||
ev, err := cl.Recv()
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break loop0
|
||||
}
|
||||
return err
|
||||
}
|
||||
printer.Write(client.NewSolveStatus(ev))
|
||||
}
|
||||
}
|
||||
|
||||
return printer.Wait()
|
||||
}
|
||||
|
||||
func logsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||
var options logsOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "logs [OPTIONS] [REF]",
|
||||
Short: "Print the logs of a build",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
options.ref = args[0]
|
||||
}
|
||||
options.builder = *rootOpts.Builder
|
||||
return runLogs(cmd.Context(), dockerCli, options)
|
||||
},
|
||||
ValidArgsFunction: completion.Disable,
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.StringVar(&options.progress, "progress", "plain", "Set type of progress output (plain, rawjson, tty)")
|
||||
|
||||
return cmd
|
||||
}
|
234
commands/history/ls.go
Normal file
234
commands/history/ls.go
Normal file
@@ -0,0 +1,234 @@
|
||||
package history
|
||||
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"slices"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/console"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/localstate"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/buildx/util/confutil"
|
||||
"github.com/docker/buildx/util/desktop"
|
||||
"github.com/docker/cli/cli"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/command/formatter"
|
||||
"github.com/docker/go-units"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
const (
|
||||
lsHeaderBuildID = "BUILD ID"
|
||||
lsHeaderName = "NAME"
|
||||
lsHeaderStatus = "STATUS"
|
||||
lsHeaderCreated = "CREATED AT"
|
||||
lsHeaderDuration = "DURATION"
|
||||
lsHeaderLink = ""
|
||||
|
||||
lsDefaultTableFormat = "table {{.Ref}}\t{{.Name}}\t{{.Status}}\t{{.CreatedAt}}\t{{.Duration}}\t{{.Link}}"
|
||||
|
||||
headerKeyTimestamp = "buildkit-current-timestamp"
|
||||
)
|
||||
|
||||
type lsOptions struct {
|
||||
builder string
|
||||
format string
|
||||
noTrunc bool
|
||||
}
|
||||
|
||||
func runLs(ctx context.Context, dockerCli command.Cli, opts lsOptions) error {
|
||||
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := b.LoadNodes(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.Err != nil {
|
||||
return node.Err
|
||||
}
|
||||
}
|
||||
|
||||
out, err := queryRecords(ctx, "", nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
ls, err := localstate.New(confutil.NewConfig(dockerCli))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
for i, rec := range out {
|
||||
st, _ := ls.ReadRef(rec.node.Builder, rec.node.Name, rec.Ref)
|
||||
rec.name = buildName(rec.FrontendAttrs, st)
|
||||
out[i] = rec
|
||||
}
|
||||
|
||||
return lsPrint(dockerCli, out, opts)
|
||||
}
|
||||
|
||||
func lsCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||
var options lsOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "ls",
|
||||
Short: "List build records",
|
||||
Args: cli.NoArgs,
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
options.builder = *rootOpts.Builder
|
||||
return runLs(cmd.Context(), dockerCli, options)
|
||||
},
|
||||
ValidArgsFunction: completion.Disable,
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.StringVar(&options.format, "format", formatter.TableFormatKey, "Format the output")
|
||||
flags.BoolVar(&options.noTrunc, "no-trunc", false, "Don't truncate output")
|
||||
|
||||
return cmd
|
||||
}
|
||||
|
||||
func lsPrint(dockerCli command.Cli, records []historyRecord, in lsOptions) error {
|
||||
if in.format == formatter.TableFormatKey {
|
||||
in.format = lsDefaultTableFormat
|
||||
}
|
||||
|
||||
ctx := formatter.Context{
|
||||
Output: dockerCli.Out(),
|
||||
Format: formatter.Format(in.format),
|
||||
Trunc: !in.noTrunc,
|
||||
}
|
||||
|
||||
slices.SortFunc(records, func(a, b historyRecord) int {
|
||||
if a.CompletedAt == nil && b.CompletedAt != nil {
|
||||
return -1
|
||||
}
|
||||
if a.CompletedAt != nil && b.CompletedAt == nil {
|
||||
return 1
|
||||
}
|
||||
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
|
||||
})
|
||||
|
||||
var term bool
|
||||
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
|
||||
term = true
|
||||
}
|
||||
render := func(format func(subContext formatter.SubContext) error) error {
|
||||
for _, r := range records {
|
||||
if err := format(&lsContext{
|
||||
format: formatter.Format(in.format),
|
||||
isTerm: term,
|
||||
trunc: !in.noTrunc,
|
||||
record: &r,
|
||||
}); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
lsCtx := lsContext{
|
||||
isTerm: term,
|
||||
trunc: !in.noTrunc,
|
||||
}
|
||||
lsCtx.Header = formatter.SubHeaderContext{
|
||||
"Ref": lsHeaderBuildID,
|
||||
"Name": lsHeaderName,
|
||||
"Status": lsHeaderStatus,
|
||||
"CreatedAt": lsHeaderCreated,
|
||||
"Duration": lsHeaderDuration,
|
||||
"Link": lsHeaderLink,
|
||||
}
|
||||
|
||||
return ctx.Write(&lsCtx, render)
|
||||
}
|
||||
|
||||
type lsContext struct {
|
||||
formatter.HeaderContext
|
||||
|
||||
isTerm bool
|
||||
trunc bool
|
||||
format formatter.Format
|
||||
record *historyRecord
|
||||
}
|
||||
|
||||
func (c *lsContext) MarshalJSON() ([]byte, error) {
|
||||
m := map[string]interface{}{
|
||||
"ref": c.FullRef(),
|
||||
"name": c.Name(),
|
||||
"status": c.Status(),
|
||||
"created_at": c.record.CreatedAt.AsTime().Format(time.RFC3339Nano),
|
||||
"total_steps": c.record.NumTotalSteps,
|
||||
"completed_steps": c.record.NumCompletedSteps,
|
||||
"cached_steps": c.record.NumCachedSteps,
|
||||
}
|
||||
if c.record.CompletedAt != nil {
|
||||
m["completed_at"] = c.record.CompletedAt.AsTime().Format(time.RFC3339Nano)
|
||||
}
|
||||
return json.Marshal(m)
|
||||
}
|
||||
|
||||
func (c *lsContext) Ref() string {
|
||||
return c.record.Ref
|
||||
}
|
||||
|
||||
func (c *lsContext) FullRef() string {
|
||||
return fmt.Sprintf("%s/%s/%s", c.record.node.Builder, c.record.node.Name, c.record.Ref)
|
||||
}
|
||||
|
||||
func (c *lsContext) Name() string {
|
||||
name := c.record.name
|
||||
if c.trunc && c.format.IsTable() {
|
||||
return trimBeginning(name, 36)
|
||||
}
|
||||
return name
|
||||
}
|
||||
|
||||
func (c *lsContext) Status() string {
|
||||
if c.record.CompletedAt != nil {
|
||||
if c.record.Error != nil {
|
||||
return "Error"
|
||||
}
|
||||
return "Completed"
|
||||
}
|
||||
return "Running"
|
||||
}
|
||||
|
||||
func (c *lsContext) CreatedAt() string {
|
||||
return units.HumanDuration(time.Since(c.record.CreatedAt.AsTime())) + " ago"
|
||||
}
|
||||
|
||||
func (c *lsContext) Duration() string {
|
||||
lastTime := c.record.currentTimestamp
|
||||
if c.record.CompletedAt != nil {
|
||||
tm := c.record.CompletedAt.AsTime()
|
||||
lastTime = &tm
|
||||
}
|
||||
if lastTime == nil {
|
||||
return ""
|
||||
}
|
||||
v := formatDuration(lastTime.Sub(c.record.CreatedAt.AsTime()))
|
||||
if c.record.CompletedAt == nil {
|
||||
v += "+"
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
func (c *lsContext) Link() string {
|
||||
url := desktop.BuildURL(c.FullRef())
|
||||
if c.format.IsTable() {
|
||||
if c.isTerm {
|
||||
return desktop.ANSIHyperlink(url, "Open")
|
||||
}
|
||||
return ""
|
||||
}
|
||||
return url
|
||||
}
|
80
commands/history/open.go
Normal file
80
commands/history/open.go
Normal file
@@ -0,0 +1,80 @@
|
||||
package history
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"slices"
|
||||
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/buildx/util/desktop"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/pkg/browser"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type openOptions struct {
|
||||
builder string
|
||||
ref string
|
||||
}
|
||||
|
||||
func runOpen(ctx context.Context, dockerCli command.Cli, opts openOptions) error {
|
||||
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := b.LoadNodes(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.Err != nil {
|
||||
return node.Err
|
||||
}
|
||||
}
|
||||
|
||||
recs, err := queryRecords(ctx, opts.ref, nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
if len(recs) == 0 {
|
||||
if opts.ref == "" {
|
||||
return errors.New("no records found")
|
||||
}
|
||||
return errors.Errorf("no record found for ref %q", opts.ref)
|
||||
}
|
||||
|
||||
if opts.ref == "" {
|
||||
slices.SortFunc(recs, func(a, b historyRecord) int {
|
||||
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
|
||||
})
|
||||
}
|
||||
|
||||
rec := &recs[0]
|
||||
|
||||
url := desktop.BuildURL(fmt.Sprintf("%s/%s/%s", rec.node.Builder, rec.node.Name, rec.Ref))
|
||||
return browser.OpenURL(url)
|
||||
}
|
||||
|
||||
func openCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||
var options openOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "open [OPTIONS] [REF]",
|
||||
Short: "Open a build in Docker Desktop",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
options.ref = args[0]
|
||||
}
|
||||
options.builder = *rootOpts.Builder
|
||||
return runOpen(cmd.Context(), dockerCli, options)
|
||||
},
|
||||
ValidArgsFunction: completion.Disable,
|
||||
}
|
||||
|
||||
return cmd
|
||||
}
|
151
commands/history/rm.go
Normal file
151
commands/history/rm.go
Normal file
@@ -0,0 +1,151 @@
|
||||
package history
|
||||
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/hashicorp/go-multierror"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
type rmOptions struct {
|
||||
builder string
|
||||
refs []string
|
||||
all bool
|
||||
}
|
||||
|
||||
func runRm(ctx context.Context, dockerCli command.Cli, opts rmOptions) error {
|
||||
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := b.LoadNodes(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.Err != nil {
|
||||
return node.Err
|
||||
}
|
||||
}
|
||||
|
||||
errs := make([][]error, len(opts.refs))
|
||||
for i := range errs {
|
||||
errs[i] = make([]error, len(nodes))
|
||||
}
|
||||
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
for i, node := range nodes {
|
||||
node := node
|
||||
eg.Go(func() error {
|
||||
if node.Driver == nil {
|
||||
return nil
|
||||
}
|
||||
c, err := node.Driver.Client(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
refs := opts.refs
|
||||
|
||||
if opts.all {
|
||||
serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
|
||||
EarlyExit: true,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer serv.CloseSend()
|
||||
|
||||
for {
|
||||
resp, err := serv.Recv()
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
if resp.Type == controlapi.BuildHistoryEventType_COMPLETE {
|
||||
refs = append(refs, resp.Record.Ref)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for j, ref := range refs {
|
||||
_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
|
||||
Ref: ref,
|
||||
Delete: true,
|
||||
})
|
||||
if opts.all {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
} else {
|
||||
errs[j][i] = err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var out []error
|
||||
loop0:
|
||||
for _, nodeErrs := range errs {
|
||||
var nodeErr error
|
||||
for _, err1 := range nodeErrs {
|
||||
if err1 == nil {
|
||||
continue loop0
|
||||
}
|
||||
if nodeErr == nil {
|
||||
nodeErr = err1
|
||||
} else {
|
||||
nodeErr = multierror.Append(nodeErr, err1)
|
||||
}
|
||||
}
|
||||
out = append(out, nodeErr)
|
||||
}
|
||||
if len(out) == 0 {
|
||||
return nil
|
||||
}
|
||||
if len(out) == 1 {
|
||||
return out[0]
|
||||
}
|
||||
return multierror.Append(out[0], out[1:]...)
|
||||
}
|
||||
|
||||
func rmCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||
var options rmOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "rm [OPTIONS] [REF...]",
|
||||
Short: "Remove build records",
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) == 0 && !options.all {
|
||||
return errors.New("rm requires at least one argument")
|
||||
}
|
||||
if len(args) > 0 && options.all {
|
||||
return errors.New("rm requires either --all or at least one argument")
|
||||
}
|
||||
options.refs = args
|
||||
options.builder = *rootOpts.Builder
|
||||
return runRm(cmd.Context(), dockerCli, options)
|
||||
},
|
||||
ValidArgsFunction: completion.Disable,
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.BoolVar(&options.all, "all", false, "Remove all build records")
|
||||
|
||||
return cmd
|
||||
}
|
31
commands/history/root.go
Normal file
31
commands/history/root.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package history
|
||||
|
||||
import (
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
|
||||
type RootOptions struct {
|
||||
Builder *string
|
||||
}
|
||||
|
||||
func RootCmd(rootcmd *cobra.Command, dockerCli command.Cli, opts RootOptions) *cobra.Command {
|
||||
cmd := &cobra.Command{
|
||||
Use: "history",
|
||||
Short: "Commands to work on build records",
|
||||
ValidArgsFunction: completion.Disable,
|
||||
RunE: rootcmd.RunE,
|
||||
}
|
||||
|
||||
cmd.AddCommand(
|
||||
lsCmd(dockerCli, opts),
|
||||
rmCmd(dockerCli, opts),
|
||||
logsCmd(dockerCli, opts),
|
||||
inspectCmd(dockerCli, opts),
|
||||
openCmd(dockerCli, opts),
|
||||
traceCmd(dockerCli, opts),
|
||||
)
|
||||
|
||||
return cmd
|
||||
}
|
260
commands/history/trace.go
Normal file
260
commands/history/trace.go
Normal file
@@ -0,0 +1,260 @@
|
||||
package history
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"net"
|
||||
"os"
|
||||
"slices"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/console"
|
||||
"github.com/containerd/containerd/v2/core/content/proxy"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
"github.com/docker/buildx/util/otelutil"
|
||||
"github.com/docker/buildx/util/otelutil/jaeger"
|
||||
"github.com/docker/cli/cli/command"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/pkg/browser"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/cobra"
|
||||
jaegerui "github.com/tonistiigi/jaeger-ui-rest"
|
||||
)
|
||||
|
||||
type traceOptions struct {
|
||||
builder string
|
||||
ref string
|
||||
addr string
|
||||
compare string
|
||||
}
|
||||
|
||||
func loadTrace(ctx context.Context, ref string, nodes []builder.Node) (string, []byte, error) {
|
||||
var offset *int
|
||||
if strings.HasPrefix(ref, "^") {
|
||||
off, err := strconv.Atoi(ref[1:])
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrapf(err, "invalid offset %q", ref)
|
||||
}
|
||||
offset = &off
|
||||
ref = ""
|
||||
}
|
||||
|
||||
recs, err := queryRecords(ctx, ref, nodes)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
var rec *historyRecord
|
||||
|
||||
if ref == "" {
|
||||
slices.SortFunc(recs, func(a, b historyRecord) int {
|
||||
return b.CreatedAt.AsTime().Compare(a.CreatedAt.AsTime())
|
||||
})
|
||||
for _, r := range recs {
|
||||
if r.CompletedAt != nil {
|
||||
if offset != nil {
|
||||
if *offset > 0 {
|
||||
*offset--
|
||||
continue
|
||||
}
|
||||
}
|
||||
rec = &r
|
||||
break
|
||||
}
|
||||
}
|
||||
if offset != nil && *offset > 0 {
|
||||
return "", nil, errors.Errorf("no completed build found with offset %d", *offset)
|
||||
}
|
||||
} else {
|
||||
rec = &recs[0]
|
||||
}
|
||||
if rec == nil {
|
||||
if ref == "" {
|
||||
return "", nil, errors.New("no records found")
|
||||
}
|
||||
return "", nil, errors.Errorf("no record found for ref %q", ref)
|
||||
}
|
||||
|
||||
if rec.CompletedAt == nil {
|
||||
return "", nil, errors.Errorf("build %q is not completed, only completed builds can be traced", rec.Ref)
|
||||
}
|
||||
|
||||
if rec.Trace == nil {
|
||||
// build is complete but no trace yet. try to finalize the trace
|
||||
time.Sleep(1 * time.Second) // give some extra time for last parts of trace to be written
|
||||
|
||||
c, err := rec.node.Driver.Client(ctx)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
_, err = c.ControlClient().UpdateBuildHistory(ctx, &controlapi.UpdateBuildHistoryRequest{
|
||||
Ref: rec.Ref,
|
||||
Finalize: true,
|
||||
})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
recs, err := queryRecords(ctx, rec.Ref, []builder.Node{*rec.node})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
if len(recs) == 0 {
|
||||
return "", nil, errors.Errorf("build record %q was deleted", rec.Ref)
|
||||
}
|
||||
|
||||
rec = &recs[0]
|
||||
if rec.Trace == nil {
|
||||
return "", nil, errors.Errorf("build record %q is missing a trace", rec.Ref)
|
||||
}
|
||||
}
|
||||
|
||||
c, err := rec.node.Driver.Client(ctx)
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
store := proxy.NewContentStore(c.ContentClient())
|
||||
|
||||
ra, err := store.ReaderAt(ctx, ocispecs.Descriptor{
|
||||
Digest: digest.Digest(rec.Trace.Digest),
|
||||
MediaType: rec.Trace.MediaType,
|
||||
Size: rec.Trace.Size,
|
||||
})
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
spans, err := otelutil.ParseSpanStubs(io.NewSectionReader(ra, 0, ra.Size()))
|
||||
if err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
wrapper := struct {
|
||||
Data []jaeger.Trace `json:"data"`
|
||||
}{
|
||||
Data: spans.JaegerData().Data,
|
||||
}
|
||||
|
||||
if len(wrapper.Data) == 0 {
|
||||
return "", nil, errors.New("no trace data")
|
||||
}
|
||||
|
||||
buf := &bytes.Buffer{}
|
||||
enc := json.NewEncoder(buf)
|
||||
enc.SetIndent("", " ")
|
||||
if err := enc.Encode(wrapper); err != nil {
|
||||
return "", nil, err
|
||||
}
|
||||
|
||||
return string(wrapper.Data[0].TraceID), buf.Bytes(), nil
|
||||
}
|
||||
|
||||
func runTrace(ctx context.Context, dockerCli command.Cli, opts traceOptions) error {
|
||||
b, err := builder.New(dockerCli, builder.WithName(opts.builder))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
nodes, err := b.LoadNodes(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
for _, node := range nodes {
|
||||
if node.Err != nil {
|
||||
return node.Err
|
||||
}
|
||||
}
|
||||
|
||||
traceID, data, err := loadTrace(ctx, opts.ref, nodes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
srv := jaegerui.NewServer(jaegerui.Config{})
|
||||
if err := srv.AddTrace(traceID, bytes.NewReader(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
url := "/trace/" + traceID
|
||||
|
||||
if opts.compare != "" {
|
||||
traceIDcomp, data, err := loadTrace(ctx, opts.compare, nodes)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "failed to load trace for %s", opts.compare)
|
||||
}
|
||||
if err := srv.AddTrace(traceIDcomp, bytes.NewReader(data)); err != nil {
|
||||
return err
|
||||
}
|
||||
url = "/trace/" + traceIDcomp + "..." + traceID
|
||||
}
|
||||
|
||||
var term bool
|
||||
if _, err := console.ConsoleFromFile(os.Stdout); err == nil {
|
||||
term = true
|
||||
}
|
||||
|
||||
if !term && opts.compare == "" {
|
||||
fmt.Fprintln(dockerCli.Out(), string(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
ln, err := net.Listen("tcp", opts.addr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
go func() {
|
||||
time.Sleep(100 * time.Millisecond)
|
||||
browser.OpenURL(url)
|
||||
}()
|
||||
|
||||
url = "http://" + ln.Addr().String() + url
|
||||
fmt.Fprintf(dockerCli.Err(), "Trace available at %s\n", url)
|
||||
|
||||
go func() {
|
||||
<-ctx.Done()
|
||||
ln.Close()
|
||||
}()
|
||||
|
||||
err = srv.Serve(ln)
|
||||
if err != nil {
|
||||
select {
|
||||
case <-ctx.Done():
|
||||
return nil
|
||||
default:
|
||||
}
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
func traceCmd(dockerCli command.Cli, rootOpts RootOptions) *cobra.Command {
|
||||
var options traceOptions
|
||||
|
||||
cmd := &cobra.Command{
|
||||
Use: "trace [OPTIONS] [REF]",
|
||||
Short: "Show the OpenTelemetry trace of a build record",
|
||||
Args: cobra.MaximumNArgs(1),
|
||||
RunE: func(cmd *cobra.Command, args []string) error {
|
||||
if len(args) > 0 {
|
||||
options.ref = args[0]
|
||||
}
|
||||
options.builder = *rootOpts.Builder
|
||||
return runTrace(cmd.Context(), dockerCli, options)
|
||||
},
|
||||
ValidArgsFunction: completion.Disable,
|
||||
}
|
||||
|
||||
flags := cmd.Flags()
|
||||
flags.StringVar(&options.addr, "addr", "127.0.0.1:0", "Address to bind the UI server")
|
||||
flags.StringVar(&options.compare, "compare", "", "Compare with another build reference")
|
||||
|
||||
return cmd
|
||||
}
|
180
commands/history/utils.go
Normal file
180
commands/history/utils.go
Normal file
@@ -0,0 +1,180 @@
|
||||
package history
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"io"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/docker/buildx/build"
|
||||
"github.com/docker/buildx/builder"
|
||||
"github.com/docker/buildx/localstate"
|
||||
controlapi "github.com/moby/buildkit/api/services/control"
|
||||
"github.com/pkg/errors"
|
||||
"golang.org/x/sync/errgroup"
|
||||
)
|
||||
|
||||
func buildName(fattrs map[string]string, ls *localstate.State) string {
|
||||
var res string
|
||||
|
||||
var target, contextPath, dockerfilePath, vcsSource string
|
||||
if v, ok := fattrs["target"]; ok {
|
||||
target = v
|
||||
}
|
||||
if v, ok := fattrs["context"]; ok {
|
||||
contextPath = filepath.ToSlash(v)
|
||||
} else if v, ok := fattrs["vcs:localdir:context"]; ok && v != "." {
|
||||
contextPath = filepath.ToSlash(v)
|
||||
}
|
||||
if v, ok := fattrs["vcs:source"]; ok {
|
||||
vcsSource = v
|
||||
}
|
||||
if v, ok := fattrs["filename"]; ok && v != "Dockerfile" {
|
||||
dockerfilePath = filepath.ToSlash(v)
|
||||
}
|
||||
if v, ok := fattrs["vcs:localdir:dockerfile"]; ok && v != "." {
|
||||
dockerfilePath = filepath.ToSlash(filepath.Join(v, dockerfilePath))
|
||||
}
|
||||
|
||||
var localPath string
|
||||
if ls != nil && !build.IsRemoteURL(ls.LocalPath) {
|
||||
if ls.LocalPath != "" && ls.LocalPath != "-" {
|
||||
localPath = filepath.ToSlash(ls.LocalPath)
|
||||
}
|
||||
if ls.DockerfilePath != "" && ls.DockerfilePath != "-" && ls.DockerfilePath != "Dockerfile" {
|
||||
dockerfilePath = filepath.ToSlash(ls.DockerfilePath)
|
||||
}
|
||||
}
|
||||
|
||||
// remove default dockerfile name
|
||||
const defaultFilename = "/Dockerfile"
|
||||
hasDefaultFileName := strings.HasSuffix(dockerfilePath, defaultFilename) || dockerfilePath == ""
|
||||
dockerfilePath = strings.TrimSuffix(dockerfilePath, defaultFilename)
|
||||
|
||||
// dockerfile is a subpath of context
|
||||
if strings.HasPrefix(dockerfilePath, localPath) && len(dockerfilePath) > len(localPath) {
|
||||
res = dockerfilePath[strings.LastIndex(localPath, "/")+1:]
|
||||
} else {
|
||||
// Otherwise, use basename
|
||||
bpath := localPath
|
||||
if len(dockerfilePath) > 0 {
|
||||
bpath = dockerfilePath
|
||||
}
|
||||
if len(bpath) > 0 {
|
||||
lidx := strings.LastIndex(bpath, "/")
|
||||
res = bpath[lidx+1:]
|
||||
if !hasDefaultFileName {
|
||||
if lidx != -1 {
|
||||
res = filepath.ToSlash(filepath.Join(filepath.Base(bpath[:lidx]), res))
|
||||
} else {
|
||||
res = filepath.ToSlash(filepath.Join(filepath.Base(bpath), res))
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if len(contextPath) > 0 {
|
||||
res = contextPath
|
||||
}
|
||||
if len(target) > 0 {
|
||||
if len(res) > 0 {
|
||||
res = res + " (" + target + ")"
|
||||
} else {
|
||||
res = target
|
||||
}
|
||||
}
|
||||
if res == "" && vcsSource != "" {
|
||||
return vcsSource
|
||||
}
|
||||
return res
|
||||
}
|
||||
|
||||
func trimBeginning(s string, n int) string {
|
||||
if len(s) <= n {
|
||||
return s
|
||||
}
|
||||
return ".." + s[len(s)-n+2:]
|
||||
}
|
||||
|
||||
type historyRecord struct {
|
||||
*controlapi.BuildHistoryRecord
|
||||
currentTimestamp *time.Time
|
||||
node *builder.Node
|
||||
name string
|
||||
}
|
||||
|
||||
func queryRecords(ctx context.Context, ref string, nodes []builder.Node) ([]historyRecord, error) {
|
||||
var mu sync.Mutex
|
||||
var out []historyRecord
|
||||
|
||||
eg, ctx := errgroup.WithContext(ctx)
|
||||
for _, node := range nodes {
|
||||
node := node
|
||||
eg.Go(func() error {
|
||||
if node.Driver == nil {
|
||||
return nil
|
||||
}
|
||||
var records []historyRecord
|
||||
c, err := node.Driver.Client(ctx)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
serv, err := c.ControlClient().ListenBuildHistory(ctx, &controlapi.BuildHistoryRequest{
|
||||
EarlyExit: true,
|
||||
Ref: ref,
|
||||
})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
md, err := serv.Header()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
var ts *time.Time
|
||||
if v, ok := md[headerKeyTimestamp]; ok {
|
||||
t, err := time.Parse(time.RFC3339Nano, v[0])
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
ts = &t
|
||||
}
|
||||
defer serv.CloseSend()
|
||||
for {
|
||||
he, err := serv.Recv()
|
||||
if err != nil {
|
||||
if errors.Is(err, io.EOF) {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
if he.Type == controlapi.BuildHistoryEventType_DELETED || he.Record == nil {
|
||||
continue
|
||||
}
|
||||
records = append(records, historyRecord{
|
||||
BuildHistoryRecord: he.Record,
|
||||
currentTimestamp: ts,
|
||||
node: &node,
|
||||
})
|
||||
}
|
||||
mu.Lock()
|
||||
out = append(out, records...)
|
||||
mu.Unlock()
|
||||
return nil
|
||||
})
|
||||
}
|
||||
|
||||
if err := eg.Wait(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return out, nil
|
||||
}
|
||||
|
||||
func formatDuration(d time.Duration) string {
|
||||
if d < time.Minute {
|
||||
return fmt.Sprintf("%.1fs", d.Seconds())
|
||||
}
|
||||
return fmt.Sprintf("%dm %2ds", int(d.Minutes()), int(d.Seconds())%60)
|
||||
}
|
@@ -115,6 +115,25 @@ func runInspect(ctx context.Context, dockerCli command.Cli, in inspectOptions) e
|
||||
fmt.Fprintf(w, "\t%s:\t%s\n", k, v)
|
||||
}
|
||||
}
|
||||
|
||||
if len(nodes[i].CDIDevices) > 0 {
|
||||
fmt.Fprintf(w, "Devices:\n")
|
||||
for _, dev := range nodes[i].CDIDevices {
|
||||
fmt.Fprintf(w, "\tName:\t%s\n", dev.Name)
|
||||
if dev.OnDemand {
|
||||
fmt.Fprintf(w, "\tOn-Demand:\t%v\n", dev.OnDemand)
|
||||
} else {
|
||||
fmt.Fprintf(w, "\tAutomatically allowed:\t%v\n", dev.AutoAllow)
|
||||
}
|
||||
if len(dev.Annotations) > 0 {
|
||||
fmt.Fprintf(w, "\tAnnotations:\n")
|
||||
for k, v := range dev.Annotations {
|
||||
fmt.Fprintf(w, "\t\t%s:\t%s\n", k, v)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for ri, rule := range nodes[i].GCPolicy {
|
||||
fmt.Fprintf(w, "GC Policy rule#%d:\n", ri)
|
||||
fmt.Fprintf(w, "\tAll:\t%v\n", rule.All)
|
||||
|
@@ -159,6 +159,9 @@ func lsPrint(dockerCli command.Cli, current *store.NodeGroup, builders []*builde
|
||||
}
|
||||
continue
|
||||
}
|
||||
if ctx.Format.IsJSON() {
|
||||
continue
|
||||
}
|
||||
for _, n := range b.Nodes() {
|
||||
if n.Err != nil {
|
||||
if ctx.Format.IsTable() {
|
||||
|
@@ -5,6 +5,7 @@ import (
|
||||
"os"
|
||||
|
||||
debugcmd "github.com/docker/buildx/commands/debug"
|
||||
historycmd "github.com/docker/buildx/commands/history"
|
||||
imagetoolscmd "github.com/docker/buildx/commands/imagetools"
|
||||
"github.com/docker/buildx/controller/remote"
|
||||
"github.com/docker/buildx/util/cobrautil/completion"
|
||||
@@ -106,6 +107,7 @@ func addCommands(cmd *cobra.Command, opts *rootOptions, dockerCli command.Cli) {
|
||||
pruneCmd(dockerCli, opts),
|
||||
duCmd(dockerCli, opts),
|
||||
imagetoolscmd.RootCmd(cmd, dockerCli, imagetoolscmd.RootOptions{Builder: &opts.builder}),
|
||||
historycmd.RootCmd(cmd, dockerCli, historycmd.RootOptions{Builder: &opts.builder}),
|
||||
)
|
||||
if confutil.IsExperimental() {
|
||||
cmd.AddCommand(debugcmd.RootCmd(dockerCli,
|
||||
|
@@ -75,7 +75,9 @@ func RunBuild(ctx context.Context, dockerCli command.Cli, in *controllerapi.Buil
|
||||
opts.Platforms = platforms
|
||||
|
||||
dockerConfig := dockerCli.ConfigFile()
|
||||
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(dockerConfig, nil))
|
||||
opts.Session = append(opts.Session, authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
|
||||
ConfigFile: dockerConfig,
|
||||
}))
|
||||
|
||||
secrets, err := controllerapi.CreateSecrets(in.Secrets)
|
||||
if err != nil {
|
||||
|
@@ -221,8 +221,10 @@ The following table shows the complete list of attributes that you can assign to
|
||||
| [`attest`](#targetattest) | List | Build attestations |
|
||||
| [`cache-from`](#targetcache-from) | List | External cache sources |
|
||||
| [`cache-to`](#targetcache-to) | List | External cache destinations |
|
||||
| [`call`](#targetcall) | String | Specify the frontend method to call for the target. |
|
||||
| [`context`](#targetcontext) | String | Set of files located in the specified path or URL |
|
||||
| [`contexts`](#targetcontexts) | Map | Additional build contexts |
|
||||
| [`description`](#targetdescription) | String | Description of a target |
|
||||
| [`dockerfile-inline`](#targetdockerfile-inline) | String | Inline Dockerfile string |
|
||||
| [`dockerfile`](#targetdockerfile) | String | Dockerfile location |
|
||||
| [`inherits`](#targetinherits) | List | Inherit attributes from other targets |
|
||||
@@ -283,19 +285,11 @@ The key takes a list of annotations, in the format of `KEY=VALUE`.
|
||||
|
||||
```hcl
|
||||
target "default" {
|
||||
output = ["type=image,name=foo"]
|
||||
output = [{ type = "image", name = "foo" }]
|
||||
annotations = ["org.opencontainers.image.authors=dvdksn"]
|
||||
}
|
||||
```
|
||||
|
||||
is the same as
|
||||
|
||||
```hcl
|
||||
target "default" {
|
||||
output = ["type=image,name=foo,annotation.org.opencontainers.image.authors=dvdksn"]
|
||||
}
|
||||
```
|
||||
|
||||
By default, the annotation is added to image manifests. You can configure the
|
||||
level of the annotations by adding a prefix to the annotation, containing a
|
||||
comma-separated list of all the levels that you want to annotate. The following
|
||||
@@ -303,7 +297,7 @@ example adds annotations to both the image index and manifests.
|
||||
|
||||
```hcl
|
||||
target "default" {
|
||||
output = ["type=image,name=foo"]
|
||||
output = [{ type = "image", name = "foo" }]
|
||||
annotations = ["index,manifest:org.opencontainers.image.authors=dvdksn"]
|
||||
}
|
||||
```
|
||||
@@ -319,8 +313,13 @@ This attribute accepts the long-form CSV version of attestation parameters.
|
||||
```hcl
|
||||
target "default" {
|
||||
attest = [
|
||||
"type=provenance,mode=min",
|
||||
"type=sbom"
|
||||
{
|
||||
type = "provenance",
|
||||
mode = "max",
|
||||
},
|
||||
{
|
||||
type = "sbom",
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -336,8 +335,15 @@ This takes a list value, so you can specify multiple cache sources.
|
||||
```hcl
|
||||
target "app" {
|
||||
cache-from = [
|
||||
"type=s3,region=eu-west-1,bucket=mybucket",
|
||||
"user/repo:cache",
|
||||
{
|
||||
type = "s3",
|
||||
region = "eu-west-1",
|
||||
bucket = "mybucket"
|
||||
},
|
||||
{
|
||||
type = "registry",
|
||||
ref = "user/repo:cache"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -353,8 +359,14 @@ This takes a list value, so you can specify multiple cache export targets.
|
||||
```hcl
|
||||
target "app" {
|
||||
cache-to = [
|
||||
"type=s3,region=eu-west-1,bucket=mybucket",
|
||||
"type=inline"
|
||||
{
|
||||
type = "s3",
|
||||
region = "eu-west-1",
|
||||
bucket = "mybucket"
|
||||
},
|
||||
{
|
||||
type = "inline",
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -371,6 +383,13 @@ target "app" {
|
||||
}
|
||||
```
|
||||
|
||||
Supported values are:
|
||||
|
||||
- `build` builds the target (default)
|
||||
- `check`: evaluates [build checks](https://docs.docker.com/build/checks/) for the target
|
||||
- `outline`: displays the target's build arguments and their default values if available
|
||||
- `targets`: lists all Bake targets in the loaded definition, along with its [description](#targetdescription).
|
||||
|
||||
For more information about frontend methods, refer to the CLI reference for
|
||||
[`docker buildx build --call`](https://docs.docker.com/reference/cli/docker/buildx/build/#call).
|
||||
|
||||
@@ -481,6 +500,25 @@ FROM baseapp
|
||||
RUN echo "Hello world"
|
||||
```
|
||||
|
||||
### `target.description`
|
||||
|
||||
Defines a human-readable description for the target, clarifying its purpose or
|
||||
functionality.
|
||||
|
||||
```hcl
|
||||
target "lint" {
|
||||
description = "Runs golangci-lint to detect style errors"
|
||||
args = {
|
||||
GOLANGCI_LINT_VERSION = null
|
||||
}
|
||||
dockerfile = "lint.Dockerfile"
|
||||
}
|
||||
```
|
||||
|
||||
This attribute is useful when combined with the `docker buildx bake --list=targets`
|
||||
option, providing a more informative output when listing the available build
|
||||
targets in a Bake file.
|
||||
|
||||
### `target.dockerfile-inline`
|
||||
|
||||
Uses the string value as an inline Dockerfile for the build target.
|
||||
@@ -835,7 +873,7 @@ The following example configures the target to use a cache-only output,
|
||||
|
||||
```hcl
|
||||
target "default" {
|
||||
output = ["type=cacheonly"]
|
||||
output = [{ type = "cacheonly" }]
|
||||
}
|
||||
```
|
||||
|
||||
@@ -875,8 +913,8 @@ variable "HOME" {
|
||||
|
||||
target "default" {
|
||||
secret = [
|
||||
"type=env,id=KUBECONFIG",
|
||||
"type=file,id=aws,src=${HOME}/.aws/credentials"
|
||||
{ type = "env", id = "KUBECONFIG" },
|
||||
{ type = "file", id = "aws", src = "${HOME}/.aws/credentials" },
|
||||
]
|
||||
}
|
||||
```
|
||||
@@ -920,7 +958,7 @@ This can be useful if you need to access private repositories during a build.
|
||||
|
||||
```hcl
|
||||
target "default" {
|
||||
ssh = ["default"]
|
||||
ssh = [{ id = "default" }]
|
||||
}
|
||||
```
|
||||
|
||||
|
@@ -17,6 +17,7 @@ Extended build capabilities with BuildKit
|
||||
| [`debug`](buildx_debug.md) | Start debugger (EXPERIMENTAL) |
|
||||
| [`dial-stdio`](buildx_dial-stdio.md) | Proxy current stdio streams to builder instance |
|
||||
| [`du`](buildx_du.md) | Disk usage |
|
||||
| [`history`](buildx_history.md) | Commands to work on build records |
|
||||
| [`imagetools`](buildx_imagetools.md) | Commands to work on images in registry |
|
||||
| [`inspect`](buildx_inspect.md) | Inspect current builder instance |
|
||||
| [`ls`](buildx_ls.md) | List builder instances |
|
||||
|
@@ -15,7 +15,7 @@ Build from a file
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:------------------------------------|:--------------|:--------|:-------------------------------------------------------------------------------------------------------------|
|
||||
| `--allow` | `stringArray` | | Allow build to access specified resources |
|
||||
| [`--allow`](#allow) | `stringArray` | | Allow build to access specified resources |
|
||||
| [`--builder`](#builder) | `string` | | Override the configured builder instance |
|
||||
| [`--call`](#call) | `string` | `build` | Set method for evaluating build (`check`, `outline`, `targets`) |
|
||||
| [`--check`](#check) | `bool` | | Shorthand for `--call=check` |
|
||||
@@ -51,6 +51,80 @@ guide for introduction to writing bake files.
|
||||
|
||||
## Examples
|
||||
|
||||
### <a name="allow"></a> Allow extra privileged entitlement (--allow)
|
||||
|
||||
```text
|
||||
--allow=ENTITLEMENT[=VALUE]
|
||||
```
|
||||
|
||||
Entitlements are designed to provide controlled access to privileged
|
||||
operations. By default, Buildx and BuildKit operates with restricted
|
||||
permissions to protect users and their systems from unintended side effects or
|
||||
security risks. The `--allow` flag explicitly grants access to additional
|
||||
entitlements, making it clear when a build or bake operation requires elevated
|
||||
privileges.
|
||||
|
||||
In addition to BuildKit's `network.host` and `security.insecure` entitlements
|
||||
(see [`docker buildx build --allow`](https://docs.docker.com/reference/cli/docker/buildx/build/#allow),
|
||||
Bake supports file system entitlements that grant granular control over file
|
||||
system access. These are particularly useful when working with builds that need
|
||||
access to files outside the default working directory.
|
||||
|
||||
Bake supports the following filesystem entitlements:
|
||||
|
||||
- `--allow fs=<path|*>` - Grant read and write access to files outside of the
|
||||
working directory.
|
||||
- `--allow fs.read=<path|*>` - Grant read access to files outside of the
|
||||
working directory.
|
||||
- `--allow fs.write=<path|*>` - Grant write access to files outside of the
|
||||
working directory.
|
||||
|
||||
The `fs` entitlements take a path value (relative or absolute) to a directory
|
||||
on the filesystem. Alternatively, you can pass a wildcard (`*`) to allow Bake
|
||||
to access the entire filesystem.
|
||||
|
||||
### Example: fs.read
|
||||
|
||||
Given the following Bake configuration, Bake would need to access the parent
|
||||
directory, relative to the Bake file.
|
||||
|
||||
```hcl
|
||||
target "app" {
|
||||
context = "../src"
|
||||
}
|
||||
```
|
||||
|
||||
Assuming `docker buildx bake app` is executed in the same directory as the
|
||||
`docker-bake.hcl` file, you would need to explicitly allow Bake to read from
|
||||
the `../src` directory. In this case, the following invocations all work:
|
||||
|
||||
```console
|
||||
$ docker buildx bake --allow fs.read=* app
|
||||
$ docker buildx bake --allow fs.read=../src app
|
||||
$ docker buildx bake --allow fs=* app
|
||||
```
|
||||
|
||||
### Example: fs.write
|
||||
|
||||
The following `docker-bake.hcl` file requires write access to the `/tmp`
|
||||
directory.
|
||||
|
||||
```hcl
|
||||
target "app" {
|
||||
output = "/tmp"
|
||||
}
|
||||
```
|
||||
|
||||
Assuming `docker buildx bake app` is executed outside of the `/tmp` directory,
|
||||
you would need to allow the `fs.write` entitlement, either by specifying the
|
||||
path or using a wildcard:
|
||||
|
||||
```console
|
||||
$ docker buildx bake --allow fs=/tmp app
|
||||
$ docker buildx bake --allow fs.write=/tmp app
|
||||
$ docker buildx bake --allow fs.write=* app
|
||||
```
|
||||
|
||||
### <a name="builder"></a> Override the configured builder instance (--builder)
|
||||
|
||||
Same as [`buildx --builder`](buildx.md#builder).
|
||||
|
@@ -16,7 +16,7 @@ Start a build
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|
||||
| [`--add-host`](#add-host) | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||
| [`--allow`](#allow) | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| [`--allow`](#allow) | `stringArray` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| [`--annotation`](#annotation) | `stringArray` | | Add annotation to the image |
|
||||
| [`--attest`](#attest) | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||
| [`--build-arg`](#build-arg) | `stringArray` | | Set build-time variables |
|
||||
|
@@ -12,7 +12,7 @@ Start a build
|
||||
| Name | Type | Default | Description |
|
||||
|:--------------------|:--------------|:----------|:-------------------------------------------------------------------------------------------------------------|
|
||||
| `--add-host` | `stringSlice` | | Add a custom host-to-IP mapping (format: `host:ip`) |
|
||||
| `--allow` | `stringSlice` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| `--allow` | `stringArray` | | Allow extra privileged entitlement (e.g., `network.host`, `security.insecure`) |
|
||||
| `--annotation` | `stringArray` | | Add annotation to the image |
|
||||
| `--attest` | `stringArray` | | Attestation parameters (format: `type=sbom,generator=image`) |
|
||||
| `--build-arg` | `stringArray` | | Set build-time variables |
|
||||
|
27
docs/reference/buildx_history.md
Normal file
27
docs/reference/buildx_history.md
Normal file
@@ -0,0 +1,27 @@
|
||||
# docker buildx history
|
||||
|
||||
<!---MARKER_GEN_START-->
|
||||
Commands to work on build records
|
||||
|
||||
### Subcommands
|
||||
|
||||
| Name | Description |
|
||||
|:---------------------------------------|:-----------------------------------------------|
|
||||
| [`inspect`](buildx_history_inspect.md) | Inspect a build |
|
||||
| [`logs`](buildx_history_logs.md) | Print the logs of a build |
|
||||
| [`ls`](buildx_history_ls.md) | List build records |
|
||||
| [`open`](buildx_history_open.md) | Open a build in Docker Desktop |
|
||||
| [`rm`](buildx_history_rm.md) | Remove build records |
|
||||
| [`trace`](buildx_history_trace.md) | Show the OpenTelemetry trace of a build record |
|
||||
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------|:---------|:--------|:-----------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
117
docs/reference/buildx_history_inspect.md
Normal file
117
docs/reference/buildx_history_inspect.md
Normal file
@@ -0,0 +1,117 @@
|
||||
# docker buildx history inspect
|
||||
|
||||
<!---MARKER_GEN_START-->
|
||||
Inspect a build
|
||||
|
||||
### Subcommands
|
||||
|
||||
| Name | Description |
|
||||
|:-----------------------------------------------------|:---------------------------|
|
||||
| [`attachment`](buildx_history_inspect_attachment.md) | Inspect a build attachment |
|
||||
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------------|:---------|:---------|:-----------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| [`--format`](#format) | `string` | `pretty` | Format the output |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
||||
## Examples
|
||||
|
||||
### <a name="format"></a> Format the output (--format)
|
||||
|
||||
The formatting options (`--format`) pretty-prints the output to `pretty` (default),
|
||||
`json` or using a Go template.
|
||||
|
||||
```console
|
||||
$ docker buildx history inspect
|
||||
Name: buildx (binaries)
|
||||
Context: .
|
||||
Dockerfile: Dockerfile
|
||||
VCS Repository: https://github.com/crazy-max/buildx.git
|
||||
VCS Revision: f15eaa1ee324ffbbab29605600d27a84cab86361
|
||||
Target: binaries
|
||||
Platforms: linux/amd64
|
||||
Keep Git Dir: true
|
||||
|
||||
Started: 2025-02-07 11:56:24
|
||||
Duration: 1m 1s
|
||||
Build Steps: 16/16 (25% cached)
|
||||
|
||||
Image Resolve Mode: local
|
||||
|
||||
Materials:
|
||||
URI DIGEST
|
||||
pkg:docker/docker/dockerfile@1 sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25
|
||||
pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64 sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037
|
||||
pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64 sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3
|
||||
|
||||
Attachments:
|
||||
DIGEST PLATFORM TYPE
|
||||
sha256:217329d2af959d4f02e3a96dcbe62bf100cab1feb8006a047ddfe51a5397f7e3 https://slsa.dev/provenance/v0.2
|
||||
|
||||
Print build logs: docker buildx history logs g9808bwrjrlkbhdamxklx660b
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker buildx history inspect --format json
|
||||
{
|
||||
"Name": "buildx (binaries)",
|
||||
"Ref": "5w7vkqfi0rf59hw4hnmn627r9",
|
||||
"Context": ".",
|
||||
"Dockerfile": "Dockerfile",
|
||||
"VCSRepository": "https://github.com/crazy-max/buildx.git",
|
||||
"VCSRevision": "f15eaa1ee324ffbbab29605600d27a84cab86361",
|
||||
"Target": "binaries",
|
||||
"Platform": [
|
||||
"linux/amd64"
|
||||
],
|
||||
"KeepGitDir": true,
|
||||
"StartedAt": "2025-02-07T12:01:05.75807272+01:00",
|
||||
"CompletedAt": "2025-02-07T12:02:07.991778875+01:00",
|
||||
"Duration": 62233706155,
|
||||
"Status": "completed",
|
||||
"NumCompletedSteps": 16,
|
||||
"NumTotalSteps": 16,
|
||||
"NumCachedSteps": 4,
|
||||
"Config": {
|
||||
"ImageResolveMode": "local"
|
||||
},
|
||||
"Materials": [
|
||||
{
|
||||
"URI": "pkg:docker/docker/dockerfile@1",
|
||||
"Digests": [
|
||||
"sha256:93bfd3b68c109427185cd78b4779fc82b484b0b7618e36d0f104d4d801e66d25"
|
||||
]
|
||||
},
|
||||
{
|
||||
"URI": "pkg:docker/golang@1.23-alpine3.21?platform=linux%2Famd64",
|
||||
"Digests": [
|
||||
"sha256:2c49857f2295e89b23b28386e57e018a86620a8fede5003900f2d138ba9c4037"
|
||||
]
|
||||
},
|
||||
{
|
||||
"URI": "pkg:docker/tonistiigi/xx@1.6.1?platform=linux%2Famd64",
|
||||
"Digests": [
|
||||
"sha256:923441d7c25f1e2eb5789f82d987693c47b8ed987c4ab3b075d6ed2b5d6779a3"
|
||||
]
|
||||
}
|
||||
],
|
||||
"Attachments": [
|
||||
{
|
||||
"Digest": "sha256:450fdd2e6b868fecd69e9891c2c404ba461aa38a47663b4805edeb8d2baf80b1",
|
||||
"Type": "https://slsa.dev/provenance/v0.2"
|
||||
}
|
||||
]
|
||||
}
|
||||
```
|
||||
|
||||
```console
|
||||
$ docker buildx history inspect --format "{{.Name}}: {{.VCSRepository}} ({{.VCSRevision}})"
|
||||
buildx (binaries): https://github.com/crazy-max/buildx.git (f15eaa1ee324ffbbab29605600d27a84cab86361)
|
||||
```
|
17
docs/reference/buildx_history_inspect_attachment.md
Normal file
17
docs/reference/buildx_history_inspect_attachment.md
Normal file
@@ -0,0 +1,17 @@
|
||||
# docker buildx history inspect attachment
|
||||
|
||||
<!---MARKER_GEN_START-->
|
||||
Inspect a build attachment
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------|:---------|:--------|:-----------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--platform` | `string` | | Platform of attachment |
|
||||
| `--type` | `string` | | Type of attachment |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
16
docs/reference/buildx_history_logs.md
Normal file
16
docs/reference/buildx_history_logs.md
Normal file
@@ -0,0 +1,16 @@
|
||||
# docker buildx history logs
|
||||
|
||||
<!---MARKER_GEN_START-->
|
||||
Print the logs of a build
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------|:---------|:--------|:--------------------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--progress` | `string` | `plain` | Set type of progress output (plain, rawjson, tty) |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
17
docs/reference/buildx_history_ls.md
Normal file
17
docs/reference/buildx_history_ls.md
Normal file
@@ -0,0 +1,17 @@
|
||||
# docker buildx history ls
|
||||
|
||||
<!---MARKER_GEN_START-->
|
||||
List build records
|
||||
|
||||
### Options
|
||||
|
||||
| Name | Type | Default | Description |
|
||||
|:----------------|:---------|:--------|:-----------------------------------------|
|
||||
| `--builder` | `string` | | Override the configured builder instance |
|
||||
| `-D`, `--debug` | `bool` | | Enable debug logging |
|
||||
| `--format` | `string` | `table` | Format the output |
|
||||
| `--no-trunc` | `bool` | | Don't truncate output |
|
||||
|
||||
|
||||
<!---MARKER_GEN_END-->
|
||||
|
15
docs/reference/buildx_history_open.md
Normal file
@@ -0,0 +1,15 @@
# docker buildx history open

<!---MARKER_GEN_START-->
Open a build in Docker Desktop

### Options

| Name            | Type     | Default | Description                               |
|:----------------|:---------|:--------|:------------------------------------------|
| `--builder`     | `string` |         | Override the configured builder instance  |
| `-D`, `--debug` | `bool`   |         | Enable debug logging                      |

<!---MARKER_GEN_END-->
16
docs/reference/buildx_history_rm.md
Normal file
@@ -0,0 +1,16 @@
# docker buildx history rm

<!---MARKER_GEN_START-->
Remove build records

### Options

| Name            | Type     | Default | Description                               |
|:----------------|:---------|:--------|:------------------------------------------|
| `--all`         | `bool`   |         | Remove all build records                  |
| `--builder`     | `string` |         | Override the configured builder instance  |
| `-D`, `--debug` | `bool`   |         | Enable debug logging                      |

<!---MARKER_GEN_END-->
17
docs/reference/buildx_history_trace.md
Normal file
@@ -0,0 +1,17 @@
# docker buildx history trace

<!---MARKER_GEN_START-->
Show the OpenTelemetry trace of a build record

### Options

| Name            | Type     | Default       | Description                               |
|:----------------|:---------|:--------------|:------------------------------------------|
| `--addr`        | `string` | `127.0.0.1:0` | Address to bind the UI server             |
| `--builder`     | `string` |               | Override the configured builder instance  |
| `--compare`     | `string` |               | Compare with another build reference      |
| `-D`, `--debug` | `bool`   |               | Enable debug logging                      |

<!---MARKER_GEN_END-->
@@ -23,10 +23,10 @@ import (
    "github.com/docker/docker/api/types/mount"
    "github.com/docker/docker/api/types/network"
    "github.com/docker/docker/api/types/system"
    dockerclient "github.com/docker/docker/client"
    "github.com/docker/docker/errdefs"
    dockerarchive "github.com/docker/docker/pkg/archive"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/jsonmessage"
    "github.com/docker/docker/pkg/stdcopy"
    "github.com/moby/buildkit/client"
    "github.com/pkg/errors"
@@ -70,7 +70,7 @@ func (d *Driver) Bootstrap(ctx context.Context, l progress.Logger) error {
    return progress.Wrap("[internal] booting buildkit", l, func(sub progress.SubLogger) error {
        _, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
        if err != nil {
            if dockerclient.IsErrNotFound(err) {
            if errdefs.IsNotFound(err) {
                return d.create(ctx, sub)
            }
            return err
@@ -95,19 +95,20 @@ func (d *Driver) create(ctx context.Context, l progress.SubLogger) error {
        if err != nil {
            return err
        }
        rc, err := d.DockerAPI.ImageCreate(ctx, imageName, image.CreateOptions{
        resp, err := d.DockerAPI.ImageCreate(ctx, imageName, image.CreateOptions{
            RegistryAuth: ra,
        })
        if err != nil {
            return err
        }
        _, err = io.Copy(io.Discard, rc)
        return err
        defer resp.Close()
        return jsonmessage.DisplayJSONMessagesStream(resp, io.Discard, 0, false, nil)
    }); err != nil {
        // image pulling failed, check if it exists in local image store.
        // if not, return pulling error. otherwise log it.
        _, _, errInspect := d.DockerAPI.ImageInspectWithRaw(ctx, imageName)
        if errInspect != nil {
        _, errInspect := d.DockerAPI.ImageInspect(ctx, imageName)
        found := errInspect == nil
        if !found {
            return err
        }
        l.Wrap("pulling failed, using local image "+imageName, func() error { return nil })
@@ -306,7 +307,7 @@ func (d *Driver) start(ctx context.Context) error {
func (d *Driver) Info(ctx context.Context) (*driver.Info, error) {
    ctn, err := d.DockerAPI.ContainerInspect(ctx, d.Name)
    if err != nil {
        if dockerclient.IsErrNotFound(err) {
        if errdefs.IsNotFound(err) {
            return &driver.Info{
                Status: driver.Inactive,
            }, nil
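The hunks above replace the client-specific `dockerclient.IsErrNotFound` check with the `errdefs` helpers. A minimal sketch of that pattern, outside buildx, might look like the following; the helper name and the container name are illustrative assumptions, and only API calls shown in the diff are used.

```go
package main

import (
	"context"
	"fmt"
	"os"

	dockerclient "github.com/docker/docker/client"
	"github.com/docker/docker/errdefs"
)

// containerExists reports whether a container is present, treating the
// daemon's "no such container" error as a normal answer rather than a failure.
func containerExists(ctx context.Context, api dockerclient.APIClient, name string) (bool, error) {
	_, err := api.ContainerInspect(ctx, name)
	if err != nil {
		if errdefs.IsNotFound(err) {
			return false, nil
		}
		return false, err
	}
	return true, nil
}

func main() {
	cli, err := dockerclient.NewClientWithOpts(dockerclient.FromEnv, dockerclient.WithAPIVersionNegotiation())
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	ok, err := containerExists(context.Background(), cli, "buildx_buildkit_default0")
	fmt.Println(ok, err)
}
```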
42
go.mod
42
go.mod
@@ -15,26 +15,28 @@ require (
|
||||
github.com/containerd/platforms v1.0.0-rc.1
|
||||
github.com/containerd/typeurl/v2 v2.2.3
|
||||
github.com/creack/pty v1.1.24
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||
github.com/davecgh/go-spew v1.1.1
|
||||
github.com/distribution/reference v0.6.0
|
||||
github.com/docker/cli v27.5.0+incompatible
|
||||
github.com/docker/cli v28.0.0-rc.2+incompatible
|
||||
github.com/docker/cli-docs-tool v0.9.0
|
||||
github.com/docker/docker v27.5.0+incompatible
|
||||
github.com/docker/docker v28.0.0-rc.2+incompatible
|
||||
github.com/docker/go-units v0.5.0
|
||||
github.com/gofrs/flock v0.12.1
|
||||
github.com/google/shlex v0.0.0-20191202100458-e7afc7fbc510
|
||||
github.com/google/uuid v1.6.0
|
||||
github.com/hashicorp/go-cty-funcs v0.0.0-20241120183456-c51673e0b3dd
|
||||
github.com/hashicorp/go-multierror v1.1.1
|
||||
github.com/hashicorp/hcl/v2 v2.23.0
|
||||
github.com/in-toto/in-toto-golang v0.5.0
|
||||
github.com/mitchellh/hashstructure/v2 v2.0.2
|
||||
github.com/moby/buildkit v0.19.0-rc2
|
||||
github.com/moby/buildkit v0.20.0-rc3
|
||||
github.com/moby/sys/mountinfo v0.7.2
|
||||
github.com/moby/sys/signal v0.7.1
|
||||
github.com/morikuni/aec v1.0.0
|
||||
github.com/opencontainers/go-digest v1.0.0
|
||||
github.com/opencontainers/image-spec v1.1.0
|
||||
github.com/pelletier/go-toml v1.9.5
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
|
||||
github.com/pkg/errors v0.9.1
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10
|
||||
github.com/serialx/hashring v0.0.0-20200727003509-22c0c7ab6b1b
|
||||
@@ -44,17 +46,20 @@ require (
|
||||
github.com/stretchr/testify v1.10.0
|
||||
github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4
|
||||
github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250211190051-7d4944a45bb6
|
||||
github.com/zclconf/go-cty v1.16.0
|
||||
go.opentelemetry.io/otel v1.31.0
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0
|
||||
go.opentelemetry.io/otel/metric v1.31.0
|
||||
go.opentelemetry.io/otel/sdk v1.31.0
|
||||
go.opentelemetry.io/otel/trace v1.31.0
|
||||
golang.org/x/mod v0.21.0
|
||||
golang.org/x/mod v0.22.0
|
||||
golang.org/x/sync v0.10.0
|
||||
golang.org/x/sys v0.28.0
|
||||
golang.org/x/sys v0.29.0
|
||||
golang.org/x/term v0.27.0
|
||||
golang.org/x/text v0.21.0
|
||||
google.golang.org/grpc v1.68.1
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38
|
||||
google.golang.org/grpc v1.69.4
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
|
||||
google.golang.org/protobuf v1.35.2
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
@@ -65,7 +70,7 @@ require (
|
||||
|
||||
require (
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
|
||||
github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
|
||||
github.com/agext/levenshtein v1.2.3 // indirect
|
||||
github.com/apparentlymart/go-cidr v1.0.1 // indirect
|
||||
@@ -114,7 +119,6 @@ require (
|
||||
github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 // indirect
|
||||
github.com/hashicorp/errwrap v1.1.0 // indirect
|
||||
github.com/hashicorp/go-cleanhttp v0.5.2 // indirect
|
||||
github.com/hashicorp/go-multierror v1.1.1 // indirect
|
||||
github.com/imdario/mergo v0.3.16 // indirect
|
||||
github.com/inconshreveable/mousetrap v1.1.0 // indirect
|
||||
github.com/josharian/intern v1.0.0 // indirect
|
||||
@@ -133,12 +137,12 @@ require (
|
||||
github.com/moby/sys/sequential v0.6.0 // indirect
|
||||
github.com/moby/sys/user v0.3.0 // indirect
|
||||
github.com/moby/sys/userns v0.1.0 // indirect
|
||||
github.com/moby/term v0.5.0 // indirect
|
||||
github.com/moby/term v0.5.2 // indirect
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
|
||||
github.com/modern-go/reflect2 v1.0.2 // indirect
|
||||
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
|
||||
github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/prometheus/client_golang v1.20.5 // indirect
|
||||
github.com/prometheus/client_model v0.6.1 // indirect
|
||||
github.com/prometheus/common v0.55.0 // indirect
|
||||
@@ -166,13 +170,12 @@ require (
|
||||
go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
|
||||
go.opentelemetry.io/proto/otlp v1.3.1 // indirect
|
||||
golang.org/x/crypto v0.31.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 // indirect
|
||||
golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect
|
||||
golang.org/x/net v0.33.0 // indirect
|
||||
golang.org/x/oauth2 v0.23.0 // indirect
|
||||
golang.org/x/time v0.6.0 // indirect
|
||||
golang.org/x/tools v0.25.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 // indirect
|
||||
golang.org/x/tools v0.27.0 // indirect
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 // indirect
|
||||
gopkg.in/inf.v0 v0.9.1 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
k8s.io/klog/v2 v2.130.1 // indirect
|
||||
@@ -182,3 +185,12 @@ require (
|
||||
sigs.k8s.io/structured-merge-diff/v4 v4.4.1 // indirect
|
||||
sigs.k8s.io/yaml v1.4.0 // indirect
|
||||
)
|
||||
|
||||
exclude (
|
||||
// FIXME(thaJeztah): remove this once kubernetes has updated their dependencies to no longer need this.
|
||||
//
|
||||
// For additional details, see this PR and links mentioned in that PR:
|
||||
// https://github.com/kubernetes-sigs/kustomize/pull/5830#issuecomment-2569960859
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2
|
||||
)
|
||||
|
60
go.sum
60
go.sum
@@ -2,8 +2,8 @@ github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 h1:He8af
|
||||
github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6/go.mod h1:8o94RPi1/7XTJvwPpRSzSUedZrtlirdB3r9Z20bi2f8=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 h1:dIScnXFlF784X79oi7MzVT6GWqr/W1uUt0pB5CsDs9M=
|
||||
github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 h1:UQHMgLO+TxOElx5B5HZ4hJQsoJ/PvUvKRhJHDQXO8P8=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
|
||||
github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
|
||||
github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
|
||||
@@ -79,8 +79,8 @@ github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUo
|
||||
github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
|
||||
github.com/compose-spec/compose-go/v2 v2.4.7 h1:WNpz5bIbKG+G+w9pfu72B1ZXr+Og9jez8TMEo8ecXPk=
|
||||
github.com/compose-spec/compose-go/v2 v2.4.7/go.mod h1:lFN0DrMxIncJGYAXTfWuajfwj5haBJqrBkarHcnjJKc=
|
||||
github.com/containerd/cgroups/v3 v3.0.3 h1:S5ByHZ/h9PMe5IOQoN7E+nMc2UcLEM/V48DGDJ9kip0=
|
||||
github.com/containerd/cgroups/v3 v3.0.3/go.mod h1:8HBe7V3aWGLFPd/k03swSIsGjZhHI2WzJmticMgVuz0=
|
||||
github.com/containerd/cgroups/v3 v3.0.5 h1:44na7Ud+VwyE7LIoJ8JTNQOa549a8543BmzaJHo6Bzo=
|
||||
github.com/containerd/cgroups/v3 v3.0.5/go.mod h1:SA5DLYnXO8pTGYiAHXz94qvLQTKfVM5GEVisn4jpins=
|
||||
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
|
||||
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
|
||||
github.com/containerd/containerd/api v1.8.0 h1:hVTNJKR8fMc/2Tiw60ZRijntNMd1U+JVMyTRdsD2bS0=
|
||||
@@ -117,21 +117,20 @@ github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ3
|
||||
github.com/creack/pty v1.1.24 h1:bJrF4RRfyJnbTJqzRLHzcGaZK1NeM5kTC9jGgovnR1s=
|
||||
github.com/creack/pty v1.1.24/go.mod h1:08sCNb52WyoAwi2QDyzUCTgcvVFhUzewun7wtTfvcwE=
|
||||
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
|
||||
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc h1:U9qPSI2PIWSS1VwoXQT9A3Wy9MM3WgvqSxFWenqJduM=
|
||||
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
|
||||
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
|
||||
github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
|
||||
github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
|
||||
github.com/docker/cli v27.5.0+incompatible h1:aMphQkcGtpHixwwhAXJT1rrK/detk2JIvDaFkLctbGM=
|
||||
github.com/docker/cli v27.5.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli v28.0.0-rc.2+incompatible h1:2N1dpr3qtlJwIQpqXm7oNwWNAUGzpKlsCeJ32ejvpTk=
|
||||
github.com/docker/cli v28.0.0-rc.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
|
||||
github.com/docker/cli-docs-tool v0.9.0 h1:CVwQbE+ZziwlPqrJ7LRyUF6GvCA+6gj7MTCsayaK9t0=
|
||||
github.com/docker/cli-docs-tool v0.9.0/go.mod h1:ClrwlNW+UioiRyH9GiAOe1o3J/TsY3Tr1ipoypjAUtc=
|
||||
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
|
||||
github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
|
||||
github.com/docker/docker v27.5.0+incompatible h1:um++2NcQtGRTz5eEgO6aJimo6/JxrTXC941hd05JO6U=
|
||||
github.com/docker/docker v27.5.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker v28.0.0-rc.2+incompatible h1:p+Ri+C0mmbPkhYVD9Sxnp/TnNnZoQWEj/EwOC465Uq4=
|
||||
github.com/docker/docker v28.0.0-rc.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
|
||||
github.com/docker/docker-credential-helpers v0.8.2 h1:bX3YxiGzFP5sOXWc3bTPEXdEaZSeVMrFgOr3T+zrFAo=
|
||||
github.com/docker/docker-credential-helpers v0.8.2/go.mod h1:P3ci7E3lwkZg6XiHdRKft1KckHiO9a2rNtyFbZ/ry9M=
|
||||
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
|
||||
@@ -298,8 +297,8 @@ github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/z
|
||||
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
|
||||
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
|
||||
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
|
||||
github.com/moby/buildkit v0.19.0-rc2 h1:7sAuQ5bDNIbdfmc7UDbrWJ2UPOR5w9rNWgnrEoC5aoo=
|
||||
github.com/moby/buildkit v0.19.0-rc2/go.mod h1:4WYJLet/NI2p1o2rPQ6CIFpyyyvwvPz/TVISmwqqpHI=
|
||||
github.com/moby/buildkit v0.20.0-rc3 h1:iExrfuZZuFgFudeNJhXfp/5vzJWTNrlqZ/LYJk4dG2Q=
|
||||
github.com/moby/buildkit v0.20.0-rc3/go.mod h1:kMXf90l/f3zygRK8bYbyetfyzoJYntb6Bpi2VsLfXgQ=
|
||||
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
|
||||
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
|
||||
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
|
||||
@@ -318,8 +317,8 @@ github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo=
|
||||
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
|
||||
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
|
||||
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
|
||||
github.com/moby/term v0.5.0 h1:xt8Q1nalod/v7BqbG21f8mQPqH+xAaC9C3N3wfWbVP0=
|
||||
github.com/moby/term v0.5.0/go.mod h1:8FzsFHVUBGZdbDsJw/ot+X+d5HLUbvklYLJ9uGfcI3Y=
|
||||
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
|
||||
github.com/moby/term v0.5.2/go.mod h1:d3djjFCrjnB+fl8NJux+EJzu0msscUP+f8it8hPkFLc=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
@@ -360,15 +359,16 @@ github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsq
|
||||
github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
|
||||
github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
|
||||
github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c h1:+mdjkGKdHQG3305AYmdv1U2eRNDiU2ErMBj1gwrq8eQ=
|
||||
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c/go.mod h1:7rwL4CYBLnjLxUqIJNnCWiEdr3bn6IUYi15bNlnbCCU=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10 h1:GFCKgmp0tecUJ0sJuv4pzYCqS9+RGSn52M3FUwPs+uo=
|
||||
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
|
||||
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
@@ -447,6 +447,8 @@ github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a h1:EfGw4G0x/8qXW
|
||||
github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a/go.mod h1:Dl/9oEjK7IqnjAm21Okx/XIxUCFJzvh+XdVHUlBwXTw=
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8=
|
||||
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
|
||||
github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250211190051-7d4944a45bb6 h1:RT/a0RvdX84iwtOrUK45+wjcNpaG+hS7n7XFYqj4axg=
|
||||
github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250211190051-7d4944a45bb6/go.mod h1:3Ez1Paeg+0Ghu3KwpEGC1HgZ4CHDlg+Ez/5Baeomk54=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
|
||||
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea/go.mod h1:WPnis/6cRcDZSUvVmezrxJPkiO87ThFYsoUiMwWNDJk=
|
||||
github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab h1:H6aJ0yKQ0gF49Qb2z5hI1UHxSQt4JMyxebFR15KnApw=
|
||||
@@ -490,6 +492,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0 h1:FFeLy
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.31.0/go.mod h1:TMu73/k1CP8nBUpDLc71Wj/Kf7ZS9FK5b53VapRsP9o=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 h1:lUsI2TYsQw2r1IASwoROaCnjdj2cvC2+Jbxvk6nHnWU=
|
||||
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0/go.mod h1:2HpZxxQurfGxJlJDblybejHB6RX6pmExPNe517hREw4=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0 h1:UGZ1QwZWY67Z6BmckTU+9Rxn04m2bD3gD6Mk0OIOCPk=
|
||||
go.opentelemetry.io/otel/exporters/stdout/stdouttrace v1.31.0/go.mod h1:fcwWuDuaObkkChiDlhEpSq9+X1C0omv+s5mBtToAQ64=
|
||||
go.opentelemetry.io/otel/metric v1.31.0 h1:FSErL0ATQAmYHUIzSezZibnyVlft1ybhy4ozRPcF2fE=
|
||||
go.opentelemetry.io/otel/metric v1.31.0/go.mod h1:C3dEloVbLuYoX41KpmAhOqNriGbA+qqH6PQ5E5mUfnY=
|
||||
go.opentelemetry.io/otel/sdk v1.31.0 h1:xLY3abVHYZ5HSfOg3l2E5LUj2Cwva5Y7yGxnSW9H5Gk=
|
||||
@@ -512,12 +516,12 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh
|
||||
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
|
||||
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0 h1:e66Fs6Z+fZTbFBAxKfP3PALWBtpfqks2bwGcexMxgtk=
|
||||
golang.org/x/exp v0.0.0-20240909161429-701f63a606c0/go.mod h1:2TbTHSBQa924w8M6Xs1QcRcFwyucIwBGpK1p2f1YFFY=
|
||||
golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo=
|
||||
golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak=
|
||||
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
||||
golang.org/x/mod v0.21.0 h1:vvrHzRwRfVKSiLrG+d4FMl/Qi4ukBCE6kZlTUkDYRT0=
|
||||
golang.org/x/mod v0.21.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4=
|
||||
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
@@ -549,8 +553,8 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.28.0 h1:Fksou7UEQUWlKvIdsqzJmUmCX3cZuD2+P3XyyzwMhlA=
|
||||
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
|
||||
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
|
||||
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
|
||||
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
@@ -565,20 +569,20 @@ golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGm
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
|
||||
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
|
||||
golang.org/x/tools v0.25.0 h1:oFU9pkj/iJgs+0DT+VMHrx+oBKs/LJMV+Uvg78sl+fE=
|
||||
golang.org/x/tools v0.25.0/go.mod h1:/vtpO8WL1N9cQC3FN5zPqb//fRXskFHbLKk4OW1Q7rg=
|
||||
golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o=
|
||||
golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q=
|
||||
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9 h1:T6rh4haD3GVYsgEfWExoCZA2o2FmbNyKpTuAxbEFPTg=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241007155032-5fefd90f89a9/go.mod h1:wp2WsuBYj6j8wUdo3ToZsdxxixbvQNAHqVJrTgi5E5M=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 h1:zciRKQ4kBpFgpfC5QQCVtnnNAcLIqweL7plyZRQHVpI=
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
|
||||
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
|
||||
google.golang.org/grpc v1.68.1 h1:oI5oTa11+ng8r8XMMN7jAOmWfPZWbYpCFaMUTACxkM0=
|
||||
google.golang.org/grpc v1.68.1/go.mod h1:+q1XYFJjShcqn0QZHvCyeR4CXPA+llXIeUIfIe00waw=
|
||||
google.golang.org/grpc v1.69.4 h1:MF5TftSMkd8GLw/m0KM6V8CMOCY6NZ1NQDPGFgbTt4A=
|
||||
google.golang.org/grpc v1.69.4/go.mod h1:vyjdE6jLBI76dgpDojsFGNaHlxdjXN9ghpnd2o7JGZ4=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 h1:F29+wU6Ee6qgu9TddPgooOdaqsxTMunOoj8KA5yuS5A=
|
||||
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1/go.mod h1:5KF+wpkbTSbGcR9zteSqZV6fqFOWBl4Yde8En8MryZA=
|
||||
google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8io=
|
||||
|
@@ -7,7 +7,7 @@ ARG XX_VERSION=1.6.1
ARG GOLANGCI_LINT_VERSION=1.62.0
ARG GOPLS_VERSION=v0.26.0
# disabled: deprecated unusedvariable simplifyrange
ARG GOPLS_ANALYZERS="embeddirective fillreturns infertypeargs nonewvars norangeoverfunc noresultvalues simplifycompositelit simplifyslice undeclaredname unusedparams useany"
ARG GOPLS_ANALYZERS="embeddirective fillreturns infertypeargs nonewvars noresultvalues simplifycompositelit simplifyslice undeclaredname unusedparams useany"

FROM --platform=$BUILDPLATFORM tonistiigi/xx:${XX_VERSION} AS xx
12
hack/test
@@ -2,6 +2,8 @@

set -eu -o pipefail

: "${GITHUB_ACTIONS=}"

: "${BUILDX_CMD=docker buildx}"

: "${TEST_COVERAGE=}"
@@ -37,7 +39,15 @@ if [ "$TEST_COVERAGE" = "1" ]; then
  export GO_TEST_COVERPROFILE="/testreports/coverage-report$TEST_REPORT_SUFFIX.txt"
fi

cid=$(docker create --rm --privileged \
dockerConfigMount=""
if [ "$GITHUB_ACTIONS" = "true" ]; then
  dockerConfigPath="$HOME/.docker/config.json"
  if [ -f "$dockerConfigPath" ]; then
    dockerConfigMount="-v $dockerConfigPath:/root/.docker/config.json:ro"
  fi
fi

cid=$(docker create --rm --privileged $dockerConfigMount \
  -v /tmp $testReportsVol \
  --volumes-from=$cacheVolume \
  -e GITHUB_REF \
@@ -66,7 +66,7 @@ func (cm *ReloadCmd) Exec(ctx context.Context, args []string) error {
    if err != nil {
        var be *controllererrors.BuildError
        if errors.As(err, &be) {
            ref = be.Ref
            ref = be.SessionID
            resultUpdated = true
        } else {
            fmt.Printf("failed to reload: %v\n", err)
@@ -91,7 +91,7 @@ func (a *Attest) ToPB() *controllerapi.Attest {

func (a *Attest) MarshalJSON() ([]byte, error) {
    m := make(map[string]interface{}, len(a.Attrs)+2)
    for k, v := range m {
    for k, v := range a.Attrs {
        m[k] = v
    }
    m["type"] = a.Type
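The fix above ranges over `a.Attrs` instead of the freshly created (and therefore empty) destination map `m`, so attribute keys end up flattened next to the fixed keys. A small self-contained sketch of that flattening pattern, using a stand-in struct rather than the buildx controller type:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type attest struct {
	Type     string
	Disabled bool
	Attrs    map[string]string
}

// MarshalJSON flattens Attrs into the same JSON object as the fixed keys.
func (a attest) MarshalJSON() ([]byte, error) {
	m := make(map[string]any, len(a.Attrs)+2)
	for k, v := range a.Attrs { // ranging over the destination map instead would copy nothing
		m[k] = v
	}
	m["type"] = a.Type
	if a.Disabled {
		m["disabled"] = true
	}
	return json.Marshal(m)
}

func main() {
	b, _ := json.Marshal(attest{Type: "provenance", Attrs: map[string]string{"mode": "max"}})
	fmt.Println(string(b)) // {"mode":"max","type":"provenance"}
}
```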
@@ -22,18 +22,19 @@ func (e *Attests) FromCtyValue(in cty.Value, p cty.Path) error {
    return p.NewErrorf("%s", convert.MismatchMessage(got, want))
}

func (e *Attests) fromCtyValue(in cty.Value, p cty.Path) error {
func (e *Attests) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
    *e = make([]*Attest, 0, in.LengthInt())
    for elem := in.ElementIterator(); elem.Next(); {
        _, value := elem.Element()

    yield := func(value cty.Value) bool {
        entry := &Attest{}
        if err := entry.FromCtyValue(value, p); err != nil {
            return err
        if retErr = entry.FromCtyValue(value, p); retErr != nil {
            return false
        }
        *e = append(*e, entry)
        return true
    }
    return nil
    eachElement(in)(yield)
    return retErr
}

func (e Attests) ToCtyValue() cty.Value {
@@ -64,6 +65,10 @@ func (e *Attest) FromCtyValue(in cty.Value, p cty.Path) error {
    e.Attrs = map[string]string{}
    for it := conv.ElementIterator(); it.Next(); {
        k, v := it.Element()
        if !v.IsKnown() {
            continue
        }

        switch key := k.AsString(); key {
        case "type":
            e.Type = v.AsString()
79
util/buildflags/attests_test.go
Normal file
79
util/buildflags/attests_test.go
Normal file
@@ -0,0 +1,79 @@
|
||||
package buildflags
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
func TestAttests(t *testing.T) {
|
||||
t.Run("MarshalJSON", func(t *testing.T) {
|
||||
attests := Attests{
|
||||
{Type: "provenance", Attrs: map[string]string{"mode": "max"}},
|
||||
{Type: "sbom", Disabled: true},
|
||||
}
|
||||
|
||||
expected := `[{"type":"provenance","mode":"max"},{"type":"sbom","disabled":true}]`
|
||||
actual, err := json.Marshal(attests)
|
||||
require.NoError(t, err)
|
||||
require.JSONEq(t, expected, string(actual))
|
||||
})
|
||||
|
||||
t.Run("UnmarshalJSON", func(t *testing.T) {
|
||||
in := `[{"type":"provenance","mode":"max"},{"type":"sbom","disabled":true}]`
|
||||
|
||||
var actual Attests
|
||||
err := json.Unmarshal([]byte(in), &actual)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := Attests{
|
||||
{Type: "provenance", Attrs: map[string]string{"mode": "max"}},
|
||||
{Type: "sbom", Disabled: true, Attrs: map[string]string{}},
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("FromCtyValue", func(t *testing.T) {
|
||||
in := cty.TupleVal([]cty.Value{
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"type": cty.StringVal("provenance"),
|
||||
"mode": cty.StringVal("max"),
|
||||
}),
|
||||
cty.StringVal("type=sbom,disabled=true"),
|
||||
})
|
||||
|
||||
var actual Attests
|
||||
err := actual.FromCtyValue(in, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := Attests{
|
||||
{Type: "provenance", Attrs: map[string]string{"mode": "max"}},
|
||||
{Type: "sbom", Disabled: true, Attrs: map[string]string{}},
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("ToCtyValue", func(t *testing.T) {
|
||||
attests := Attests{
|
||||
{Type: "provenance", Attrs: map[string]string{"mode": "max"}},
|
||||
{Type: "sbom", Disabled: true},
|
||||
}
|
||||
|
||||
actual := attests.ToCtyValue()
|
||||
expected := cty.ListVal([]cty.Value{
|
||||
cty.MapVal(map[string]cty.Value{
|
||||
"type": cty.StringVal("provenance"),
|
||||
"mode": cty.StringVal("max"),
|
||||
}),
|
||||
cty.MapVal(map[string]cty.Value{
|
||||
"type": cty.StringVal("sbom"),
|
||||
"disabled": cty.StringVal("true"),
|
||||
}),
|
||||
})
|
||||
|
||||
result := actual.Equals(expected)
|
||||
require.True(t, result.True())
|
||||
})
|
||||
}
|
@@ -5,6 +5,7 @@ import (
|
||||
"encoding/json"
|
||||
"maps"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
awsconfig "github.com/aws/aws-sdk-go-v2/config"
|
||||
@@ -167,34 +168,69 @@ func (e *CacheOptionsEntry) validate(gv interface{}) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func ParseCacheEntry(in []string) ([]*controllerapi.CacheOptionsEntry, error) {
|
||||
func ParseCacheEntry(in []string) (CacheOptions, error) {
|
||||
if len(in) == 0 {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
opts := make(CacheOptions, 0, len(in))
|
||||
for _, in := range in {
|
||||
if !strings.Contains(in, "=") {
|
||||
// This is ref only format. Each field in the CSV is its own entry.
|
||||
fields, err := csvvalue.Fields(in, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
for _, field := range fields {
|
||||
opt := CacheOptionsEntry{}
|
||||
if err := opt.UnmarshalText([]byte(field)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts, &opt)
|
||||
}
|
||||
continue
|
||||
}
|
||||
|
||||
var out CacheOptionsEntry
|
||||
if err := out.UnmarshalText([]byte(in)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
opts = append(opts, &out)
|
||||
}
|
||||
return opts.ToPB(), nil
|
||||
return opts, nil
|
||||
}
|
||||
|
||||
func addGithubToken(ci *controllerapi.CacheOptionsEntry) {
|
||||
if ci.Type != "gha" {
|
||||
return
|
||||
}
|
||||
version, ok := ci.Attrs["version"]
|
||||
if !ok {
|
||||
// https://github.com/actions/toolkit/blob/2b08dc18f261b9fdd978b70279b85cbef81af8bc/packages/cache/src/internal/config.ts#L19
|
||||
if v, ok := os.LookupEnv("ACTIONS_CACHE_SERVICE_V2"); ok {
|
||||
if b, err := strconv.ParseBool(v); err == nil && b {
|
||||
version = "2"
|
||||
}
|
||||
}
|
||||
}
|
||||
if _, ok := ci.Attrs["token"]; !ok {
|
||||
if v, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN"); ok {
|
||||
ci.Attrs["token"] = v
|
||||
}
|
||||
}
|
||||
if _, ok := ci.Attrs["url_v2"]; !ok && version == "2" {
|
||||
// https://github.com/actions/toolkit/blob/2b08dc18f261b9fdd978b70279b85cbef81af8bc/packages/cache/src/internal/config.ts#L34-L35
|
||||
if v, ok := os.LookupEnv("ACTIONS_RESULTS_URL"); ok {
|
||||
ci.Attrs["url_v2"] = v
|
||||
}
|
||||
}
|
||||
if _, ok := ci.Attrs["url"]; !ok {
|
||||
// https://github.com/actions/toolkit/blob/2b08dc18f261b9fdd978b70279b85cbef81af8bc/packages/cache/src/internal/config.ts#L28-L33
|
||||
if v, ok := os.LookupEnv("ACTIONS_CACHE_URL"); ok {
|
||||
ci.Attrs["url"] = v
|
||||
} else if v, ok := os.LookupEnv("ACTIONS_RESULTS_URL"); ok {
|
||||
ci.Attrs["url"] = v
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -234,5 +270,5 @@ func isActive(pb *controllerapi.CacheOptionsEntry) bool {
|
||||
if pb.Type != "gha" {
|
||||
return true
|
||||
}
|
||||
return pb.Attrs["token"] != "" && pb.Attrs["url"] != ""
|
||||
return pb.Attrs["token"] != "" && (pb.Attrs["url"] != "" || pb.Attrs["url_v2"] != "")
|
||||
}
|
||||
|
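The `addGithubToken` logic shown above derives the `token`, `url`, and `url_v2` attributes for `type=gha` cache entries from the GitHub Actions runtime environment. A minimal sketch of that env-derivation pattern, with a plain map standing in for the controller's cache entry type:

```go
package main

import (
	"fmt"
	"os"
	"strconv"
)

// fillGHADefaults fills missing gha cache attributes from the ACTIONS_*
// environment variables, mirroring the defaults applied in the diff above.
func fillGHADefaults(attrs map[string]string) {
	version := attrs["version"]
	if version == "" {
		if v, ok := os.LookupEnv("ACTIONS_CACHE_SERVICE_V2"); ok {
			if b, err := strconv.ParseBool(v); err == nil && b {
				version = "2"
			}
		}
	}
	if _, ok := attrs["token"]; !ok {
		if v, ok := os.LookupEnv("ACTIONS_RUNTIME_TOKEN"); ok {
			attrs["token"] = v
		}
	}
	if _, ok := attrs["url_v2"]; !ok && version == "2" {
		if v, ok := os.LookupEnv("ACTIONS_RESULTS_URL"); ok {
			attrs["url_v2"] = v
		}
	}
	if _, ok := attrs["url"]; !ok {
		if v, ok := os.LookupEnv("ACTIONS_CACHE_URL"); ok {
			attrs["url"] = v
		} else if v, ok := os.LookupEnv("ACTIONS_RESULTS_URL"); ok {
			attrs["url"] = v
		}
	}
}

func main() {
	attrs := map[string]string{}
	fillGHADefaults(attrs)
	fmt.Println(attrs)
}
```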
@@ -21,22 +21,30 @@ func (o *CacheOptions) FromCtyValue(in cty.Value, p cty.Path) error {
|
||||
return p.NewErrorf("%s", convert.MismatchMessage(got, want))
|
||||
}
|
||||
|
||||
func (o *CacheOptions) fromCtyValue(in cty.Value, p cty.Path) error {
|
||||
func (o *CacheOptions) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
|
||||
*o = make([]*CacheOptionsEntry, 0, in.LengthInt())
|
||||
for elem := in.ElementIterator(); elem.Next(); {
|
||||
_, value := elem.Element()
|
||||
|
||||
if isEmpty(value) {
|
||||
continue
|
||||
yield := func(value cty.Value) bool {
|
||||
// Special handling for a string type to handle ref only format.
|
||||
if value.Type() == cty.String {
|
||||
var entries CacheOptions
|
||||
entries, retErr = ParseCacheEntry([]string{value.AsString()})
|
||||
if retErr != nil {
|
||||
return false
|
||||
}
|
||||
*o = append(*o, entries...)
|
||||
return true
|
||||
}
|
||||
|
||||
entry := &CacheOptionsEntry{}
|
||||
if err := entry.FromCtyValue(value, p); err != nil {
|
||||
return err
|
||||
if retErr = entry.FromCtyValue(value, p); retErr != nil {
|
||||
return false
|
||||
}
|
||||
*o = append(*o, entry)
|
||||
return true
|
||||
}
|
||||
return nil
|
||||
eachElement(in)(yield)
|
||||
return retErr
|
||||
}
|
||||
|
||||
func (o CacheOptions) ToCtyValue() cty.Value {
|
||||
@@ -52,13 +60,6 @@ func (o CacheOptions) ToCtyValue() cty.Value {
|
||||
}
|
||||
|
||||
func (o *CacheOptionsEntry) FromCtyValue(in cty.Value, p cty.Path) error {
|
||||
if in.Type() == cty.String {
|
||||
if err := o.UnmarshalText([]byte(in.AsString())); err != nil {
|
||||
return p.NewError(err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
conv, err := convert.Convert(in, cty.Map(cty.String))
|
||||
if err != nil {
|
||||
return err
|
||||
|
@@ -1,10 +1,12 @@
|
||||
package buildflags
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/docker/buildx/controller/pb"
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
func TestCacheOptions_DerivedVars(t *testing.T) {
|
||||
@@ -35,5 +37,84 @@ func TestCacheOptions_DerivedVars(t *testing.T) {
|
||||
"session_token": "not_a_mitm_attack",
|
||||
},
|
||||
},
|
||||
}, cacheFrom)
|
||||
}, cacheFrom.ToPB())
|
||||
}
|
||||
|
||||
func TestCacheOptions(t *testing.T) {
|
||||
t.Run("MarshalJSON", func(t *testing.T) {
|
||||
cache := CacheOptions{
|
||||
{Type: "registry", Attrs: map[string]string{"ref": "user/app:cache"}},
|
||||
{Type: "local", Attrs: map[string]string{"src": "path/to/cache"}},
|
||||
}
|
||||
|
||||
expected := `[{"type":"registry","ref":"user/app:cache"},{"type":"local","src":"path/to/cache"}]`
|
||||
actual, err := json.Marshal(cache)
|
||||
require.NoError(t, err)
|
||||
require.JSONEq(t, expected, string(actual))
|
||||
})
|
||||
|
||||
t.Run("UnmarshalJSON", func(t *testing.T) {
|
||||
in := `[{"type":"registry","ref":"user/app:cache"},{"type":"local","src":"path/to/cache"}]`
|
||||
|
||||
var actual CacheOptions
|
||||
err := json.Unmarshal([]byte(in), &actual)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := CacheOptions{
|
||||
{Type: "registry", Attrs: map[string]string{"ref": "user/app:cache"}},
|
||||
{Type: "local", Attrs: map[string]string{"src": "path/to/cache"}},
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("FromCtyValue", func(t *testing.T) {
|
||||
in := cty.TupleVal([]cty.Value{
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"type": cty.StringVal("registry"),
|
||||
"ref": cty.StringVal("user/app:cache"),
|
||||
}),
|
||||
cty.StringVal("type=local,src=path/to/cache"),
|
||||
})
|
||||
|
||||
var actual CacheOptions
|
||||
err := actual.FromCtyValue(in, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := CacheOptions{
|
||||
{Type: "registry", Attrs: map[string]string{"ref": "user/app:cache"}},
|
||||
{Type: "local", Attrs: map[string]string{"src": "path/to/cache"}},
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("ToCtyValue", func(t *testing.T) {
|
||||
attests := CacheOptions{
|
||||
{Type: "registry", Attrs: map[string]string{"ref": "user/app:cache"}},
|
||||
{Type: "local", Attrs: map[string]string{"src": "path/to/cache"}},
|
||||
}
|
||||
|
||||
actual := attests.ToCtyValue()
|
||||
expected := cty.ListVal([]cty.Value{
|
||||
cty.MapVal(map[string]cty.Value{
|
||||
"type": cty.StringVal("registry"),
|
||||
"ref": cty.StringVal("user/app:cache"),
|
||||
}),
|
||||
cty.MapVal(map[string]cty.Value{
|
||||
"type": cty.StringVal("local"),
|
||||
"src": cty.StringVal("path/to/cache"),
|
||||
}),
|
||||
})
|
||||
|
||||
result := actual.Equals(expected)
|
||||
require.True(t, result.True())
|
||||
})
|
||||
}
|
||||
|
||||
func TestCacheOptions_RefOnlyFormat(t *testing.T) {
|
||||
opts, err := ParseCacheEntry([]string{"ref1", "ref2"})
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, CacheOptions{
|
||||
{Type: "registry", Attrs: map[string]string{"ref": "ref1"}},
|
||||
{Type: "registry", Attrs: map[string]string{"ref": "ref2"}},
|
||||
}, opts)
|
||||
}
|
||||
|
@@ -1,19 +1,20 @@
package buildflags

import "github.com/moby/buildkit/util/entitlements"
import (

func ParseEntitlements(in []string) ([]entitlements.Entitlement, error) {
    out := make([]entitlements.Entitlement, 0, len(in))
    "github.com/moby/buildkit/util/entitlements"
)

func ParseEntitlements(in []string) ([]string, error) {
    out := make([]string, 0, len(in))
    for _, v := range in {
        if v == "" {
            continue
        }

        e, err := entitlements.Parse(v)
        if err != nil {
        if _, _, err := entitlements.Parse(v); err != nil {
            return nil, err
        }
        out = append(out, e)
        out = append(out, v)
    }
    return out, nil
}
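The rewrite above validates each entitlement eagerly but keeps the raw strings for the build request instead of converting them to parsed values. A minimal self-contained sketch of that validate-then-keep pattern; the allow-list here is an illustrative assumption and stands in for the buildkit parser.

```go
package main

import (
	"fmt"
	"os"
)

var knownEntitlements = map[string]bool{
	"network.host":      true,
	"security.insecure": true,
}

// parseEntitlements validates each value but returns the original strings,
// so callers receive exactly what the user typed.
func parseEntitlements(in []string) ([]string, error) {
	out := make([]string, 0, len(in))
	for _, v := range in {
		if v == "" {
			continue
		}
		if !knownEntitlements[v] {
			return nil, fmt.Errorf("unknown entitlement %q", v)
		}
		out = append(out, v) // keep the raw string, not a parsed value
	}
	return out, nil
}

func main() {
	ents, err := parseEntitlements([]string{"network.host", ""})
	if err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println(ents) // [network.host]
}
```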
@@ -21,22 +21,19 @@ func (e *Exports) FromCtyValue(in cty.Value, p cty.Path) error {
|
||||
return p.NewErrorf("%s", convert.MismatchMessage(got, want))
|
||||
}
|
||||
|
||||
func (e *Exports) fromCtyValue(in cty.Value, p cty.Path) error {
|
||||
func (e *Exports) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
|
||||
*e = make([]*ExportEntry, 0, in.LengthInt())
|
||||
for elem := in.ElementIterator(); elem.Next(); {
|
||||
_, value := elem.Element()
|
||||
|
||||
if isEmpty(value) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield := func(value cty.Value) bool {
|
||||
entry := &ExportEntry{}
|
||||
if err := entry.FromCtyValue(value, p); err != nil {
|
||||
return err
|
||||
if retErr = entry.FromCtyValue(value, p); retErr != nil {
|
||||
return false
|
||||
}
|
||||
*e = append(*e, entry)
|
||||
return true
|
||||
}
|
||||
return nil
|
||||
eachElement(in)(yield)
|
||||
return retErr
|
||||
}
|
||||
|
||||
func (e Exports) ToCtyValue() cty.Value {
|
||||
|
@@ -1,6 +1,7 @@
package buildflags

import (
    "encoding/json"
    "strings"

    controllerapi "github.com/docker/buildx/controller/pb"
@@ -73,6 +74,22 @@ func (s *Secret) ToPB() *controllerapi.Secret {
    }
}

func (s *Secret) UnmarshalJSON(data []byte) error {
    var v struct {
        ID       string `json:"id,omitempty"`
        FilePath string `json:"src,omitempty"`
        Env      string `json:"env,omitempty"`
    }
    if err := json.Unmarshal(data, &v); err != nil {
        return err
    }

    s.ID = v.ID
    s.FilePath = v.FilePath
    s.Env = v.Env
    return nil
}

func (s *Secret) UnmarshalText(text []byte) error {
    value := string(text)
    fields, err := csvvalue.Fields(value, nil)
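With the new `UnmarshalJSON` above, secrets serialized with the short `id`/`src`/`env` keys round-trip cleanly through `encoding/json`. A minimal sketch using a local stand-in struct that mirrors the same field tags:

```go
package main

import (
	"encoding/json"
	"fmt"
)

type secret struct {
	ID       string `json:"id,omitempty"`
	FilePath string `json:"src,omitempty"`
	Env      string `json:"env,omitempty"`
}

func main() {
	in := `[{"id":"mysecret","src":"/local/secret"},{"id":"mysecret2","env":"TOKEN"}]`

	var secrets []secret
	if err := json.Unmarshal([]byte(in), &secrets); err != nil {
		panic(err)
	}

	out, _ := json.Marshal(secrets)
	fmt.Println(string(out)) // same shape as the input
}
```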
@@ -28,22 +28,19 @@ func (s *Secrets) FromCtyValue(in cty.Value, p cty.Path) error {
|
||||
return p.NewErrorf("%s", convert.MismatchMessage(got, want))
|
||||
}
|
||||
|
||||
func (s *Secrets) fromCtyValue(in cty.Value, p cty.Path) error {
|
||||
func (s *Secrets) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
|
||||
*s = make([]*Secret, 0, in.LengthInt())
|
||||
for elem := in.ElementIterator(); elem.Next(); {
|
||||
_, value := elem.Element()
|
||||
|
||||
if isEmpty(value) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield := func(value cty.Value) bool {
|
||||
entry := &Secret{}
|
||||
if err := entry.FromCtyValue(value, p); err != nil {
|
||||
return err
|
||||
if retErr = entry.FromCtyValue(value, p); retErr != nil {
|
||||
return false
|
||||
}
|
||||
*s = append(*s, entry)
|
||||
return true
|
||||
}
|
||||
return nil
|
||||
eachElement(in)(yield)
|
||||
return retErr
|
||||
}
|
||||
|
||||
func (s Secrets) ToCtyValue() cty.Value {
|
||||
@@ -71,13 +68,13 @@ func (e *Secret) FromCtyValue(in cty.Value, p cty.Path) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if id := conv.GetAttr("id"); !id.IsNull() {
|
||||
if id := conv.GetAttr("id"); !id.IsNull() && id.IsKnown() {
|
||||
e.ID = id.AsString()
|
||||
}
|
||||
if src := conv.GetAttr("src"); !src.IsNull() {
|
||||
if src := conv.GetAttr("src"); !src.IsNull() && src.IsKnown() {
|
||||
e.FilePath = src.AsString()
|
||||
}
|
||||
if env := conv.GetAttr("env"); !env.IsNull() {
|
||||
if env := conv.GetAttr("env"); !env.IsNull() && env.IsKnown() {
|
||||
e.Env = env.AsString()
|
||||
}
|
||||
return nil
|
||||
|
84
util/buildflags/secrets_test.go
Normal file
84
util/buildflags/secrets_test.go
Normal file
@@ -0,0 +1,84 @@
|
||||
package buildflags
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
func TestSecrets(t *testing.T) {
|
||||
t.Run("MarshalJSON", func(t *testing.T) {
|
||||
secrets := Secrets{
|
||||
{ID: "mysecret", FilePath: "/local/secret"},
|
||||
{ID: "mysecret2", Env: "TOKEN"},
|
||||
}
|
||||
|
||||
expected := `[{"id":"mysecret","src":"/local/secret"},{"id":"mysecret2","env":"TOKEN"}]`
|
||||
actual, err := json.Marshal(secrets)
|
||||
require.NoError(t, err)
|
||||
require.JSONEq(t, expected, string(actual))
|
||||
})
|
||||
|
||||
t.Run("UnmarshalJSON", func(t *testing.T) {
|
||||
in := `[{"id":"mysecret","src":"/local/secret"},{"id":"mysecret2","env":"TOKEN"}]`
|
||||
|
||||
var actual Secrets
|
||||
err := json.Unmarshal([]byte(in), &actual)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := Secrets{
|
||||
{ID: "mysecret", FilePath: "/local/secret"},
|
||||
{ID: "mysecret2", Env: "TOKEN"},
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("FromCtyValue", func(t *testing.T) {
|
||||
in := cty.TupleVal([]cty.Value{
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("mysecret"),
|
||||
"src": cty.StringVal("/local/secret"),
|
||||
}),
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("mysecret2"),
|
||||
"env": cty.StringVal("TOKEN"),
|
||||
}),
|
||||
})
|
||||
|
||||
var actual Secrets
|
||||
err := actual.FromCtyValue(in, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := Secrets{
|
||||
{ID: "mysecret", FilePath: "/local/secret"},
|
||||
{ID: "mysecret2", Env: "TOKEN"},
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("ToCtyValue", func(t *testing.T) {
|
||||
secrets := Secrets{
|
||||
{ID: "mysecret", FilePath: "/local/secret"},
|
||||
{ID: "mysecret2", Env: "TOKEN"},
|
||||
}
|
||||
|
||||
actual := secrets.ToCtyValue()
|
||||
expected := cty.ListVal([]cty.Value{
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("mysecret"),
|
||||
"src": cty.StringVal("/local/secret"),
|
||||
"env": cty.StringVal(""),
|
||||
}),
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("mysecret2"),
|
||||
"src": cty.StringVal(""),
|
||||
"env": cty.StringVal("TOKEN"),
|
||||
}),
|
||||
})
|
||||
|
||||
result := actual.Equals(expected)
|
||||
require.True(t, result.True())
|
||||
})
|
||||
}
|
@@ -2,6 +2,7 @@ package buildflags
|
||||
|
||||
import (
|
||||
"cmp"
|
||||
"encoding/json"
|
||||
"slices"
|
||||
"strings"
|
||||
|
||||
@@ -76,6 +77,20 @@ func (s *SSH) ToPB() *controllerapi.SSH {
|
||||
}
|
||||
}
|
||||
|
||||
func (s *SSH) UnmarshalJSON(data []byte) error {
|
||||
var v struct {
|
||||
ID string `json:"id,omitempty"`
|
||||
Paths []string `json:"paths,omitempty"`
|
||||
}
|
||||
if err := json.Unmarshal(data, &v); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
s.ID = v.ID
|
||||
s.Paths = v.Paths
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *SSH) UnmarshalText(text []byte) error {
|
||||
parts := strings.SplitN(string(text), "=", 2)
|
||||
|
||||
|
@@ -28,22 +28,19 @@ func (s *SSHKeys) FromCtyValue(in cty.Value, p cty.Path) error {
|
||||
return p.NewErrorf("%s", convert.MismatchMessage(got, want))
|
||||
}
|
||||
|
||||
func (s *SSHKeys) fromCtyValue(in cty.Value, p cty.Path) error {
|
||||
func (s *SSHKeys) fromCtyValue(in cty.Value, p cty.Path) (retErr error) {
|
||||
*s = make([]*SSH, 0, in.LengthInt())
|
||||
for elem := in.ElementIterator(); elem.Next(); {
|
||||
_, value := elem.Element()
|
||||
|
||||
if isEmpty(value) {
|
||||
continue
|
||||
}
|
||||
|
||||
yield := func(value cty.Value) bool {
|
||||
entry := &SSH{}
|
||||
if err := entry.FromCtyValue(value, p); err != nil {
|
||||
return err
|
||||
if retErr = entry.FromCtyValue(value, p); retErr != nil {
|
||||
return false
|
||||
}
|
||||
*s = append(*s, entry)
|
||||
return true
|
||||
}
|
||||
return nil
|
||||
eachElement(in)(yield)
|
||||
return retErr
|
||||
}
|
||||
|
||||
func (s SSHKeys) ToCtyValue() cty.Value {
|
||||
@@ -71,10 +68,10 @@ func (e *SSH) FromCtyValue(in cty.Value, p cty.Path) error {
|
||||
return err
|
||||
}
|
||||
|
||||
if id := conv.GetAttr("id"); !id.IsNull() {
|
||||
if id := conv.GetAttr("id"); !id.IsNull() && id.IsKnown() {
|
||||
e.ID = id.AsString()
|
||||
}
|
||||
if paths := conv.GetAttr("paths"); !paths.IsNull() {
|
||||
if paths := conv.GetAttr("paths"); !paths.IsNull() && paths.IsKnown() {
|
||||
if err := gocty.FromCtyValue(paths, &e.Paths); err != nil {
|
||||
return err
|
||||
}
|
||||
|
85
util/buildflags/ssh_test.go
Normal file
85
util/buildflags/ssh_test.go
Normal file
@@ -0,0 +1,85 @@
|
||||
package buildflags
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
"github.com/zclconf/go-cty/cty"
|
||||
)
|
||||
|
||||
func TestSSHKeys(t *testing.T) {
|
||||
t.Run("MarshalJSON", func(t *testing.T) {
|
||||
sshkeys := SSHKeys{
|
||||
{ID: "default", Paths: []string{}},
|
||||
{ID: "key", Paths: []string{"path/to/key"}},
|
||||
}
|
||||
|
||||
expected := `[{"id":"default"},{"id":"key","paths":["path/to/key"]}]`
|
||||
actual, err := json.Marshal(sshkeys)
|
||||
require.NoError(t, err)
|
||||
require.JSONEq(t, expected, string(actual))
|
||||
})
|
||||
|
||||
t.Run("UnmarshalJSON", func(t *testing.T) {
|
||||
in := `[{"id":"default"},{"id":"key","paths":["path/to/key"]}]`
|
||||
|
||||
var actual SSHKeys
|
||||
err := json.Unmarshal([]byte(in), &actual)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := SSHKeys{
|
||||
{ID: "default"},
|
||||
{ID: "key", Paths: []string{"path/to/key"}},
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("FromCtyValue", func(t *testing.T) {
|
||||
in := cty.TupleVal([]cty.Value{
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("default"),
|
||||
}),
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("key"),
|
||||
"paths": cty.TupleVal([]cty.Value{
|
||||
cty.StringVal("path/to/key"),
|
||||
}),
|
||||
}),
|
||||
})
|
||||
|
||||
var actual SSHKeys
|
||||
err := actual.FromCtyValue(in, nil)
|
||||
require.NoError(t, err)
|
||||
|
||||
expected := SSHKeys{
|
||||
{ID: "default"},
|
||||
{ID: "key", Paths: []string{"path/to/key"}},
|
||||
}
|
||||
require.Equal(t, expected, actual)
|
||||
})
|
||||
|
||||
t.Run("ToCtyValue", func(t *testing.T) {
|
||||
sshkeys := SSHKeys{
|
||||
{ID: "default", Paths: []string{}},
|
||||
{ID: "key", Paths: []string{"path/to/key"}},
|
||||
}
|
||||
|
||||
actual := sshkeys.ToCtyValue()
|
||||
expected := cty.ListVal([]cty.Value{
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("default"),
|
||||
"paths": cty.ListValEmpty(cty.String),
|
||||
}),
|
||||
cty.ObjectVal(map[string]cty.Value{
|
||||
"id": cty.StringVal("key"),
|
||||
"paths": cty.ListVal([]cty.Value{
|
||||
cty.StringVal("path/to/key"),
|
||||
}),
|
||||
}),
|
||||
})
|
||||
|
||||
result := actual.Equals(expected)
|
||||
require.True(t, result.True())
|
||||
})
|
||||
}
|
@@ -34,7 +34,7 @@ func removeDupes[E comparable[E]](s []E) []E {
}

func getAndDelete(m map[string]cty.Value, attr string, gv interface{}) error {
    if v, ok := m[attr]; ok {
    if v, ok := m[attr]; ok && v.IsKnown() {
        delete(m, attr)
        return gocty.FromCtyValue(v, gv)
    }
@@ -44,11 +44,33 @@ func getAndDelete(m map[string]cty.Value, attr string, gv interface{}) error {
func asMap(m map[string]cty.Value) map[string]string {
    out := make(map[string]string, len(m))
    for k, v := range m {
        out[k] = v.AsString()
        if v.IsKnown() {
            out[k] = v.AsString()
        }
    }
    return out
}

func isEmpty(v cty.Value) bool {
    return v.Type() == cty.String && v.AsString() == ""
func isEmptyOrUnknown(v cty.Value) bool {
    return !v.IsKnown() || (v.Type() == cty.String && v.AsString() == "")
}

// Seq is a temporary definition of iter.Seq until we are able to migrate
// to using go1.23 as our minimum version. This can be removed when go1.24
// is released and usages can be changed to use rangefunc.
type Seq[V any] func(yield func(V) bool)

func eachElement(in cty.Value) Seq[cty.Value] {
    return func(yield func(v cty.Value) bool) {
        for elem := in.ElementIterator(); elem.Next(); {
            _, value := elem.Element()
            if isEmptyOrUnknown(value) {
                continue
            }

            if !yield(value) {
                return
            }
        }
    }
}
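This `Seq`/`eachElement` helper is what the `fromCtyValue` rewrites earlier in the diff consume: the producer drives a `yield` callback and the caller captures the first error through a named return. A minimal self-contained sketch of the same pattern, with a string slice standing in for the cty values:

```go
package main

import (
	"fmt"
	"strconv"
)

type Seq[V any] func(yield func(V) bool)

// each skips empty elements, like isEmptyOrUnknown above, and stops when
// the yield callback returns false.
func each(values []string) Seq[string] {
	return func(yield func(string) bool) {
		for _, v := range values {
			if v == "" {
				continue
			}
			if !yield(v) {
				return
			}
		}
	}
}

func parseAll(values []string) (out []int, retErr error) {
	yield := func(v string) bool {
		n, err := strconv.Atoi(v)
		if err != nil {
			retErr = err
			return false // stop iteration on the first error
		}
		out = append(out, n)
		return true
	}
	each(values)(yield)
	return out, retErr
}

func main() {
	nums, err := parseAll([]string{"1", "", "2"})
	fmt.Println(nums, err) // [1 2] <nil>
}
```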
@@ -8,7 +8,7 @@ import (
    "sync"

    "github.com/docker/cli/cli/command"
    "github.com/docker/docker/pkg/ioutils"
    "github.com/docker/docker/pkg/atomicwriter"
    "github.com/moby/buildkit/cmd/buildkitd/config"
    "github.com/pelletier/go-toml"
    "github.com/pkg/errors"
@@ -106,7 +106,7 @@ func (c *Config) MkdirAll(dir string, perm os.FileMode) error {
// AtomicWriteFile writes data to a file within the config dir atomically
func (c *Config) AtomicWriteFile(filename string, data []byte, perm os.FileMode) error {
    f := filepath.Join(c.dir, filename)
    if err := ioutils.AtomicWriteFile(f, data, perm); err != nil {
    if err := atomicwriter.WriteFile(f, data, perm); err != nil {
        return err
    }
    if c.chowner == nil {
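The helper swapped in above performs an atomic file write. The usual write-then-rename idea behind such helpers is sketched below; this is a generic illustration under the assumption of a POSIX filesystem, not the moby `atomicwriter` implementation.

```go
package main

import (
	"os"
	"path/filepath"
)

// atomicWriteFile writes data to a temporary file in the destination
// directory and renames it into place, so readers never see a partial write.
func atomicWriteFile(filename string, data []byte, perm os.FileMode) error {
	dir := filepath.Dir(filename)
	tmp, err := os.CreateTemp(dir, ".tmp-"+filepath.Base(filename))
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // no-op if the rename below succeeded

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	// Rename is atomic on POSIX filesystems.
	return os.Rename(tmp.Name(), filename)
}

func main() {
	_ = atomicWriteFile("example.txt", []byte("hello\n"), 0o644)
}
```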
@@ -28,13 +28,14 @@ func BuildBackendEnabled() bool {
    return bbEnabled
}

func BuildURL(ref string) string {
    return fmt.Sprintf("docker-desktop://dashboard/build/%s", ref)
}

func BuildDetailsOutput(refs map[string]string, term bool) string {
    if len(refs) == 0 {
        return ""
    }
    refURL := func(ref string) string {
        return fmt.Sprintf("docker-desktop://dashboard/build/%s", ref)
    }
    var out bytes.Buffer
    out.WriteString("View build details: ")
    multiTargets := len(refs) > 1
@@ -43,9 +44,10 @@ func BuildDetailsOutput(refs map[string]string, term bool) string {
            out.WriteString(fmt.Sprintf("\n  %s: ", target))
        }
        if term {
            out.WriteString(hyperlink(refURL(ref)))
            url := BuildURL(ref)
            out.WriteString(ANSIHyperlink(url, url))
        } else {
            out.WriteString(refURL(ref))
            out.WriteString(BuildURL(ref))
        }
    }
    return out.String()
@@ -57,9 +59,9 @@ func PrintBuildDetails(w io.Writer, refs map[string]string, term bool) {
    }
}

func hyperlink(url string) string {
func ANSIHyperlink(url, text string) string {
    // create an escape sequence using the OSC 8 format: https://gist.github.com/egmontkob/eb114294efbcd5adb1944c9f3cb5feda
    return fmt.Sprintf("\033]8;;%s\033\\%s\033]8;;\033\\", url, url)
    return fmt.Sprintf("\033]8;;%s\033\\%s\033]8;;\033\\", url, text)
}

type ErrorWithBuildRef struct {
@@ -3,12 +3,12 @@ package dockerutil
|
||||
import (
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/cli/cli/context/docker"
|
||||
"github.com/docker/docker/client"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// ClientAPI represents an active docker API object.
|
||||
type ClientAPI struct {
|
||||
client.APIClient
|
||||
dockerclient.APIClient
|
||||
}
|
||||
|
||||
func NewClientAPI(cli command.Cli, ep string) (*ClientAPI, error) {
|
||||
@@ -36,7 +36,7 @@ func NewClientAPI(cli command.Cli, ep string) (*ClientAPI, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
ca.APIClient, err = client.NewClientWithOpts(clientOpts...)
|
||||
ca.APIClient, err = dockerclient.NewClientWithOpts(clientOpts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
@@ -7,7 +7,7 @@ import (
|
||||
|
||||
"github.com/docker/buildx/util/progress"
|
||||
"github.com/docker/cli/cli/command"
|
||||
"github.com/docker/docker/client"
|
||||
dockerclient "github.com/docker/docker/client"
|
||||
)
|
||||
|
||||
// Client represents an active docker object.
|
||||
@@ -24,7 +24,7 @@ func NewClient(cli command.Cli) *Client {
|
||||
}
|
||||
|
||||
// API returns a new docker API client.
|
||||
func (c *Client) API(name string) (client.APIClient, error) {
|
||||
func (c *Client) API(name string) (dockerclient.APIClient, error) {
|
||||
if name == "" {
|
||||
name = c.cli.CurrentContext()
|
||||
}
|
||||
@@ -52,7 +52,7 @@ func (c *Client) LoadImage(ctx context.Context, name string, status progress.Wri
|
||||
w.mu.Unlock()
|
||||
}
|
||||
|
||||
resp, err := dapi.ImageLoad(ctx, pr, false)
|
||||
resp, err := dapi.ImageLoad(ctx, pr)
|
||||
defer close(done)
|
||||
if err != nil {
|
||||
handleErr(err)
|
||||
|
11125
util/otelutil/fixtures/bktraces.json
Normal file
11125
util/otelutil/fixtures/bktraces.json
Normal file
File diff suppressed because it is too large
Load Diff
9542
util/otelutil/fixtures/jaeger.json
Normal file
9542
util/otelutil/fixtures/jaeger.json
Normal file
File diff suppressed because it is too large
Load Diff
11127
util/otelutil/fixtures/otlp.json
Normal file
11127
util/otelutil/fixtures/otlp.json
Normal file
File diff suppressed because it is too large
Load Diff
45
util/otelutil/jaeger.go
Normal file
45
util/otelutil/jaeger.go
Normal file
@@ -0,0 +1,45 @@
|
||||
package otelutil
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
|
||||
"github.com/docker/buildx/util/otelutil/jaeger"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
)
|
||||
|
||||
type JaegerData struct {
|
||||
Data []jaeger.Trace `json:"data"`
|
||||
}
|
||||
|
||||
// JaegerData return Jaeger data compatible with ui import feature.
|
||||
// https://github.com/jaegertracing/jaeger-ui/issues/381#issuecomment-494150826
|
||||
func (s Spans) JaegerData() JaegerData {
|
||||
roSpans := s.ReadOnlySpans()
|
||||
|
||||
// fetch default service.name from default resource for backup
|
||||
var defaultServiceName string
|
||||
defaultResource := resource.Default()
|
||||
if value, exists := defaultResource.Set().Value(attribute.Key("service.name")); exists {
|
||||
defaultServiceName = value.AsString()
|
||||
}
|
||||
|
||||
data := jaeger.Trace{
|
||||
TraceID: jaeger.TraceID(roSpans[0].SpanContext().TraceID().String()),
|
||||
Processes: make(map[jaeger.ProcessID]jaeger.Process),
|
||||
Spans: []jaeger.Span{},
|
||||
}
|
||||
for i := range roSpans {
|
||||
ss := roSpans[i]
|
||||
pid := jaeger.ProcessID(fmt.Sprintf("p%d", i))
|
||||
data.Processes[pid] = jaeger.ResourceToProcess(ss.Resource(), defaultServiceName)
|
||||
span := jaeger.ConvertSpan(ss)
|
||||
span.Process = nil
|
||||
span.ProcessID = pid
|
||||
data.Spans = append(data.Spans, span)
|
||||
}
|
||||
|
||||
return JaegerData{
|
||||
Data: []jaeger.Trace{data},
|
||||
}
|
||||
}
|
224
util/otelutil/jaeger/convert.go
Normal file
224
util/otelutil/jaeger/convert.go
Normal file
@@ -0,0 +1,224 @@
|
||||
package jaeger
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"time"
|
||||
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/codes"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
tracesdk "go.opentelemetry.io/otel/sdk/trace"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
const (
|
||||
keyInstrumentationLibraryName = "otel.library.name"
|
||||
keyInstrumentationLibraryVersion = "otel.library.version"
|
||||
keyError = "error"
|
||||
keySpanKind = "span.kind"
|
||||
keyStatusCode = "otel.status_code"
|
||||
keyStatusMessage = "otel.status_description"
|
||||
keyDroppedAttributeCount = "otel.event.dropped_attributes_count"
|
||||
keyEventName = "event"
|
||||
)
|
||||
|
||||
func ResourceToProcess(res *resource.Resource, defaultServiceName string) Process {
|
||||
var process Process
|
||||
var serviceName attribute.KeyValue
|
||||
if res != nil {
|
||||
for iter := res.Iter(); iter.Next(); {
|
||||
if iter.Attribute().Key == attribute.Key("service.name") {
|
||||
serviceName = iter.Attribute()
|
||||
// Don't convert service.name into tag.
|
||||
continue
|
||||
}
|
||||
if tag := keyValueToJaegerTag(iter.Attribute()); tag != nil {
|
||||
process.Tags = append(process.Tags, *tag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// If no service.name is contained in a Span's Resource,
|
||||
// that field MUST be populated from the default Resource.
|
||||
if serviceName.Value.AsString() == "" {
|
||||
serviceName = attribute.Key("service.version").String(defaultServiceName)
|
||||
}
|
||||
process.ServiceName = serviceName.Value.AsString()
|
||||
|
||||
return process
|
||||
}
|
||||
|
||||
func ConvertSpan(ss tracesdk.ReadOnlySpan) Span {
|
||||
attr := ss.Attributes()
|
||||
tags := make([]KeyValue, 0, len(attr))
|
||||
for _, kv := range attr {
|
||||
tag := keyValueToJaegerTag(kv)
|
||||
if tag != nil {
|
||||
tags = append(tags, *tag)
|
||||
}
|
||||
}
|
||||
|
||||
if is := ss.InstrumentationScope(); is.Name != "" {
|
||||
tags = append(tags, getStringTag(keyInstrumentationLibraryName, is.Name))
|
||||
if is.Version != "" {
|
||||
tags = append(tags, getStringTag(keyInstrumentationLibraryVersion, is.Version))
|
||||
}
|
||||
}
|
||||
|
||||
if ss.SpanKind() != trace.SpanKindInternal {
|
||||
tags = append(tags,
|
||||
getStringTag(keySpanKind, ss.SpanKind().String()),
|
||||
)
|
||||
}
|
||||
|
||||
if ss.Status().Code != codes.Unset {
|
||||
switch ss.Status().Code {
|
||||
case codes.Ok:
|
||||
tags = append(tags, getStringTag(keyStatusCode, "OK"))
|
||||
case codes.Error:
|
||||
tags = append(tags, getBoolTag(keyError, true))
|
||||
tags = append(tags, getStringTag(keyStatusCode, "ERROR"))
|
||||
}
|
||||
if ss.Status().Description != "" {
|
||||
tags = append(tags, getStringTag(keyStatusMessage, ss.Status().Description))
|
||||
}
|
||||
}
|
||||
|
||||
var logs []Log
|
||||
for _, a := range ss.Events() {
|
||||
nTags := len(a.Attributes)
|
||||
if a.Name != "" {
|
||||
nTags++
|
||||
}
|
||||
if a.DroppedAttributeCount != 0 {
|
||||
nTags++
|
||||
}
|
||||
fields := make([]KeyValue, 0, nTags)
|
||||
if a.Name != "" {
|
||||
// If an event contains an attribute with the same key, it needs
|
||||
// to be given precedence and overwrite this.
|
||||
fields = append(fields, getStringTag(keyEventName, a.Name))
|
||||
}
|
||||
for _, kv := range a.Attributes {
|
||||
tag := keyValueToJaegerTag(kv)
|
||||
if tag != nil {
|
||||
fields = append(fields, *tag)
|
||||
}
|
||||
}
|
||||
if a.DroppedAttributeCount != 0 {
|
||||
fields = append(fields, getInt64Tag(keyDroppedAttributeCount, int64(a.DroppedAttributeCount)))
|
||||
}
|
||||
logs = append(logs, Log{
|
||||
Timestamp: timeAsEpochMicroseconds(a.Time),
|
||||
Fields: fields,
|
||||
})
|
||||
}
|
||||
|
||||
var refs []Reference
|
||||
for _, link := range ss.Links() {
|
||||
refs = append(refs, Reference{
|
||||
RefType: FollowsFrom,
|
||||
TraceID: TraceID(link.SpanContext.TraceID().String()),
|
||||
SpanID: SpanID(link.SpanContext.SpanID().String()),
|
||||
})
|
||||
}
|
||||
refs = append(refs, Reference{
|
||||
RefType: ChildOf,
|
||||
TraceID: TraceID(ss.Parent().TraceID().String()),
|
||||
SpanID: SpanID(ss.Parent().SpanID().String()),
|
||||
})
|
||||
|
||||
return Span{
|
||||
TraceID: TraceID(ss.SpanContext().TraceID().String()),
|
||||
SpanID: SpanID(ss.SpanContext().SpanID().String()),
|
||||
Flags: uint32(ss.SpanContext().TraceFlags()),
|
||||
OperationName: ss.Name(),
|
||||
References: refs,
|
||||
StartTime: timeAsEpochMicroseconds(ss.StartTime()),
|
||||
Duration: durationAsMicroseconds(ss.EndTime().Sub(ss.StartTime())),
|
||||
Tags: tags,
|
||||
Logs: logs,
|
||||
}
|
||||
}
|
||||
|
||||
func keyValueToJaegerTag(keyValue attribute.KeyValue) *KeyValue {
|
||||
var tag *KeyValue
|
||||
switch keyValue.Value.Type() {
|
||||
case attribute.STRING:
|
||||
s := keyValue.Value.AsString()
|
||||
tag = &KeyValue{
|
||||
Key: string(keyValue.Key),
|
||||
Type: StringType,
|
||||
Value: s,
|
||||
}
|
||||
case attribute.BOOL:
|
||||
b := keyValue.Value.AsBool()
|
||||
tag = &KeyValue{
|
||||
Key: string(keyValue.Key),
|
||||
Type: BoolType,
|
||||
Value: b,
|
||||
}
|
||||
case attribute.INT64:
|
||||
i := keyValue.Value.AsInt64()
|
||||
tag = &KeyValue{
|
||||
Key: string(keyValue.Key),
|
||||
Type: Int64Type,
|
||||
Value: i,
|
||||
}
|
||||
case attribute.FLOAT64:
|
||||
f := keyValue.Value.AsFloat64()
|
||||
tag = &KeyValue{
|
||||
Key: string(keyValue.Key),
|
||||
Type: Float64Type,
|
||||
Value: f,
|
||||
}
|
||||
case attribute.BOOLSLICE,
|
||||
attribute.INT64SLICE,
|
||||
attribute.FLOAT64SLICE,
|
||||
attribute.STRINGSLICE:
|
||||
data, _ := json.Marshal(keyValue.Value.AsInterface())
|
||||
a := (string)(data)
|
||||
tag = &KeyValue{
|
||||
Key: string(keyValue.Key),
|
||||
Type: StringType,
|
||||
Value: a,
|
||||
}
|
||||
}
|
||||
return tag
|
||||
}
|
||||
|
||||
func getInt64Tag(k string, i int64) KeyValue {
|
||||
return KeyValue{
|
||||
Key: k,
|
||||
Type: Int64Type,
|
||||
Value: i,
|
||||
}
|
||||
}
|
||||
|
||||
func getStringTag(k, s string) KeyValue {
|
||||
return KeyValue{
|
||||
Key: k,
|
||||
Type: StringType,
|
||||
Value: s,
|
||||
}
|
||||
}
|
||||
|
||||
func getBoolTag(k string, b bool) KeyValue {
|
||||
return KeyValue{
|
||||
Key: k,
|
||||
Type: BoolType,
|
||||
Value: b,
|
||||
}
|
||||
}
|
||||
|
||||
// timeAsEpochMicroseconds converts time.Time to microseconds since epoch,
|
||||
// which is the format the StartTime field is stored in the Span.
|
||||
func timeAsEpochMicroseconds(t time.Time) uint64 {
|
||||
return uint64(t.UnixNano() / 1000)
|
||||
}
|
||||
|
||||
// durationAsMicroseconds converts time.Duration to microseconds,
|
||||
// which is the format the Duration field is stored in the Span.
|
||||
func durationAsMicroseconds(d time.Duration) uint64 {
|
||||
return uint64(d.Nanoseconds() / 1000)
|
||||
}
|
102
util/otelutil/jaeger/model.go
Normal file
102
util/otelutil/jaeger/model.go
Normal file
@@ -0,0 +1,102 @@
|
||||
package jaeger
|
||||
|
||||
// ReferenceType is the reference type of one span to another
|
||||
type ReferenceType string
|
||||
|
||||
// TraceID is the shared trace ID of all spans in the trace.
|
||||
type TraceID string
|
||||
|
||||
// SpanID is the id of a span
|
||||
type SpanID string
|
||||
|
||||
// ProcessID is a hashed value of the Process struct that is unique within the trace.
|
||||
type ProcessID string
|
||||
|
||||
// ValueType is the type of a value stored in KeyValue struct.
|
||||
type ValueType string
|
||||
|
||||
const (
|
||||
// ChildOf means a span is the child of another span
|
||||
ChildOf ReferenceType = "CHILD_OF"
|
||||
// FollowsFrom means a span follows from another span
|
||||
FollowsFrom ReferenceType = "FOLLOWS_FROM"
|
||||
|
||||
// StringType indicates a string value stored in KeyValue
|
||||
StringType ValueType = "string"
|
||||
// BoolType indicates a Boolean value stored in KeyValue
|
||||
BoolType ValueType = "bool"
|
||||
// Int64Type indicates a 64bit signed integer value stored in KeyValue
|
||||
Int64Type ValueType = "int64"
|
||||
// Float64Type indicates a 64bit float value stored in KeyValue
|
||||
Float64Type ValueType = "float64"
|
||||
// BinaryType indicates an arbitrary byte array stored in KeyValue
|
||||
BinaryType ValueType = "binary"
|
||||
)
|
||||
|
||||
// Trace is a list of spans
|
||||
type Trace struct {
|
||||
TraceID TraceID `json:"traceID"`
|
||||
Spans []Span `json:"spans"`
|
||||
Processes map[ProcessID]Process `json:"processes"`
|
||||
Warnings []string `json:"warnings"`
|
||||
}
|
||||
|
||||
// Span is a span denoting a piece of work in some infrastructure
|
||||
// When converting to UI model, ParentSpanID and Process should be dereferenced into
|
||||
// References and ProcessID, respectively.
|
||||
// When converting to ES model, ProcessID and Warnings should be omitted. Even if
|
||||
// included, ES with dynamic settings off will automatically ignore unneeded fields.
|
||||
type Span struct {
|
||||
TraceID TraceID `json:"traceID"`
|
||||
SpanID SpanID `json:"spanID"`
|
||||
ParentSpanID SpanID `json:"parentSpanID,omitempty"` // deprecated
|
||||
Flags uint32 `json:"flags,omitempty"`
|
||||
OperationName string `json:"operationName"`
|
||||
References []Reference `json:"references"`
|
||||
StartTime uint64 `json:"startTime"` // microseconds since Unix epoch
|
||||
Duration uint64 `json:"duration"` // microseconds
|
||||
Tags []KeyValue `json:"tags"`
|
||||
Logs []Log `json:"logs"`
|
||||
ProcessID ProcessID `json:"processID,omitempty"`
|
||||
Process *Process `json:"process,omitempty"`
|
||||
Warnings []string `json:"warnings"`
|
||||
}
|
||||
|
||||
// Reference is a reference from one span to another
|
||||
type Reference struct {
|
||||
RefType ReferenceType `json:"refType"`
|
||||
TraceID TraceID `json:"traceID"`
|
||||
SpanID SpanID `json:"spanID"`
|
||||
}
|
||||
|
||||
// Process is the process emitting a set of spans
|
||||
type Process struct {
|
||||
ServiceName string `json:"serviceName"`
|
||||
Tags []KeyValue `json:"tags"`
|
||||
}
|
||||
|
||||
// Log is a log emitted in a span
|
||||
type Log struct {
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
Fields []KeyValue `json:"fields"`
|
||||
}
|
||||
|
||||
// KeyValue is a key-value pair with typed value.
|
||||
type KeyValue struct {
|
||||
Key string `json:"key"`
|
||||
Type ValueType `json:"type,omitempty"`
|
||||
Value interface{} `json:"value"`
|
||||
}
|
||||
|
||||
// DependencyLink shows dependencies between services
|
||||
type DependencyLink struct {
|
||||
Parent string `json:"parent"`
|
||||
Child string `json:"child"`
|
||||
CallCount uint64 `json:"callCount"`
|
||||
}
|
||||
|
||||
// Operation defines the data in the operation response when query operation by service and span kind
|
||||
type Operation struct {
|
||||
Name string `json:"name"`
|
||||
SpanKind string `json:"spanKind"`
|
||||
}
|
27
util/otelutil/jaeger_test.go
Normal file
27
util/otelutil/jaeger_test.go
Normal file
@@ -0,0 +1,27 @@
|
||||
package otelutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/require"
|
||||
)
|
||||
|
||||
const jaegerFixture = "./fixtures/jaeger.json"
|
||||
|
||||
func TestJaegerData(t *testing.T) {
|
||||
dt, err := os.ReadFile(bktracesFixture)
|
||||
require.NoError(t, err)
|
||||
|
||||
spanStubs, err := ParseSpanStubs(bytes.NewReader(dt))
|
||||
require.NoError(t, err)
|
||||
|
||||
trace := spanStubs.JaegerData()
|
||||
dtJaegerTrace, err := json.MarshalIndent(trace, "", " ")
|
||||
require.NoError(t, err)
|
||||
dtJaeger, err := os.ReadFile(jaegerFixture)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(dtJaeger), string(dtJaegerTrace))
|
||||
}
|
491
util/otelutil/span.go
Normal file
491
util/otelutil/span.go
Normal file
@@ -0,0 +1,491 @@
|
||||
package otelutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"time"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/sdk/instrumentation"
|
||||
"go.opentelemetry.io/otel/sdk/resource"
|
||||
tracesdk "go.opentelemetry.io/otel/sdk/trace"
|
||||
"go.opentelemetry.io/otel/trace"
|
||||
)
|
||||
|
||||
// Span is a type similar to otel's SpanStub, but with the correct types needed
|
||||
// for handle marshaling and unmarshalling.
|
||||
type Span struct {
|
||||
// Name is the name of a specific span
|
||||
Name string
|
||||
// SpanContext is the unique SpanContext that identifies the span
|
||||
SpanContext trace.SpanContext
|
||||
// Parten is the unique SpanContext that identifies the parent of the span.
|
||||
// If the span has no parent, this span context will be invalid.
|
||||
Parent trace.SpanContext
|
||||
// SpanKind is the role the span plays in a Trace
|
||||
SpanKind trace.SpanKind
|
||||
// StartTime is the time the span started recording
|
||||
StartTime time.Time
|
||||
// EndTime returns the time the span stopped recording
|
||||
EndTime time.Time
|
||||
// Attributes are the defining attributes of a span
|
||||
Attributes []attribute.KeyValue
|
||||
// Events are all the events that occurred within the span
|
||||
Events []tracesdk.Event
|
||||
// Links are all the links the span has to other spans
|
||||
Links []tracesdk.Link
|
||||
// Status is that span status
|
||||
Status tracesdk.Status
|
||||
// DroppedAttributes is the number of attributes dropped by the span due to
|
||||
// a limit being reached
|
||||
DroppedAttributes int
|
||||
// DroppedEvents is the number of attributes dropped by the span due to a
|
||||
// limit being reached
|
||||
DroppedEvents int
|
||||
// DroppedLinks is the number of links dropped by the span due to a limit
|
||||
// being reached
|
||||
DroppedLinks int
|
||||
// ChildSpanCount is the count of spans that consider the span a direct
|
||||
// parent
|
||||
ChildSpanCount int
|
||||
// Resource is the information about the entity that produced the span
|
||||
// We have to change this type from the otel type to make this struct
|
||||
// marshallable
|
||||
Resource []attribute.KeyValue
|
||||
// InstrumentationLibrary is information about the library that produced
|
||||
// the span
|
||||
//nolint:staticcheck
|
||||
InstrumentationLibrary instrumentation.Library
|
||||
}
|
||||
|
||||
type Spans []Span
|
||||
|
||||
// Len return the length of the Spans.
|
||||
func (s Spans) Len() int {
|
||||
return len(s)
|
||||
}
|
||||
|
||||
// ReadOnlySpans return a list of tracesdk.ReadOnlySpan from span stubs.
|
||||
func (s Spans) ReadOnlySpans() []tracesdk.ReadOnlySpan {
|
||||
roSpans := make([]tracesdk.ReadOnlySpan, len(s))
|
||||
for i := range s {
|
||||
roSpans[i] = s[i].Snapshot()
|
||||
}
|
||||
return roSpans
|
||||
}
|
||||
|
||||
// ParseSpanStubs parses BuildKit trace data into a list of SpanStubs.
|
||||
func ParseSpanStubs(rdr io.Reader) (Spans, error) {
|
||||
var spanStubs []Span
|
||||
decoder := json.NewDecoder(rdr)
|
||||
for {
|
||||
var span Span
|
||||
if err := decoder.Decode(&span); err == io.EOF {
|
||||
break
|
||||
} else if err != nil {
|
||||
return nil, errors.Wrapf(err, "error decoding JSON")
|
||||
}
|
||||
spanStubs = append(spanStubs, span)
|
||||
}
|
||||
return spanStubs, nil
|
||||
}
|
||||
|
||||
// spanData is data that we need to unmarshal in custom ways.
|
||||
type spanData struct {
|
||||
Name string
|
||||
SpanContext spanContext
|
||||
Parent spanContext
|
||||
SpanKind trace.SpanKind
|
||||
StartTime time.Time
|
||||
EndTime time.Time
|
||||
Attributes []keyValue
|
||||
Events []event
|
||||
Links []link
|
||||
Status tracesdk.Status
|
||||
DroppedAttributes int
|
||||
DroppedEvents int
|
||||
DroppedLinks int
|
||||
ChildSpanCount int
|
||||
Resource []keyValue // change this type from the otel type to make this struct marshallable
|
||||
//nolint:staticcheck
|
||||
InstrumentationLibrary instrumentation.Library
|
||||
}
|
||||
|
||||
// spanContext is a custom type used to unmarshal otel SpanContext correctly.
|
||||
type spanContext struct {
|
||||
TraceID string
|
||||
SpanID string
|
||||
TraceFlags string
|
||||
TraceState string // TODO: implement, currently dropped
|
||||
Remote bool
|
||||
}
|
||||
|
||||
// event is a custom type used to unmarshal otel Event correctly.
|
||||
type event struct {
|
||||
Name string
|
||||
Attributes []keyValue
|
||||
DroppedAttributeCount int
|
||||
Time time.Time
|
||||
}
|
||||
|
||||
// link is a custom type used to unmarshal otel Link correctly.
|
||||
type link struct {
|
||||
SpanContext spanContext
|
||||
Attributes []keyValue
|
||||
DroppedAttributeCount int
|
||||
}
|
||||
|
||||
// keyValue is a custom type used to unmarshal otel KeyValue correctly.
|
||||
type keyValue struct {
|
||||
Key string
|
||||
Value value
|
||||
}
|
||||
|
||||
// value is a custom type used to unmarshal otel Value correctly.
|
||||
type value struct {
|
||||
Type string
|
||||
Value interface{}
|
||||
}
|
||||
|
||||
// UnmarshalJSON implements json.Unmarshaler for Span which allows correctly
|
||||
// retrieving attribute.KeyValue values.
|
||||
func (s *Span) UnmarshalJSON(data []byte) error {
|
||||
var sd spanData
|
||||
if err := json.NewDecoder(bytes.NewReader(data)).Decode(&sd); err != nil {
|
||||
return errors.Wrap(err, "unable to decode to spanData")
|
||||
}
|
||||
|
||||
s.Name = sd.Name
|
||||
s.SpanKind = sd.SpanKind
|
||||
s.StartTime = sd.StartTime
|
||||
s.EndTime = sd.EndTime
|
||||
s.Status = sd.Status
|
||||
s.DroppedAttributes = sd.DroppedAttributes
|
||||
s.DroppedEvents = sd.DroppedEvents
|
||||
s.DroppedLinks = sd.DroppedLinks
|
||||
s.ChildSpanCount = sd.ChildSpanCount
|
||||
s.InstrumentationLibrary = sd.InstrumentationLibrary
|
||||
|
||||
spanCtx, err := sd.SpanContext.asTraceSpanContext()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to decode spanCtx")
|
||||
}
|
||||
s.SpanContext = spanCtx
|
||||
|
||||
parent, err := sd.Parent.asTraceSpanContext()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to decode parent")
|
||||
}
|
||||
s.Parent = parent
|
||||
|
||||
var attributes []attribute.KeyValue
|
||||
for _, a := range sd.Attributes {
|
||||
kv, err := a.asAttributeKeyValue()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to decode attribute (%s)", a.Key)
|
||||
}
|
||||
attributes = append(attributes, kv)
|
||||
}
|
||||
s.Attributes = attributes
|
||||
|
||||
var events []tracesdk.Event
|
||||
for _, e := range sd.Events {
|
||||
var eventAttributes []attribute.KeyValue
|
||||
for _, a := range e.Attributes {
|
||||
kv, err := a.asAttributeKeyValue()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to decode event attribute (%s)", a.Key)
|
||||
}
|
||||
eventAttributes = append(eventAttributes, kv)
|
||||
}
|
||||
events = append(events, tracesdk.Event{
|
||||
Name: e.Name,
|
||||
Attributes: eventAttributes,
|
||||
DroppedAttributeCount: e.DroppedAttributeCount,
|
||||
Time: e.Time,
|
||||
})
|
||||
}
|
||||
s.Events = events
|
||||
|
||||
var links []tracesdk.Link
|
||||
for _, l := range sd.Links {
|
||||
linkSpanCtx, err := l.SpanContext.asTraceSpanContext()
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "unable to decode linkSpanCtx")
|
||||
}
|
||||
var linkAttributes []attribute.KeyValue
|
||||
for _, a := range l.Attributes {
|
||||
kv, err := a.asAttributeKeyValue()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to decode link attribute (%s)", a.Key)
|
||||
}
|
||||
linkAttributes = append(linkAttributes, kv)
|
||||
}
|
||||
links = append(links, tracesdk.Link{
|
||||
SpanContext: linkSpanCtx,
|
||||
Attributes: linkAttributes,
|
||||
DroppedAttributeCount: l.DroppedAttributeCount,
|
||||
})
|
||||
}
|
||||
s.Links = links
|
||||
|
||||
var resources []attribute.KeyValue
|
||||
for _, r := range sd.Resource {
|
||||
kv, err := r.asAttributeKeyValue()
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "unable to decode resource (%s)", r.Key)
|
||||
}
|
||||
resources = append(resources, kv)
|
||||
}
|
||||
s.Resource = resources
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// asTraceSpanContext converts the internal spanContext representation to an
|
||||
// otel one.
|
||||
func (sc *spanContext) asTraceSpanContext() (trace.SpanContext, error) {
|
||||
traceID, err := traceIDFromHex(sc.TraceID)
|
||||
if err != nil {
|
||||
return trace.SpanContext{}, errors.Wrap(err, "unable to parse trace id")
|
||||
}
|
||||
spanID, err := spanIDFromHex(sc.SpanID)
|
||||
if err != nil {
|
||||
return trace.SpanContext{}, errors.Wrap(err, "unable to parse span id")
|
||||
}
|
||||
traceFlags := trace.TraceFlags(0x00)
|
||||
if sc.TraceFlags == "01" {
|
||||
traceFlags = trace.TraceFlags(0x01)
|
||||
}
|
||||
config := trace.SpanContextConfig{
|
||||
TraceID: traceID,
|
||||
SpanID: spanID,
|
||||
TraceFlags: traceFlags,
|
||||
Remote: sc.Remote,
|
||||
}
|
||||
return trace.NewSpanContext(config), nil
|
||||
}
|
||||
|
||||
// asAttributeKeyValue converts the internal keyValue representation to an
|
||||
// otel one.
|
||||
func (kv *keyValue) asAttributeKeyValue() (attribute.KeyValue, error) {
|
||||
// value types get encoded as string
|
||||
switch kv.Value.Type {
|
||||
case attribute.INVALID.String():
|
||||
return attribute.KeyValue{}, errors.New("invalid value type")
|
||||
case attribute.BOOL.String():
|
||||
return attribute.Bool(kv.Key, kv.Value.Value.(bool)), nil
|
||||
case attribute.INT64.String():
|
||||
// value could be int64 or float64, so handle both cases (float64 comes
|
||||
// from json unmarshal)
|
||||
var v int64
|
||||
switch i := kv.Value.Value.(type) {
|
||||
case int64:
|
||||
v = i
|
||||
case float64:
|
||||
v = int64(i)
|
||||
}
|
||||
return attribute.Int64(kv.Key, v), nil
|
||||
case attribute.FLOAT64.String():
|
||||
return attribute.Float64(kv.Key, kv.Value.Value.(float64)), nil
|
||||
case attribute.STRING.String():
|
||||
return attribute.String(kv.Key, kv.Value.Value.(string)), nil
|
||||
case attribute.BOOLSLICE.String():
|
||||
return attribute.BoolSlice(kv.Key, kv.Value.Value.([]bool)), nil
|
||||
case attribute.INT64SLICE.String():
|
||||
// handle both float64 and int64 (float64 comes from json unmarshal)
|
||||
var v []int64
|
||||
switch sli := kv.Value.Value.(type) {
|
||||
case []int64:
|
||||
v = sli
|
||||
case []float64:
|
||||
for i := range sli {
|
||||
v = append(v, int64(sli[i]))
|
||||
}
|
||||
}
|
||||
return attribute.Int64Slice(kv.Key, v), nil
|
||||
case attribute.FLOAT64SLICE.String():
|
||||
return attribute.Float64Slice(kv.Key, kv.Value.Value.([]float64)), nil
|
||||
case attribute.STRINGSLICE.String():
|
||||
var strSli []string
|
||||
// sometimes we can get an []interface{} instead of a []string, so
|
||||
// always cast to []string if that happens.
|
||||
switch sli := kv.Value.Value.(type) {
|
||||
case []string:
|
||||
strSli = sli
|
||||
case []interface{}:
|
||||
for i := range sli {
|
||||
var v string
|
||||
// best case we have a string, otherwise, cast it using
|
||||
// fmt.Sprintf
|
||||
if str, ok := sli[i].(string); ok {
|
||||
v = str
|
||||
} else {
|
||||
v = fmt.Sprintf("%v", sli[i])
|
||||
}
|
||||
// add the string to the slice
|
||||
strSli = append(strSli, v)
|
||||
}
|
||||
default:
|
||||
return attribute.KeyValue{}, errors.Errorf("got unsupported type %q for %s", reflect.ValueOf(kv.Value.Value).Kind(), attribute.STRINGSLICE.String())
|
||||
}
|
||||
return attribute.StringSlice(kv.Key, strSli), nil
|
||||
default:
|
||||
return attribute.KeyValue{}, errors.Errorf("unknown value type %s", kv.Value.Type)
|
||||
}
|
||||
}
|
||||
|
||||
// traceIDFromHex returns a TraceID from a hex string if it is compliant with
|
||||
// the W3C trace-context specification and removes the validity check.
|
||||
// https://www.w3.org/TR/trace-context/#trace-id
|
||||
func traceIDFromHex(h string) (trace.TraceID, error) {
|
||||
t := trace.TraceID{}
|
||||
if len(h) != 32 {
|
||||
return t, errors.New("unable to parse trace id")
|
||||
}
|
||||
if err := decodeHex(h, t[:]); err != nil {
|
||||
return t, err
|
||||
}
|
||||
return t, nil
|
||||
}
|
||||
|
||||
// spanIDFromHex returns a SpanID from a hex string if it is compliant with the
|
||||
// W3C trace-context specification and removes the validity check.
|
||||
// https://www.w3.org/TR/trace-context/#parent-id
|
||||
func spanIDFromHex(h string) (trace.SpanID, error) {
|
||||
s := trace.SpanID{}
|
||||
if len(h) != 16 {
|
||||
return s, errors.New("unable to parse span id of length: %d")
|
||||
}
|
||||
if err := decodeHex(h, s[:]); err != nil {
|
||||
return s, err
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
// decodeHex decodes hex in a manner compliant with otel.
|
||||
func decodeHex(h string, b []byte) error {
|
||||
for _, r := range h {
|
||||
switch {
|
||||
case 'a' <= r && r <= 'f':
|
||||
continue
|
||||
case '0' <= r && r <= '9':
|
||||
continue
|
||||
default:
|
||||
return errors.New("unable to parse hex id")
|
||||
}
|
||||
}
|
||||
decoded, err := hex.DecodeString(h)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
copy(b, decoded)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Snapshot turns a Span into a ReadOnlySpan which is exportable by otel.
|
||||
func (s *Span) Snapshot() tracesdk.ReadOnlySpan {
|
||||
return spanSnapshot{
|
||||
name: s.Name,
|
||||
spanContext: s.SpanContext,
|
||||
parent: s.Parent,
|
||||
spanKind: s.SpanKind,
|
||||
startTime: s.StartTime,
|
||||
endTime: s.EndTime,
|
||||
attributes: s.Attributes,
|
||||
events: s.Events,
|
||||
links: s.Links,
|
||||
status: s.Status,
|
||||
droppedAttributes: s.DroppedAttributes,
|
||||
droppedEvents: s.DroppedEvents,
|
||||
droppedLinks: s.DroppedLinks,
|
||||
childSpanCount: s.ChildSpanCount,
|
||||
resource: resource.NewSchemaless(s.Resource...),
|
||||
instrumentationScope: s.InstrumentationLibrary,
|
||||
}
|
||||
}
|
||||
|
||||
// spanSnapshot is a helper type for transforming a Span into a ReadOnlySpan.
|
||||
type spanSnapshot struct {
|
||||
// Embed the interface to implement the private method.
|
||||
tracesdk.ReadOnlySpan
|
||||
|
||||
name string
|
||||
spanContext trace.SpanContext
|
||||
parent trace.SpanContext
|
||||
spanKind trace.SpanKind
|
||||
startTime time.Time
|
||||
endTime time.Time
|
||||
attributes []attribute.KeyValue
|
||||
events []tracesdk.Event
|
||||
links []tracesdk.Link
|
||||
status tracesdk.Status
|
||||
droppedAttributes int
|
||||
droppedEvents int
|
||||
droppedLinks int
|
||||
childSpanCount int
|
||||
resource *resource.Resource
|
||||
instrumentationScope instrumentation.Scope
|
||||
}
|
||||
|
||||
// Name returns the Name of the snapshot
|
||||
func (s spanSnapshot) Name() string { return s.name }
|
||||
|
||||
// SpanContext returns the SpanContext of the snapshot
|
||||
func (s spanSnapshot) SpanContext() trace.SpanContext { return s.spanContext }
|
||||
|
||||
// Parent returns the Parent of the snapshot
|
||||
func (s spanSnapshot) Parent() trace.SpanContext { return s.parent }
|
||||
|
||||
// SpanKind returns the SpanKind of the snapshot
|
||||
func (s spanSnapshot) SpanKind() trace.SpanKind { return s.spanKind }
|
||||
|
||||
// StartTime returns the StartTime of the snapshot
|
||||
func (s spanSnapshot) StartTime() time.Time { return s.startTime }
|
||||
|
||||
// EndTime returns the EndTime of the snapshot
|
||||
func (s spanSnapshot) EndTime() time.Time { return s.endTime }
|
||||
|
||||
// Attributes returns the Attributes of the snapshot
|
||||
func (s spanSnapshot) Attributes() []attribute.KeyValue { return s.attributes }
|
||||
|
||||
// Links returns the Links of the snapshot
|
||||
func (s spanSnapshot) Links() []tracesdk.Link { return s.links }
|
||||
|
||||
// Events return the Events of the snapshot
|
||||
func (s spanSnapshot) Events() []tracesdk.Event { return s.events }
|
||||
|
||||
// Status returns the Status of the snapshot
|
||||
func (s spanSnapshot) Status() tracesdk.Status { return s.status }
|
||||
|
||||
// DroppedAttributes returns the DroppedAttributes of the snapshot
|
||||
func (s spanSnapshot) DroppedAttributes() int { return s.droppedAttributes }
|
||||
|
||||
// DroppedLinks returns the DroppedLinks of the snapshot
|
||||
func (s spanSnapshot) DroppedLinks() int { return s.droppedLinks }
|
||||
|
||||
// DroppedEvents returns the DroppedEvents of the snapshot
|
||||
func (s spanSnapshot) DroppedEvents() int { return s.droppedEvents }
|
||||
|
||||
// ChildSpanCount returns the ChildSpanCount of the snapshot
|
||||
func (s spanSnapshot) ChildSpanCount() int { return s.childSpanCount }
|
||||
|
||||
// Resource returns the Resource of the snapshot
|
||||
func (s spanSnapshot) Resource() *resource.Resource { return s.resource }
|
||||
|
||||
// InstrumentationScope returns the InstrumentationScope of the snapshot
|
||||
func (s spanSnapshot) InstrumentationScope() instrumentation.Scope {
|
||||
return s.instrumentationScope
|
||||
}
|
||||
|
||||
// InstrumentationLibrary returns the InstrumentationLibrary of the snapshot
|
||||
//
|
||||
//nolint:staticcheck
|
||||
func (s spanSnapshot) InstrumentationLibrary() instrumentation.Library {
|
||||
return s.instrumentationScope
|
||||
}
|
159
util/otelutil/span_test.go
Normal file
159
util/otelutil/span_test.go
Normal file
@@ -0,0 +1,159 @@
|
||||
package otelutil
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"os"
|
||||
"testing"
|
||||
|
||||
"github.com/stretchr/testify/assert"
|
||||
"github.com/stretchr/testify/require"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/exporters/stdout/stdouttrace"
|
||||
)
|
||||
|
||||
// curl -s --unix-socket /tmp/docker-desktop-build-dev.sock http://localhost/blobs/default/default?digest=sha256:3103104e9fa908087bd47572da6ad9a5a7bf973608f736536d18d635a7da0140 -X GET > ./fixtures/bktraces.json
|
||||
const bktracesFixture = "./fixtures/bktraces.json"
|
||||
|
||||
const otlpFixture = "./fixtures/otlp.json"
|
||||
|
||||
func TestParseSpanStubs(t *testing.T) {
|
||||
dt, err := os.ReadFile(bktracesFixture)
|
||||
require.NoError(t, err)
|
||||
|
||||
spanStubs, err := ParseSpanStubs(bytes.NewReader(dt))
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, 73, len(spanStubs))
|
||||
|
||||
dtSpanStubs, err := json.MarshalIndent(spanStubs, "", " ")
|
||||
require.NoError(t, err)
|
||||
dtotel, err := os.ReadFile(otlpFixture)
|
||||
require.NoError(t, err)
|
||||
require.Equal(t, string(dtotel), string(dtSpanStubs))
|
||||
|
||||
exp, err := stdouttrace.New(stdouttrace.WithPrettyPrint())
|
||||
require.NoError(t, err)
|
||||
require.NoError(t, exp.ExportSpans(context.Background(), spanStubs.ReadOnlySpans()))
|
||||
}
|
||||
|
||||
func TestAsAttributeKeyValue(t *testing.T) {
|
||||
type args struct {
|
||||
Type string
|
||||
value any
|
||||
}
|
||||
tests := []struct {
|
||||
name string
|
||||
args args
|
||||
want attribute.KeyValue
|
||||
}{
|
||||
{
|
||||
name: "string",
|
||||
args: args{
|
||||
Type: attribute.STRING.String(),
|
||||
value: "value",
|
||||
},
|
||||
want: attribute.String("key", "value"),
|
||||
},
|
||||
{
|
||||
name: "int64 (int64)",
|
||||
args: args{
|
||||
Type: attribute.INT64.String(),
|
||||
value: int64(1),
|
||||
},
|
||||
want: attribute.Int64("key", 1),
|
||||
},
|
||||
{
|
||||
name: "int64 (float64)",
|
||||
args: args{
|
||||
Type: attribute.INT64.String(),
|
||||
value: float64(1.0),
|
||||
},
|
||||
want: attribute.Int64("key", 1),
|
||||
},
|
||||
{
|
||||
name: "bool",
|
||||
args: args{
|
||||
Type: attribute.BOOL.String(),
|
||||
value: true,
|
||||
},
|
||||
want: attribute.Bool("key", true),
|
||||
},
|
||||
{
|
||||
name: "float64",
|
||||
args: args{
|
||||
Type: attribute.FLOAT64.String(),
|
||||
value: float64(1.0),
|
||||
},
|
||||
want: attribute.Float64("key", 1.0),
|
||||
},
|
||||
{
|
||||
name: "float64slice",
|
||||
args: args{
|
||||
Type: attribute.FLOAT64SLICE.String(),
|
||||
value: []float64{1.0, 2.0},
|
||||
},
|
||||
want: attribute.Float64Slice("key", []float64{1.0, 2.0}),
|
||||
},
|
||||
{
|
||||
name: "int64slice (int64)",
|
||||
args: args{
|
||||
Type: attribute.INT64SLICE.String(),
|
||||
value: []int64{1, 2},
|
||||
},
|
||||
want: attribute.Int64Slice("key", []int64{1, 2}),
|
||||
},
|
||||
{
|
||||
name: "int64slice (float64)",
|
||||
args: args{
|
||||
Type: attribute.INT64SLICE.String(),
|
||||
value: []float64{1.0, 2.0},
|
||||
},
|
||||
want: attribute.Int64Slice("key", []int64{1, 2}),
|
||||
},
|
||||
{
|
||||
name: "boolslice",
|
||||
args: args{
|
||||
Type: attribute.BOOLSLICE.String(),
|
||||
value: []bool{true, false},
|
||||
},
|
||||
want: attribute.BoolSlice("key", []bool{true, false}),
|
||||
},
|
||||
{
|
||||
name: "stringslice (strings)",
|
||||
args: args{
|
||||
Type: attribute.STRINGSLICE.String(),
|
||||
value: []string{"value1", "value2"},
|
||||
},
|
||||
want: attribute.StringSlice("key", []string{"value1", "value2"}),
|
||||
},
|
||||
{
|
||||
name: "stringslice (interface of string)",
|
||||
args: args{
|
||||
Type: attribute.STRINGSLICE.String(),
|
||||
value: []interface{}{"value1", "value2"},
|
||||
},
|
||||
want: attribute.StringSlice("key", []string{"value1", "value2"}),
|
||||
},
|
||||
{
|
||||
name: "stringslice (interface mixed)",
|
||||
args: args{
|
||||
Type: attribute.STRINGSLICE.String(),
|
||||
value: []interface{}{"value1", 2},
|
||||
},
|
||||
want: attribute.StringSlice("key", []string{"value1", "2"}),
|
||||
},
|
||||
}
|
||||
for _, tt := range tests {
|
||||
tt := tt
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
kv := keyValue{
|
||||
Key: "key",
|
||||
Value: value{Type: tt.args.Type, Value: tt.args.value},
|
||||
}
|
||||
attr, err := kv.asAttributeKeyValue()
|
||||
require.NoError(t, err, "failed to convert key value to attribute key value")
|
||||
assert.Equal(t, tt.want, attr, "attribute key value mismatch")
|
||||
})
|
||||
}
|
||||
}
|
@@ -122,6 +122,7 @@ func NewPrinter(ctx context.Context, out console.File, mode progressui.DisplayMo
|
||||
for {
|
||||
pw.status = make(chan *client.SolveStatus)
|
||||
pw.done = make(chan struct{})
|
||||
pw.closeOnce = sync.Once{}
|
||||
|
||||
pw.logMu.Lock()
|
||||
pw.logSourceMap = map[digest.Digest]interface{}{}
|
||||
|
41
vendor/github.com/Azure/go-ansiterm/SECURITY.md
generated
vendored
Normal file
41
vendor/github.com/Azure/go-ansiterm/SECURITY.md
generated
vendored
Normal file
@@ -0,0 +1,41 @@
|
||||
<!-- BEGIN MICROSOFT SECURITY.MD V0.0.8 BLOCK -->
|
||||
|
||||
## Security
|
||||
|
||||
Microsoft takes the security of our software products and services seriously, which includes all source code repositories managed through our GitHub organizations, which include [Microsoft](https://github.com/microsoft), [Azure](https://github.com/Azure), [DotNet](https://github.com/dotnet), [AspNet](https://github.com/aspnet), [Xamarin](https://github.com/xamarin), and [our GitHub organizations](https://opensource.microsoft.com/).
|
||||
|
||||
If you believe you have found a security vulnerability in any Microsoft-owned repository that meets [Microsoft's definition of a security vulnerability](https://aka.ms/opensource/security/definition), please report it to us as described below.
|
||||
|
||||
## Reporting Security Issues
|
||||
|
||||
**Please do not report security vulnerabilities through public GitHub issues.**
|
||||
|
||||
Instead, please report them to the Microsoft Security Response Center (MSRC) at [https://msrc.microsoft.com/create-report](https://aka.ms/opensource/security/create-report).
|
||||
|
||||
If you prefer to submit without logging in, send email to [secure@microsoft.com](mailto:secure@microsoft.com). If possible, encrypt your message with our PGP key; please download it from the [Microsoft Security Response Center PGP Key page](https://aka.ms/opensource/security/pgpkey).
|
||||
|
||||
You should receive a response within 24 hours. If for some reason you do not, please follow up via email to ensure we received your original message. Additional information can be found at [microsoft.com/msrc](https://aka.ms/opensource/security/msrc).
|
||||
|
||||
Please include the requested information listed below (as much as you can provide) to help us better understand the nature and scope of the possible issue:
|
||||
|
||||
* Type of issue (e.g. buffer overflow, SQL injection, cross-site scripting, etc.)
|
||||
* Full paths of source file(s) related to the manifestation of the issue
|
||||
* The location of the affected source code (tag/branch/commit or direct URL)
|
||||
* Any special configuration required to reproduce the issue
|
||||
* Step-by-step instructions to reproduce the issue
|
||||
* Proof-of-concept or exploit code (if possible)
|
||||
* Impact of the issue, including how an attacker might exploit the issue
|
||||
|
||||
This information will help us triage your report more quickly.
|
||||
|
||||
If you are reporting for a bug bounty, more complete reports can contribute to a higher bounty award. Please visit our [Microsoft Bug Bounty Program](https://aka.ms/opensource/security/bounty) page for more details about our active programs.
|
||||
|
||||
## Preferred Languages
|
||||
|
||||
We prefer all communications to be in English.
|
||||
|
||||
## Policy
|
||||
|
||||
Microsoft follows the principle of [Coordinated Vulnerability Disclosure](https://aka.ms/opensource/security/cvd).
|
||||
|
||||
<!-- END MICROSOFT SECURITY.MD BLOCK -->
|
18
vendor/github.com/Azure/go-ansiterm/osc_string_state.go
generated
vendored
18
vendor/github.com/Azure/go-ansiterm/osc_string_state.go
generated
vendored
@@ -11,21 +11,13 @@ func (oscState oscStringState) Handle(b byte) (s state, e error) {
|
||||
return nextState, err
|
||||
}
|
||||
|
||||
switch {
|
||||
case isOscStringTerminator(b):
|
||||
// There are several control characters and sequences which can
|
||||
// terminate an OSC string. Most of them are handled by the baseState
|
||||
// handler. The ANSI_BEL character is a special case which behaves as a
|
||||
// terminator only for an OSC string.
|
||||
if b == ANSI_BEL {
|
||||
return oscState.parser.ground, nil
|
||||
}
|
||||
|
||||
return oscState, nil
|
||||
}
|
||||
|
||||
// See below for OSC string terminators for linux
|
||||
// http://man7.org/linux/man-pages/man4/console_codes.4.html
|
||||
func isOscStringTerminator(b byte) bool {
|
||||
|
||||
if b == ANSI_BEL || b == 0x5C {
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
4
vendor/github.com/docker/cli/cli-plugins/hooks/printer.go
generated
vendored
4
vendor/github.com/docker/cli/cli-plugins/hooks/printer.go
generated
vendored
@@ -11,8 +11,8 @@ func PrintNextSteps(out io.Writer, messages []string) {
|
||||
if len(messages) == 0 {
|
||||
return
|
||||
}
|
||||
fmt.Fprintln(out, aec.Bold.Apply("\nWhat's next:"))
|
||||
_, _ = fmt.Fprintln(out, aec.Bold.Apply("\nWhat's next:"))
|
||||
for _, n := range messages {
|
||||
_, _ = fmt.Fprintf(out, " %s\n", n)
|
||||
_, _ = fmt.Fprintln(out, " ", n)
|
||||
}
|
||||
}
|
||||
|
3
vendor/github.com/docker/cli/cli-plugins/manager/cobra.go
generated
vendored
3
vendor/github.com/docker/cli/cli-plugins/manager/cobra.go
generated
vendored
@@ -52,7 +52,6 @@ func AddPluginCommandStubs(dockerCli command.Cli, rootCmd *cobra.Command) (err e
|
||||
return
|
||||
}
|
||||
for _, p := range plugins {
|
||||
p := p
|
||||
vendor := p.Vendor
|
||||
if vendor == "" {
|
||||
vendor = "unknown"
|
||||
@@ -82,7 +81,7 @@ func AddPluginCommandStubs(dockerCli command.Cli, rootCmd *cobra.Command) (err e
|
||||
cmd.HelpFunc()(rootCmd, args)
|
||||
return nil
|
||||
}
|
||||
return fmt.Errorf("docker: '%s' is not a docker command.\nSee 'docker --help'", cmd.Name())
|
||||
return fmt.Errorf("docker: unknown command: docker %s\n\nRun 'docker --help' for more information", cmd.Name())
|
||||
},
|
||||
ValidArgsFunction: func(cmd *cobra.Command, args []string, toComplete string) ([]string, cobra.ShellCompDirective) {
|
||||
// Delegate completion to plugin
|
||||
|
24
vendor/github.com/docker/cli/cli-plugins/plugin/plugin.go
generated
vendored
24
vendor/github.com/docker/cli/cli-plugins/plugin/plugin.go
generated
vendored
@@ -3,6 +3,7 @@ package plugin
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync"
|
||||
@@ -34,7 +35,7 @@ func RunPlugin(dockerCli *command.DockerCli, plugin *cobra.Command, meta manager
|
||||
|
||||
var persistentPreRunOnce sync.Once
|
||||
PersistentPreRunE = func(cmd *cobra.Command, _ []string) error {
|
||||
var err error
|
||||
var retErr error
|
||||
persistentPreRunOnce.Do(func() {
|
||||
ctx, cancel := context.WithCancel(cmd.Context())
|
||||
cmd.SetContext(ctx)
|
||||
@@ -46,7 +47,7 @@ func RunPlugin(dockerCli *command.DockerCli, plugin *cobra.Command, meta manager
|
||||
opts = append(opts, withPluginClientConn(plugin.Name()))
|
||||
}
|
||||
opts = append(opts, command.WithEnableGlobalMeterProvider(), command.WithEnableGlobalTracerProvider())
|
||||
err = tcmd.Initialize(opts...)
|
||||
retErr = tcmd.Initialize(opts...)
|
||||
ogRunE := cmd.RunE
|
||||
if ogRunE == nil {
|
||||
ogRun := cmd.Run
|
||||
@@ -66,7 +67,7 @@ func RunPlugin(dockerCli *command.DockerCli, plugin *cobra.Command, meta manager
|
||||
return err
|
||||
}
|
||||
})
|
||||
return err
|
||||
return retErr
|
||||
}
|
||||
|
||||
cmd, args, err := tcmd.HandleGlobalFlags()
|
||||
@@ -92,18 +93,17 @@ func Run(makeCmd func(command.Cli) *cobra.Command, meta manager.Metadata) {
|
||||
plugin := makeCmd(dockerCli)
|
||||
|
||||
if err := RunPlugin(dockerCli, plugin, meta); err != nil {
|
||||
if sterr, ok := err.(cli.StatusError); ok {
|
||||
if sterr.Status != "" {
|
||||
fmt.Fprintln(dockerCli.Err(), sterr.Status)
|
||||
}
|
||||
var stErr cli.StatusError
|
||||
if errors.As(err, &stErr) {
|
||||
// StatusError should only be used for errors, and all errors should
|
||||
// have a non-zero exit status, so never exit with 0
|
||||
if sterr.StatusCode == 0 {
|
||||
os.Exit(1)
|
||||
if stErr.StatusCode == 0 { // FIXME(thaJeztah): this should never be used with a zero status-code. Check if we do this anywhere.
|
||||
stErr.StatusCode = 1
|
||||
}
|
||||
os.Exit(sterr.StatusCode)
|
||||
_, _ = fmt.Fprintln(dockerCli.Err(), stErr)
|
||||
os.Exit(stErr.StatusCode)
|
||||
}
|
||||
fmt.Fprintln(dockerCli.Err(), err)
|
||||
_, _ = fmt.Fprintln(dockerCli.Err(), err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
@@ -158,7 +158,7 @@ func newPluginCommand(dockerCli *command.DockerCli, plugin *cobra.Command, meta
|
||||
CompletionOptions: cobra.CompletionOptions{
|
||||
DisableDefaultCmd: false,
|
||||
HiddenDefaultCmd: true,
|
||||
DisableDescriptions: true,
|
||||
DisableDescriptions: os.Getenv("DOCKER_CLI_DISABLE_COMPLETION_DESCRIPTION") != "",
|
||||
},
|
||||
}
|
||||
opts, _ := cli.SetupPluginRootCommand(cmd)
|
||||
|
8
vendor/github.com/docker/cli/cli/cobra.go
generated
vendored
8
vendor/github.com/docker/cli/cli/cobra.go
generated
vendored
@@ -92,12 +92,8 @@ func FlagErrorFunc(cmd *cobra.Command, err error) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
usage := ""
|
||||
if cmd.HasSubCommands() {
|
||||
usage = "\n\n" + cmd.UsageString()
|
||||
}
|
||||
return StatusError{
|
||||
Status: fmt.Sprintf("%s\nSee '%s --help'.%s", err, cmd.CommandPath(), usage),
|
||||
Status: fmt.Sprintf("%s\n\nUsage: %s\n\nRun '%s --help' for more information", err, cmd.UseLine(), cmd.CommandPath()),
|
||||
StatusCode: 125,
|
||||
}
|
||||
}
|
||||
@@ -522,4 +518,4 @@ Run '{{.CommandPath}} COMMAND --help' for more information on a command.
|
||||
`
|
||||
|
||||
const helpTemplate = `
|
||||
{{if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
|
||||
{{- if or .Runnable .HasSubCommands}}{{.UsageString}}{{end}}`
|
||||
|
8
vendor/github.com/docker/cli/cli/command/cli.go
generated
vendored
8
vendor/github.com/docker/cli/cli/command/cli.go
generated
vendored
@@ -114,7 +114,7 @@ func (cli *DockerCli) CurrentVersion() string {
|
||||
// Client returns the APIClient
|
||||
func (cli *DockerCli) Client() client.APIClient {
|
||||
if err := cli.initialize(); err != nil {
|
||||
_, _ = fmt.Fprintf(cli.Err(), "Failed to initialize: %s\n", err)
|
||||
_, _ = fmt.Fprintln(cli.Err(), "Failed to initialize:", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
return cli.client
|
||||
@@ -272,7 +272,7 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...CLIOption)
|
||||
debug.Enable()
|
||||
}
|
||||
if opts.Context != "" && len(opts.Hosts) > 0 {
|
||||
return errors.New("conflicting options: either specify --host or --context, not both")
|
||||
return errors.New("conflicting options: cannot specify both --host and --context")
|
||||
}
|
||||
|
||||
cli.options = opts
|
||||
@@ -299,7 +299,7 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...CLIOption)
|
||||
// NewAPIClientFromFlags creates a new APIClient from command line flags
|
||||
func NewAPIClientFromFlags(opts *cliflags.ClientOptions, configFile *configfile.ConfigFile) (client.APIClient, error) {
|
||||
if opts.Context != "" && len(opts.Hosts) > 0 {
|
||||
return nil, errors.New("conflicting options: either specify --host or --context, not both")
|
||||
return nil, errors.New("conflicting options: cannot specify both --host and --context")
|
||||
}
|
||||
|
||||
storeConfig := DefaultContextStoreConfig()
|
||||
@@ -475,7 +475,7 @@ func (cli *DockerCli) DockerEndpoint() docker.Endpoint {
|
||||
if err := cli.initialize(); err != nil {
|
||||
// Note that we're not terminating here, as this function may be used
|
||||
// in cases where we're able to continue.
|
||||
_, _ = fmt.Fprintf(cli.Err(), "%v\n", cli.initErr)
|
||||
_, _ = fmt.Fprintln(cli.Err(), cli.initErr)
|
||||
}
|
||||
return cli.dockerEndpoint
|
||||
}
|
||||
|
20
vendor/github.com/docker/cli/cli/command/formatter/container.go
generated
vendored
20
vendor/github.com/docker/cli/cli/command/formatter/container.go
generated
vendored
@@ -12,7 +12,7 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/distribution/reference"
|
||||
"github.com/docker/docker/api/types"
|
||||
"github.com/docker/docker/api/types/container"
|
||||
"github.com/docker/docker/pkg/stringid"
|
||||
"github.com/docker/go-units"
|
||||
)
|
||||
@@ -67,10 +67,10 @@ ports: {{- pad .Ports 1 0}}
|
||||
}
|
||||
|
||||
// ContainerWrite renders the context for a list of containers
|
||||
func ContainerWrite(ctx Context, containers []types.Container) error {
|
||||
func ContainerWrite(ctx Context, containers []container.Summary) error {
|
||||
render := func(format func(subContext SubContext) error) error {
|
||||
for _, container := range containers {
|
||||
err := format(&ContainerContext{trunc: ctx.Trunc, c: container})
|
||||
for _, ctr := range containers {
|
||||
err := format(&ContainerContext{trunc: ctx.Trunc, c: ctr})
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@@ -84,7 +84,7 @@ func ContainerWrite(ctx Context, containers []types.Container) error {
|
||||
type ContainerContext struct {
|
||||
HeaderContext
|
||||
trunc bool
|
||||
c types.Container
|
||||
c container.Summary
|
||||
|
||||
// FieldsUsed is used in the pre-processing step to detect which fields are
|
||||
// used in the template. It's currently only used to detect use of the .Size
|
||||
@@ -193,7 +193,9 @@ func (c *ContainerContext) Command() string {
|
||||
return strconv.Quote(command)
|
||||
}
|
||||
|
||||
// CreatedAt returns the "Created" date/time of the container as a unix timestamp.
|
||||
// CreatedAt returns the formatted string representing the container's creation date/time.
|
||||
// The format may include nanoseconds if present.
|
||||
// e.g. "2006-01-02 15:04:05.999999999 -0700 MST" or "2006-01-02 15:04:05 -0700 MST"
|
||||
func (c *ContainerContext) CreatedAt() string {
|
||||
return time.Unix(c.c.Created, 0).String()
|
||||
}
|
||||
@@ -314,7 +316,7 @@ func (c *ContainerContext) Networks() string {
|
||||
// DisplayablePorts returns formatted string representing open ports of container
|
||||
// e.g. "0.0.0.0:80->9090/tcp, 9988/tcp"
|
||||
// it's used by command 'docker ps'
|
||||
func DisplayablePorts(ports []types.Port) string {
|
||||
func DisplayablePorts(ports []container.Port) string {
|
||||
type portGroup struct {
|
||||
first uint16
|
||||
last uint16
|
||||
@@ -375,12 +377,12 @@ func formGroup(key string, start, last uint16) string {
|
||||
group = fmt.Sprintf("%s-%d", group, last)
|
||||
}
|
||||
if ip != "" {
|
||||
group = fmt.Sprintf("%s:%s->%s", ip, group, group)
|
||||
group = fmt.Sprintf("%s->%s", net.JoinHostPort(ip, group), group)
|
||||
}
|
||||
return group + "/" + groupType
|
||||
}
|
||||
|
||||
func comparePorts(i, j types.Port) bool {
|
||||
func comparePorts(i, j container.Port) bool {
|
||||
if i.PrivatePort != j.PrivatePort {
|
||||
return i.PrivatePort < j.PrivatePort
|
||||
}
|
||||
|
vendor/github.com/docker/cli/cli/command/formatter/disk_usage.go (38 changes, generated, vendored)
@@ -9,6 +9,7 @@ import (
 
 	"github.com/distribution/reference"
 	"github.com/docker/docker/api/types"
+	"github.com/docker/docker/api/types/container"
 	"github.com/docker/docker/api/types/image"
 	"github.com/docker/docker/api/types/volume"
 	units "github.com/docker/go-units"
@@ -36,7 +37,7 @@ type DiskUsageContext struct {
 	Verbose     bool
 	LayersSize  int64
 	Images      []*image.Summary
-	Containers  []*types.Container
+	Containers  []*container.Summary
 	Volumes     []*volume.Volume
 	BuildCache  []*types.BuildCache
 	BuilderSize int64
@@ -124,7 +125,7 @@ func (ctx *DiskUsageContext) Write() (err error) {
 		return err
 	}
 
-	diskUsageContainersCtx := diskUsageContainersContext{containers: []*types.Container{}}
+	diskUsageContainersCtx := diskUsageContainersContext{containers: []*container.Summary{}}
 	diskUsageContainersCtx.Header = SubHeaderContext{
 		"Type":       typeHeader,
 		"TotalCount": totalHeader,
@@ -236,7 +237,7 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {
 	if err != nil {
 		return err
 	}
-	ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
+	_, _ = ctx.Output.Write([]byte("\nLocal Volumes space usage:\n\n"))
 	for _, v := range duc.Volumes {
 		if err := ctx.contextFormat(tmpl, v); err != nil {
 			return err
@@ -248,7 +249,7 @@ func (ctx *DiskUsageContext) verboseWriteTable(duc *diskUsageContext) error {
 	if err != nil {
 		return err
 	}
-	fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
+	_, _ = fmt.Fprintf(ctx.Output, "\nBuild cache usage: %s\n\n", units.HumanSize(float64(ctx.BuilderSize)))
 	for _, v := range duc.BuildCache {
 		if err := ctx.contextFormat(tmpl, v); err != nil {
 			return err
@@ -313,7 +314,7 @@ func (c *diskUsageImagesContext) Reclaimable() string {
 
 type diskUsageContainersContext struct {
 	HeaderContext
-	containers []*types.Container
+	containers []*container.Summary
 }
 
 func (c *diskUsageContainersContext) MarshalJSON() ([]byte, error) {
@@ -328,16 +329,16 @@ func (c *diskUsageContainersContext) TotalCount() string {
 	return strconv.Itoa(len(c.containers))
 }
 
-func (c *diskUsageContainersContext) isActive(container types.Container) bool {
-	return strings.Contains(container.State, "running") ||
-		strings.Contains(container.State, "paused") ||
-		strings.Contains(container.State, "restarting")
+func (c *diskUsageContainersContext) isActive(ctr container.Summary) bool {
+	return strings.Contains(ctr.State, "running") ||
+		strings.Contains(ctr.State, "paused") ||
+		strings.Contains(ctr.State, "restarting")
 }
 
 func (c *diskUsageContainersContext) Active() string {
 	used := 0
-	for _, container := range c.containers {
-		if c.isActive(*container) {
+	for _, ctr := range c.containers {
+		if c.isActive(*ctr) {
 			used++
 		}
 	}
@@ -348,22 +349,21 @@ func (c *diskUsageContainersContext) Active() string {
 func (c *diskUsageContainersContext) Size() string {
 	var size int64
 
-	for _, container := range c.containers {
-		size += container.SizeRw
+	for _, ctr := range c.containers {
+		size += ctr.SizeRw
 	}
 
 	return units.HumanSize(float64(size))
 }
 
 func (c *diskUsageContainersContext) Reclaimable() string {
-	var reclaimable int64
-	var totalSize int64
+	var reclaimable, totalSize int64
 
-	for _, container := range c.containers {
-		if !c.isActive(*container) {
-			reclaimable += container.SizeRw
+	for _, ctr := range c.containers {
+		if !c.isActive(*ctr) {
+			reclaimable += ctr.SizeRw
 		}
-		totalSize += container.SizeRw
+		totalSize += ctr.SizeRw
 	}
 
 	if totalSize > 0 {
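The recurring change in the formatter hunks above is the move from the deprecated types.Container alias to container.Summary. Below is a rough, standalone sketch (not part of the vendored diff) that exercises the new type with made-up values; isActive simply mirrors the helper shown in the hunk.

package main

import (
	"fmt"
	"strings"

	"github.com/docker/docker/api/types/container"
)

// isActive mirrors the formatter helper above: a container counts as active
// while its State contains one of these substrings.
func isActive(ctr container.Summary) bool {
	return strings.Contains(ctr.State, "running") ||
		strings.Contains(ctr.State, "paused") ||
		strings.Contains(ctr.State, "restarting")
}

func main() {
	// Made-up summaries; in the CLI these come from the daemon.
	ctrs := []container.Summary{
		{Names: []string{"/web"}, State: "running", SizeRw: 2048},
		{Names: []string{"/old"}, State: "exited", SizeRw: 4096},
	}

	var reclaimable, totalSize int64
	for _, ctr := range ctrs {
		if !isActive(ctr) {
			reclaimable += ctr.SizeRw
		}
		totalSize += ctr.SizeRw
	}
	fmt.Printf("reclaimable %d of %d bytes\n", reclaimable, totalSize)
}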
vendor/github.com/docker/cli/cli/command/registry.go (8 changes, generated, vendored)
@@ -13,9 +13,10 @@ import (
 	configtypes "github.com/docker/cli/cli/config/types"
 	"github.com/docker/cli/cli/hints"
 	"github.com/docker/cli/cli/streams"
-	"github.com/docker/docker/api/types"
+	"github.com/docker/cli/internal/tui"
 	registrytypes "github.com/docker/docker/api/types/registry"
 	"github.com/docker/docker/registry"
+	"github.com/morikuni/aec"
 	"github.com/pkg/errors"
 )
 
@@ -29,7 +30,7 @@ const (
 
 // RegistryAuthenticationPrivilegedFunc returns a RequestPrivilegeFunc from the specified registry index info
 // for the given command.
-func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) types.RequestPrivilegeFunc {
+func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInfo, cmdName string) registrytypes.RequestAuthConfig {
 	return func(ctx context.Context) (string, error) {
 		_, _ = fmt.Fprintf(cli.Out(), "\nLogin prior to %s:\n", cmdName)
 		indexServer := registry.GetAuthConfigKey(index)
@@ -179,6 +180,9 @@ func PromptUserForCredentials(ctx context.Context, cli Cli, argUser, argPassword
 		}
 	}()
 
+	out := tui.NewOutput(cli.Err())
+	out.PrintNote("A Personal Access Token (PAT) can be used instead.\n" +
+		"To create a PAT, visit " + aec.Underline.Apply("https://app.docker.com/settings") + "\n\n")
 	argPassword, err = PromptForInput(ctx, cli.In(), cli.Out(), "Password: ")
 	if err != nil {
 		return registrytypes.AuthConfig{}, err
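RegistryAuthenticationPrivilegedFunc now returns registrytypes.RequestAuthConfig instead of the deprecated types.RequestPrivilegeFunc; both shapes are a callback that re-prompts for credentials and yields a base64-encoded auth header. A hypothetical consumer is sketched below, assuming RequestAuthConfig has the func(context.Context) (string, error) shape used by the closure in the hunk and that registrytypes.EncodeAuthConfig is available; resolveAuth and the stub are illustrative only.

package main

import (
	"context"
	"fmt"

	registrytypes "github.com/docker/docker/api/types/registry"
)

// resolveAuth is a hypothetical helper: it invokes the privilege callback to
// obtain a fresh, base64-encoded registry auth header (e.g. after a 401).
func resolveAuth(ctx context.Context, privilegeFn registrytypes.RequestAuthConfig) (string, error) {
	encodedAuth, err := privilegeFn(ctx)
	if err != nil {
		return "", fmt.Errorf("requesting registry credentials: %w", err)
	}
	return encodedAuth, nil
}

func main() {
	// A stub standing in for the callback returned by
	// RegistryAuthenticationPrivilegedFunc.
	var stub registrytypes.RequestAuthConfig = func(ctx context.Context) (string, error) {
		return registrytypes.EncodeAuthConfig(registrytypes.AuthConfig{Username: "demo", Password: "secret"})
	}

	header, err := resolveAuth(context.Background(), stub)
	if err != nil {
		panic(err)
	}
	fmt.Println("X-Registry-Auth:", header)
}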
vendor/github.com/docker/cli/cli/command/telemetry.go (2 changes, generated, vendored)
@@ -14,7 +14,7 @@ import (
 	"go.opentelemetry.io/otel/sdk/metric/metricdata"
 	"go.opentelemetry.io/otel/sdk/resource"
 	sdktrace "go.opentelemetry.io/otel/sdk/trace"
-	semconv "go.opentelemetry.io/otel/semconv/v1.21.0"
+	semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
 	"go.opentelemetry.io/otel/trace"
 )
 
vendor/github.com/docker/cli/cli/command/utils.go (2 changes, generated, vendored)
@@ -199,7 +199,7 @@ func PruneFilters(dockerCli Cli, pruneFilters filters.Args) filters.Args {
 // AddPlatformFlag adds `platform` to a set of flags for API version 1.32 and later.
 func AddPlatformFlag(flags *pflag.FlagSet, target *string) {
 	flags.StringVar(target, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Set platform if server is multi-platform capable")
-	flags.SetAnnotation("platform", "version", []string{"1.32"})
+	_ = flags.SetAnnotation("platform", "version", []string{"1.32"})
 }
 
 // ValidateOutputPath validates the output paths of the `export` and `save` commands.
vendor/github.com/docker/cli/cli/config/config.go (2 changes, generated, vendored)
@@ -143,7 +143,7 @@ func load(configDir string) (*configfile.ConfigFile, error) {
 	defer file.Close()
 	err = configFile.LoadFromReader(file)
 	if err != nil {
-		err = errors.Wrapf(err, "loading config file: %s: ", filename)
+		err = errors.Wrapf(err, "parsing config file (%s)", filename)
 	}
 	return configFile, err
 }
vendor/github.com/docker/cli/cli/config/credentials/file_store.go (36 changes, generated, vendored)
@@ -1,9 +1,12 @@
 package credentials
 
 import (
+	"fmt"
 	"net"
 	"net/url"
+	"os"
 	"strings"
+	"sync/atomic"
 
 	"github.com/docker/cli/cli/config/types"
 )
@@ -57,6 +60,21 @@ func (c *fileStore) GetAll() (map[string]types.AuthConfig, error) {
 	return c.file.GetAuthConfigs(), nil
 }
 
+// unencryptedWarning warns the user when using an insecure credential storage.
+// After a deprecation period, user will get prompted if stdin and stderr are a terminal.
+// Otherwise, we'll assume they want it (sadly), because people may have been scripting
+// insecure logins and we don't want to break them. Maybe they'll see the warning in their
+// logs and fix things.
+const unencryptedWarning = `
+WARNING! Your credentials are stored unencrypted in '%s'.
+Configure a credential helper to remove this warning. See
+https://docs.docker.com/go/credential-store/
+`
+
+// alreadyPrinted ensures that we only print the unencryptedWarning once per
+// CLI invocation (no need to warn the user multiple times per command).
+var alreadyPrinted atomic.Bool
+
 // Store saves the given credentials in the file store. This function is
 // idempotent and does not update the file if credentials did not change.
 func (c *fileStore) Store(authConfig types.AuthConfig) error {
@@ -66,15 +84,19 @@ func (c *fileStore) Store(authConfig types.AuthConfig) error {
 		return nil
 	}
 	authConfigs[authConfig.ServerAddress] = authConfig
-	return c.file.Save()
-}
+	if err := c.file.Save(); err != nil {
+		return err
+	}
 
-func (c *fileStore) GetFilename() string {
-	return c.file.GetFilename()
-}
+	if !alreadyPrinted.Load() && authConfig.Password != "" {
+		// Display a warning if we're storing the users password (not a token).
+		//
+		// FIXME(thaJeztah): make output configurable instead of hardcoding to os.Stderr
+		_, _ = fmt.Fprintln(os.Stderr, fmt.Sprintf(unencryptedWarning, c.file.GetFilename()))
+		alreadyPrinted.Store(true)
+	}
 
-func (c *fileStore) IsFileStore() bool {
-	return true
+	return nil
 }
 
 // ConvertToHostname converts a registry url which has http|https prepended
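The new unencrypted-credentials warning uses a package-level atomic.Bool so the message is emitted at most once per CLI invocation, even when Store runs several times. A stripped-down sketch of that print-once pattern follows; warnOnce is illustrative, not the vendored code, and mirrors the Load/Store sequence in the hunk above.

package main

import (
	"fmt"
	"os"
	"sync/atomic"
)

const unencryptedWarning = "WARNING! Your credentials are stored unencrypted in '%s'.\n"

// alreadyPrinted makes the warning fire at most once per process.
var alreadyPrinted atomic.Bool

func warnOnce(filename string) {
	if !alreadyPrinted.Load() {
		fmt.Fprintf(os.Stderr, unencryptedWarning, filename)
		alreadyPrinted.Store(true)
	}
}

func main() {
	warnOnce("/home/user/.docker/config.json") // prints the warning
	warnOnce("/home/user/.docker/config.json") // silent on repeat calls
}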
vendor/github.com/docker/cli/cli/connhelper/commandconn/commandconn.go (22 changes, generated, vendored)
@@ -33,18 +33,28 @@ import (
 )
 
 // New returns net.Conn
-func New(_ context.Context, cmd string, args ...string) (net.Conn, error) {
-	var (
-		c   commandConn
-		err error
-	)
-	c.cmd = exec.Command(cmd, args...)
+func New(ctx context.Context, cmd string, args ...string) (net.Conn, error) {
+	// Don't kill the ssh process if the context is cancelled. Killing the
+	// ssh process causes an error when go's http.Client tries to reuse the
+	// net.Conn (commandConn).
+	//
+	// Not passing down the Context might seem counter-intuitive, but in this
+	// case, the lifetime of the process should be managed by the http.Client,
+	// not the caller's Context.
+	//
+	// Further details:
+	//
+	// - https://github.com/docker/cli/pull/3900
+	// - https://github.com/docker/compose/issues/9448#issuecomment-1264263721
+	ctx = context.WithoutCancel(ctx)
+	c := commandConn{cmd: exec.CommandContext(ctx, cmd, args...)}
 	// we assume that args never contains sensitive information
 	logrus.Debugf("commandconn: starting %s with %v", cmd, args)
 	c.cmd.Env = os.Environ()
 	c.cmd.SysProcAttr = &syscall.SysProcAttr{}
 	setPdeathsig(c.cmd)
 	createSession(c.cmd)
+	var err error
 	c.stdin, err = c.cmd.StdinPipe()
 	if err != nil {
 		return nil, err
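The rewritten New keeps the caller's context values but detaches the ssh process from the caller's cancellation via context.WithoutCancel (Go 1.21+), so the http.Client rather than the caller decides when the connection dies. A small standalone sketch of that behaviour; ctxKey and the request-id value are made up for illustration.

package main

import (
	"context"
	"fmt"
)

type ctxKey string

func main() {
	parent, cancel := context.WithCancel(context.Background())
	parent = context.WithValue(parent, ctxKey("request-id"), "abc123")

	// WithoutCancel keeps the parent's values but drops its cancellation and
	// deadline, which is what commandconn wants for the ssh process lifetime.
	detached := context.WithoutCancel(parent)

	cancel()

	fmt.Println(parent.Err())                         // context.Canceled
	fmt.Println(detached.Err())                       // <nil>
	fmt.Println(detached.Value(ctxKey("request-id"))) // abc123
}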
vendor/github.com/docker/cli/cli/context/store/metadatastore.go (4 changes, generated, vendored)
@@ -12,7 +12,7 @@ import (
 	"sort"
 
 	"github.com/docker/docker/errdefs"
-	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/atomicwriter"
 	"github.com/fvbommel/sortorder"
 	"github.com/pkg/errors"
 )
@@ -40,7 +40,7 @@ func (s *metadataStore) createOrUpdate(meta Metadata) error {
 	if err != nil {
 		return err
 	}
-	return ioutils.AtomicWriteFile(filepath.Join(contextDir, metaFile), bytes, 0o644)
+	return atomicwriter.WriteFile(filepath.Join(contextDir, metaFile), bytes, 0o644)
 }
 
 func parseTypedOrMap(payload []byte, getter TypeGetter) (any, error) {
vendor/github.com/docker/cli/cli/context/store/tlsstore.go (4 changes, generated, vendored)
@@ -5,7 +5,7 @@ import (
 	"path/filepath"
 
 	"github.com/docker/docker/errdefs"
-	"github.com/docker/docker/pkg/ioutils"
+	"github.com/docker/docker/pkg/atomicwriter"
 	"github.com/pkg/errors"
 )
 
@@ -32,7 +32,7 @@ func (s *tlsStore) createOrUpdate(name, endpointName, filename string, data []by
 	if err := os.MkdirAll(endpointDir, 0o700); err != nil {
 		return err
 	}
-	return ioutils.AtomicWriteFile(filepath.Join(endpointDir, filename), data, 0o600)
+	return atomicwriter.WriteFile(filepath.Join(endpointDir, filename), data, 0o600)
 }
 
 func (s *tlsStore) getData(name, endpointName, filename string) ([]byte, error) {
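Both context-store files swap the deprecated ioutils.AtomicWriteFile for pkg/atomicwriter.WriteFile with the same call shape. The classic technique behind such helpers is write-to-temp-then-rename; the generic sketch below is illustrative only (writeFileAtomic is not the vendored implementation) and assumes a filesystem where rename within one directory is atomic.

package main

import (
	"os"
	"path/filepath"
)

// writeFileAtomic writes data to a temp file in the target directory, syncs
// it, then renames it over the destination so readers never observe a
// half-written file.
func writeFileAtomic(name string, data []byte, perm os.FileMode) error {
	dir := filepath.Dir(name)
	tmp, err := os.CreateTemp(dir, ".tmp-"+filepath.Base(name))
	if err != nil {
		return err
	}
	defer os.Remove(tmp.Name()) // best-effort cleanup; fails harmlessly after rename

	if _, err := tmp.Write(data); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Chmod(perm); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Sync(); err != nil {
		tmp.Close()
		return err
	}
	if err := tmp.Close(); err != nil {
		return err
	}
	return os.Rename(tmp.Name(), name)
}

func main() {
	if err := writeFileAtomic("meta.json", []byte(`{"name":"ctx"}`), 0o644); err != nil {
		panic(err)
	}
}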
vendor/github.com/docker/cli/cli/error.go (38 changes, generated, vendored)
@@ -1,35 +1,29 @@
 package cli
 
 import (
-	"fmt"
-	"strings"
+	"strconv"
 )
 
-// Errors is a list of errors.
-// Useful in a loop if you don't want to return the error right away and you want to display after the loop,
-// all the errors that happened during the loop.
-//
-// Deprecated: use [errors.Join] instead; will be removed in the next release.
-type Errors []error
-
-func (errList Errors) Error() string {
-	if len(errList) < 1 {
-		return ""
-	}
-
-	out := make([]string, len(errList))
-	for i := range errList {
-		out[i] = errList[i].Error()
-	}
-	return strings.Join(out, ", ")
-}
-
 // StatusError reports an unsuccessful exit by a command.
 type StatusError struct {
+	Cause      error
 	Status     string
 	StatusCode int
 }
 
+// Error formats the error for printing. If a custom Status is provided,
+// it is returned as-is, otherwise it generates a generic error-message
+// based on the StatusCode.
 func (e StatusError) Error() string {
-	return fmt.Sprintf("Status: %s, Code: %d", e.Status, e.StatusCode)
+	if e.Status != "" {
+		return e.Status
+	}
+	if e.Cause != nil {
+		return e.Cause.Error()
+	}
+	return "exit status " + strconv.Itoa(e.StatusCode)
+}
+
+func (e StatusError) Unwrap() error {
+	return e.Cause
 }
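The reworked StatusError now carries a Cause and implements Unwrap, so errors.Is/errors.As see through it, and the removed cli.Errors aggregate is what the deprecation note pointed at errors.Join for. A brief stdlib-only sketch; statusError is a local stand-in mirroring the fields shown above, not the vendored type itself.

package main

import (
	"errors"
	"fmt"
	"strconv"
)

// statusError mirrors cli.StatusError from the hunk above: it wraps a cause
// and an exit code, and exposes the cause via Unwrap.
type statusError struct {
	Cause      error
	Status     string
	StatusCode int
}

func (e statusError) Error() string {
	if e.Status != "" {
		return e.Status
	}
	if e.Cause != nil {
		return e.Cause.Error()
	}
	return "exit status " + strconv.Itoa(e.StatusCode)
}

func (e statusError) Unwrap() error { return e.Cause }

var errNotFound = errors.New("object not found")

func main() {
	err := statusError{Cause: errNotFound, StatusCode: 1}

	// Unwrap lets errors.Is see through the wrapper to the cause.
	fmt.Println(errors.Is(err, errNotFound)) // true

	// errors.Join is the stdlib replacement the deprecated cli.Errors pointed to.
	combined := errors.Join(errNotFound, errors.New("cleanup failed"))
	fmt.Println(combined)
}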
vendor/github.com/docker/cli/cli/flags/options.go (2 changes, generated, vendored)
@@ -138,7 +138,7 @@ func SetLogLevel(logLevel string) {
 	if logLevel != "" {
 		lvl, err := logrus.ParseLevel(logLevel)
 		if err != nil {
-			fmt.Fprintf(os.Stderr, "Unable to parse logging level: %s\n", logLevel)
+			_, _ = fmt.Fprintln(os.Stderr, "Unable to parse logging level:", logLevel)
			os.Exit(1)
 		}
 		logrus.SetLevel(lvl)
vendor/github.com/docker/cli/cli/hints/hints.go (4 changes, generated, vendored)
@@ -5,7 +5,9 @@ import (
 	"strconv"
 )
 
-// Enabled returns whether cli hints are enabled or not
+// Enabled returns whether cli hints are enabled or not. Hints are enabled by
+// default, but can be disabled through the "DOCKER_CLI_HINTS" environment
+// variable.
 func Enabled() bool {
 	if v := os.Getenv("DOCKER_CLI_HINTS"); v != "" {
 		enabled, err := strconv.ParseBool(v)
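The expanded doc comment spells out the existing behaviour: hints default to on and DOCKER_CLI_HINTS is parsed as a boolean. A tiny standalone equivalent of the documented check; hintsEnabled is illustrative and only mirrors what the comment describes.

package main

import (
	"fmt"
	"os"
	"strconv"
)

// hintsEnabled mirrors the documented behaviour: enabled unless
// DOCKER_CLI_HINTS is set to a value strconv.ParseBool reads as false.
func hintsEnabled() bool {
	if v := os.Getenv("DOCKER_CLI_HINTS"); v != "" {
		if enabled, err := strconv.ParseBool(v); err == nil {
			return enabled
		}
	}
	return true
}

func main() {
	os.Setenv("DOCKER_CLI_HINTS", "false")
	fmt.Println(hintsEnabled()) // false
}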
vendor/github.com/docker/cli/cli/registry/client/endpoint.go (7 changes, generated, vendored)
@@ -22,12 +22,7 @@ type repositoryEndpoint struct {
 
 // Name returns the repository name
 func (r repositoryEndpoint) Name() string {
-	repoName := r.info.Name.Name()
-	// If endpoint does not support CanonicalName, use the RemoteName instead
-	if r.endpoint.TrimHostname {
-		repoName = reference.Path(r.info.Name)
-	}
-	return repoName
+	return reference.Path(r.info.Name)
 }
 
 // BaseURL returns the endpoint url
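Name() now always returns the hostname-less repository path, dropping the TrimHostname special-casing. For context, reference.Path from the distribution/reference module yields exactly that path component; a quick illustration:

package main

import (
	"fmt"

	"github.com/distribution/reference"
)

func main() {
	named, err := reference.ParseNormalizedNamed("busybox:latest")
	if err != nil {
		panic(err)
	}
	// The familiar name normalizes to docker.io/library/busybox;
	// Path strips the registry hostname.
	fmt.Println(reference.FamiliarString(named)) // busybox:latest
	fmt.Println(named.Name())                    // docker.io/library/busybox
	fmt.Println(reference.Path(named))           // library/busybox
}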
Some files were not shown because too many files have changed in this diff.