diff --git a/.github/workflows/e2e.yml b/.github/workflows/e2e.yml
index b0d3ac43..484e20fc 100644
--- a/.github/workflows/e2e.yml
+++ b/.github/workflows/e2e.yml
@@ -84,6 +84,8 @@ jobs:
endpoint: tcp://localhost:1234
- driver: docker-container
metadata-provenance: max
+ - driver: docker-container
+ metadata-warnings: true
exclude:
- driver: docker
multi-node: mnode-true
@@ -134,6 +136,9 @@ jobs:
if [ -n "${{ matrix.metadata-provenance }}" ]; then
echo "BUILDX_METADATA_PROVENANCE=${{ matrix.metadata-provenance }}" >> $GITHUB_ENV
fi
+ if [ -n "${{ matrix.metadata-warnings }}" ]; then
+ echo "BUILDX_METADATA_WARNINGS=${{ matrix.metadata-warnings }}" >> $GITHUB_ENV
+ fi
-
name: Install k3s
if: matrix.driver == 'kubernetes'
diff --git a/commands/bake.go b/commands/bake.go
index 6c5a02e2..a8805eb9 100644
--- a/commands/bake.go
+++ b/commands/bake.go
@@ -22,6 +22,7 @@ import (
"github.com/docker/buildx/util/progress"
"github.com/docker/buildx/util/tracing"
"github.com/docker/cli/cli/command"
+ "github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/pkg/errors"
@@ -130,15 +131,30 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
return err
}
+ var resp map[string]*client.SolveResponse
+
defer func() {
if printer != nil {
err1 := printer.Wait()
if err == nil {
err = err1
}
- if err == nil && progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
+ if err != nil {
+ return
+ }
+ if progressMode != progressui.QuietMode && progressMode != progressui.RawJSONMode {
desktop.PrintBuildDetails(os.Stderr, printer.BuildRefs(), term)
}
+ if resp != nil && len(in.metadataFile) > 0 {
+ dt := make(map[string]interface{})
+ for t, r := range resp {
+ dt[t] = decodeExporterResponse(r.ExporterResponse)
+ }
+ if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
+ dt["buildx.build.warnings"] = warnings
+ }
+ err = writeMetadataFile(in.metadataFile, dt)
+ }
}
}()
@@ -229,22 +245,12 @@ func runBake(ctx context.Context, dockerCli command.Cli, targets []string, in ba
return err
}
- resp, err := build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
+ resp, err = build.Build(ctx, nodes, bo, dockerutil.NewClient(dockerCli), confutil.ConfigDir(dockerCli), printer)
if err != nil {
return wrapBuildError(err, true)
}
- if len(in.metadataFile) > 0 {
- dt := make(map[string]interface{})
- for t, r := range resp {
- dt[t] = decodeExporterResponse(r.ExporterResponse)
- }
- if err := writeMetadataFile(in.metadataFile, dt); err != nil {
- return err
- }
- }
-
- return err
+ return
}
func bakeCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
diff --git a/commands/build.go b/commands/build.go
index 7301e1cc..c3b40bd7 100644
--- a/commands/build.go
+++ b/commands/build.go
@@ -374,7 +374,11 @@ func runBuild(ctx context.Context, dockerCli command.Cli, options buildOptions)
return err
}
} else if options.metadataFile != "" {
- if err := writeMetadataFile(options.metadataFile, decodeExporterResponse(resp.ExporterResponse)); err != nil {
+ dt := decodeExporterResponse(resp.ExporterResponse)
+ if warnings := printer.Warnings(); len(warnings) > 0 && confutil.MetadataWarningsEnabled() {
+ dt["buildx.build.warnings"] = warnings
+ }
+ if err := writeMetadataFile(options.metadataFile, dt); err != nil {
return err
}
}
diff --git a/docs/reference/buildx_bake.md b/docs/reference/buildx_bake.md
index d50b8f77..cc21e523 100644
--- a/docs/reference/buildx_bake.md
+++ b/docs/reference/buildx_bake.md
@@ -119,6 +119,7 @@ $ cat metadata.json
```json
{
+ "buildx.build.warnings": {},
"db": {
"buildx.build.provenance": {},
"buildx.build.ref": "mybuilder/mybuilder0/0fjb6ubs52xx3vygf6fgdl611",
@@ -161,6 +162,12 @@ $ cat metadata.json
> * `max` sets full provenance.
> * `disabled`, `false` or `0` does not set any provenance.
+> **Note**
+>
+> Build warnings (`buildx.build.warnings`) are not included by default. Set the
+> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to
+> include them.
+
### Don't use cache when building the image (--no-cache)
Same as `build --no-cache`. Don't use cache when building the image.
diff --git a/docs/reference/buildx_build.md b/docs/reference/buildx_build.md
index 3196de92..ea74ea20 100644
--- a/docs/reference/buildx_build.md
+++ b/docs/reference/buildx_build.md
@@ -330,6 +330,7 @@ $ cat metadata.json
{
"buildx.build.provenance": {},
"buildx.build.ref": "mybuilder/mybuilder0/0fjb6ubs52xx3vygf6fgdl611",
+ "buildx.build.warnings": {},
"containerimage.config.digest": "sha256:2937f66a9722f7f4a2df583de2f8cb97fc9196059a410e7f00072fc918930e66",
"containerimage.descriptor": {
"annotations": {
@@ -353,6 +354,12 @@ $ cat metadata.json
> * `max` sets full provenance.
> * `disabled`, `false` or `0` does not set any provenance.
+> **Note**
+>
+> Build warnings (`buildx.build.warnings`) are not included by default. Set the
+> `BUILDX_METADATA_WARNINGS` environment variable to `1` or `true` to
+> include them.
+
### Ignore build cache for specific stages (--no-cache-filter)
The `--no-cache-filter` lets you specify one or more stages of a multi-stage
diff --git a/go.mod b/go.mod
index e85a5e17..d3c29d6f 100644
--- a/go.mod
+++ b/go.mod
@@ -27,6 +27,7 @@ require (
github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992
github.com/hashicorp/hcl/v2 v2.20.1
github.com/in-toto/in-toto-golang v0.5.0
+ github.com/mitchellh/hashstructure/v2 v2.0.2
github.com/moby/buildkit v0.14.1
github.com/moby/sys/mountinfo v0.7.1
github.com/moby/sys/signal v0.7.0
diff --git a/go.sum b/go.sum
index 20856b50..156bd030 100644
--- a/go.sum
+++ b/go.sum
@@ -295,6 +295,8 @@ github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WT
github.com/mitchellh/go-homedir v1.1.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzCv8LZP15IdmG+YdwD2luVPHITV96TkirNBM=
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
+github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
+github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
diff --git a/hack/test-driver b/hack/test-driver
index 42659592..5d244a9e 100755
--- a/hack/test-driver
+++ b/hack/test-driver
@@ -109,21 +109,21 @@ buildxCmd inspect --bootstrap --builder="${builderName}"
# create dockerfile
cat > "${dockerfile}" <<EOL
FROM busybox AS build
RUN echo "Hello world" > /log
-FROM busybox AS log
+FROM busybox As log
COPY --from=build /log /log
RUN cat /log
RUN uname -a
-FROM busybox AS hello
+FROm busybox AS hello
RUN echo hello > /hello
FROM scratch
-COPY --from=log /log /log
+CoPY --from=log /log /log
COPY --from=hello /hello /hello
EOL
diff --git a/tests/bake.go b/tests/bake.go
index 3b3591fe..f50e6608 100644
--- a/tests/bake.go
+++ b/tests/bake.go
@@ -9,6 +9,7 @@ import (
"github.com/containerd/continuity/fs/fstest"
"github.com/docker/buildx/util/gitutil"
+ "github.com/moby/buildkit/client"
"github.com/moby/buildkit/identity"
provenancetypes "github.com/moby/buildkit/solver/llbsolver/provenance/types"
"github.com/moby/buildkit/util/contentutil"
@@ -42,7 +43,9 @@ var bakeTests = []func(t *testing.T, sb integration.Sandbox){
testBakeEmpty,
testBakeShmSize,
testBakeUlimits,
- testBakeMetadata,
+ testBakeMetadataProvenance,
+ testBakeMetadataWarnings,
+ testBakeMetadataWarningsDedup,
testBakeMultiExporters,
testBakeLoadPush,
}
@@ -633,19 +636,22 @@ target "default" {
require.Contains(t, string(dt), `1024`)
}
-func testBakeMetadata(t *testing.T, sb integration.Sandbox) {
+func testBakeMetadataProvenance(t *testing.T, sb integration.Sandbox) {
+ t.Run("default", func(t *testing.T) {
+ bakeMetadataProvenance(t, sb, "")
+ })
t.Run("max", func(t *testing.T) {
- bakeMetadata(t, sb, "max")
+ bakeMetadataProvenance(t, sb, "max")
})
t.Run("min", func(t *testing.T) {
- bakeMetadata(t, sb, "min")
+ bakeMetadataProvenance(t, sb, "min")
})
t.Run("disabled", func(t *testing.T) {
- bakeMetadata(t, sb, "disabled")
+ bakeMetadataProvenance(t, sb, "disabled")
})
}
-func bakeMetadata(t *testing.T, sb integration.Sandbox, metadataMode string) {
+func bakeMetadataProvenance(t *testing.T, sb integration.Sandbox, metadataMode string) {
dockerfile := []byte(`
FROM scratch
COPY foo /foo
@@ -676,7 +682,7 @@ target "default" {
withEnv("BUILDX_METADATA_PROVENANCE="+metadataMode),
)
out, err := cmd.CombinedOutput()
- require.NoError(t, err, out)
+ require.NoError(t, err, string(out))
dt, err := os.ReadFile(filepath.Join(dirDest, "md.json"))
require.NoError(t, err)
@@ -706,6 +712,130 @@ target "default" {
require.Equal(t, provenancetypes.BuildKitBuildType, prv.BuildType)
}
+func testBakeMetadataWarnings(t *testing.T, sb integration.Sandbox) {
+ t.Run("default", func(t *testing.T) {
+ bakeMetadataWarnings(t, sb, "")
+ })
+ t.Run("true", func(t *testing.T) {
+ bakeMetadataWarnings(t, sb, "true")
+ })
+ t.Run("false", func(t *testing.T) {
+ bakeMetadataWarnings(t, sb, "false")
+ })
+}
+
+func bakeMetadataWarnings(t *testing.T, sb integration.Sandbox, mode string) {
+ dockerfile := []byte(`
+frOM busybox as base
+cOpy Dockerfile .
+from scratch
+COPy --from=base \
+ /Dockerfile \
+ /
+ `)
+ bakefile := []byte(`
+target "default" {
+}
+`)
+ dir := tmpdir(
+ t,
+ fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+
+ dirDest := t.TempDir()
+
+ cmd := buildxCmd(
+ sb,
+ withDir(dir),
+ withArgs("bake", "--metadata-file", filepath.Join(dirDest, "md.json"), "--set", "*.output=type=cacheonly"),
+ withEnv("BUILDX_METADATA_WARNINGS="+mode),
+ )
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err, string(out))
+
+ dt, err := os.ReadFile(filepath.Join(dirDest, "md.json"))
+ require.NoError(t, err)
+
+ type mdT struct {
+ BuildWarnings []client.VertexWarning `json:"buildx.build.warnings"`
+ Default struct {
+ BuildRef string `json:"buildx.build.ref"`
+ } `json:"default"`
+ }
+ var md mdT
+ err = json.Unmarshal(dt, &md)
+ require.NoError(t, err, string(dt))
+
+ require.NotEmpty(t, md.Default.BuildRef, string(dt))
+ if mode == "" || mode == "false" {
+ require.Empty(t, md.BuildWarnings, string(dt))
+ return
+ }
+
+ skipNoCompatBuildKit(t, sb, ">= 0.14.0-0", "lint")
+ require.Len(t, md.BuildWarnings, 3, string(dt))
+}
+
+func testBakeMetadataWarningsDedup(t *testing.T, sb integration.Sandbox) {
+ dockerfile := []byte(`
+frOM busybox as base
+cOpy Dockerfile .
+from scratch
+COPy --from=base \
+ /Dockerfile \
+ /
+ `)
+ bakefile := []byte(`
+group "default" {
+ targets = ["base", "def"]
+}
+target "base" {
+ target = "base"
+}
+target "def" {
+}
+`)
+ dir := tmpdir(
+ t,
+ fstest.CreateFile("docker-bake.hcl", bakefile, 0600),
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+
+ dirDest := t.TempDir()
+
+ cmd := buildxCmd(
+ sb,
+ withDir(dir),
+ withArgs("bake", "--metadata-file", filepath.Join(dirDest, "md.json"), "--set", "*.output=type=cacheonly"),
+ withEnv("BUILDX_METADATA_WARNINGS=true"),
+ )
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err, string(out))
+
+ dt, err := os.ReadFile(filepath.Join(dirDest, "md.json"))
+ require.NoError(t, err)
+
+ type mdT struct {
+ BuildWarnings []client.VertexWarning `json:"buildx.build.warnings"`
+ Base struct {
+ BuildRef string `json:"buildx.build.ref"`
+ } `json:"base"`
+ Def struct {
+ BuildRef string `json:"buildx.build.ref"`
+ } `json:"def"`
+ }
+ var md mdT
+ err = json.Unmarshal(dt, &md)
+ require.NoError(t, err, string(dt))
+
+ require.NotEmpty(t, md.Base.BuildRef, string(dt))
+ require.NotEmpty(t, md.Def.BuildRef, string(dt))
+
+ skipNoCompatBuildKit(t, sb, ">= 0.14.0-0", "lint")
+ require.Len(t, md.BuildWarnings, 3, string(dt))
+}
+
func testBakeMultiExporters(t *testing.T, sb integration.Sandbox) {
if !isDockerContainerWorker(sb) {
t.Skip("only testing with docker-container worker")
diff --git a/tests/build.go b/tests/build.go
index c79b2228..7d586a15 100644
--- a/tests/build.go
+++ b/tests/build.go
@@ -16,6 +16,7 @@ import (
"github.com/containerd/containerd/platforms"
"github.com/containerd/continuity/fs/fstest"
"github.com/creack/pty"
+ "github.com/moby/buildkit/client"
"github.com/moby/buildkit/frontend/subrequests/lint"
"github.com/moby/buildkit/frontend/subrequests/outline"
"github.com/moby/buildkit/frontend/subrequests/targets"
@@ -59,7 +60,8 @@ var buildTests = []func(t *testing.T, sb integration.Sandbox){
testBuildNetworkModeBridge,
testBuildShmSize,
testBuildUlimit,
- testBuildMetadata,
+ testBuildMetadataProvenance,
+ testBuildMetadataWarnings,
testBuildMultiExporters,
testBuildLoadPush,
testBuildSecret,
@@ -560,19 +562,22 @@ COPY --from=build /ulimit /
require.Contains(t, string(dt), `1024`)
}
-func testBuildMetadata(t *testing.T, sb integration.Sandbox) {
+func testBuildMetadataProvenance(t *testing.T, sb integration.Sandbox) {
+ t.Run("default", func(t *testing.T) {
+ buildMetadataProvenance(t, sb, "")
+ })
t.Run("max", func(t *testing.T) {
- buildMetadata(t, sb, "max")
+ buildMetadataProvenance(t, sb, "max")
})
t.Run("min", func(t *testing.T) {
- buildMetadata(t, sb, "min")
+ buildMetadataProvenance(t, sb, "min")
})
t.Run("disabled", func(t *testing.T) {
- buildMetadata(t, sb, "disabled")
+ buildMetadataProvenance(t, sb, "disabled")
})
}
-func buildMetadata(t *testing.T, sb integration.Sandbox, metadataMode string) {
+func buildMetadataProvenance(t *testing.T, sb integration.Sandbox, metadataMode string) {
dir := createTestProject(t)
dirDest := t.TempDir()
@@ -616,6 +621,61 @@ func buildMetadata(t *testing.T, sb integration.Sandbox, metadataMode string) {
require.Equal(t, provenancetypes.BuildKitBuildType, prv.BuildType)
}
+func testBuildMetadataWarnings(t *testing.T, sb integration.Sandbox) {
+ t.Run("default", func(t *testing.T) {
+ buildMetadataWarnings(t, sb, "")
+ })
+ t.Run("true", func(t *testing.T) {
+ buildMetadataWarnings(t, sb, "true")
+ })
+ t.Run("false", func(t *testing.T) {
+ buildMetadataWarnings(t, sb, "false")
+ })
+}
+
+func buildMetadataWarnings(t *testing.T, sb integration.Sandbox, mode string) {
+ dockerfile := []byte(`
+frOM busybox as base
+cOpy Dockerfile .
+from scratch
+COPy --from=base \
+ /Dockerfile \
+ /
+ `)
+ dir := tmpdir(
+ t,
+ fstest.CreateFile("Dockerfile", dockerfile, 0600),
+ )
+
+ cmd := buildxCmd(
+ sb,
+ withArgs("build", "--metadata-file", filepath.Join(dir, "md.json"), dir),
+ withEnv("BUILDX_METADATA_WARNINGS="+mode),
+ )
+ out, err := cmd.CombinedOutput()
+ require.NoError(t, err, string(out))
+
+ dt, err := os.ReadFile(filepath.Join(dir, "md.json"))
+ require.NoError(t, err)
+
+ type mdT struct {
+ BuildRef string `json:"buildx.build.ref"`
+ BuildWarnings []client.VertexWarning `json:"buildx.build.warnings"`
+ }
+ var md mdT
+ err = json.Unmarshal(dt, &md)
+ require.NoError(t, err, string(dt))
+
+ require.NotEmpty(t, md.BuildRef, string(dt))
+ if mode == "" || mode == "false" {
+ require.Empty(t, md.BuildWarnings, string(dt))
+ return
+ }
+
+ skipNoCompatBuildKit(t, sb, ">= 0.14.0-0", "lint")
+ require.Len(t, md.BuildWarnings, 3, string(dt))
+}
+
func testBuildMultiExporters(t *testing.T, sb integration.Sandbox) {
if !isDockerContainerWorker(sb) {
t.Skip("only testing with docker-container worker")
diff --git a/util/confutil/metadata.go b/util/confutil/metadata.go
index a655eec8..b30a7b26 100644
--- a/util/confutil/metadata.go
+++ b/util/confutil/metadata.go
@@ -39,3 +39,12 @@ func ParseMetadataProvenance(inp string) MetadataProvenanceMode {
}
return MetadataProvenanceModeMin
}
+
+// MetadataWarningsEnabled returns whether metadata warnings are enabled from
+// BUILDX_METADATA_WARNINGS environment variable (default false)
+func MetadataWarningsEnabled() bool {
+ if ok, err := strconv.ParseBool(os.Getenv("BUILDX_METADATA_WARNINGS")); err == nil {
+ return ok
+ }
+ return false
+}
diff --git a/util/progress/printer.go b/util/progress/printer.go
index 6cbb2e26..deef3a67 100644
--- a/util/progress/printer.go
+++ b/util/progress/printer.go
@@ -7,6 +7,7 @@ import (
"github.com/containerd/console"
"github.com/docker/buildx/util/logutil"
+ "github.com/mitchellh/hashstructure/v2"
"github.com/moby/buildkit/client"
"github.com/moby/buildkit/util/progress/progressui"
"github.com/opencontainers/go-digest"
@@ -58,7 +59,7 @@ func (p *Printer) Write(s *client.SolveStatus) {
}
func (p *Printer) Warnings() []client.VertexWarning {
- return p.warnings
+ return dedupWarnings(p.warnings)
}
func (p *Printer) ValidateLogSource(dgst digest.Digest, v interface{}) bool {
@@ -184,3 +185,26 @@ func WithOnClose(onclose func()) PrinterOpt {
opt.onclose = onclose
}
}
+
+func dedupWarnings(inp []client.VertexWarning) []client.VertexWarning {
+ m := make(map[uint64]client.VertexWarning)
+ for _, w := range inp {
+ wcp := w
+ wcp.Vertex = ""
+ if wcp.SourceInfo != nil {
+ wcp.SourceInfo.Definition = nil
+ }
+ h, err := hashstructure.Hash(wcp, hashstructure.FormatV2, nil)
+ if err != nil {
+ continue
+ }
+ if _, ok := m[h]; !ok {
+ m[h] = w
+ }
+ }
+ res := make([]client.VertexWarning, 0, len(m))
+ for _, w := range m {
+ res = append(res, w)
+ }
+ return res
+}
diff --git a/vendor/github.com/mitchellh/hashstructure/v2/LICENSE b/vendor/github.com/mitchellh/hashstructure/v2/LICENSE
new file mode 100644
index 00000000..a3866a29
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/v2/LICENSE
@@ -0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2016 Mitchell Hashimoto
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+THE SOFTWARE.
diff --git a/vendor/github.com/mitchellh/hashstructure/v2/README.md b/vendor/github.com/mitchellh/hashstructure/v2/README.md
new file mode 100644
index 00000000..21f36be1
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/v2/README.md
@@ -0,0 +1,76 @@
+# hashstructure [![GoDoc](https://godoc.org/github.com/mitchellh/hashstructure?status.svg)](https://godoc.org/github.com/mitchellh/hashstructure)
+
+hashstructure is a Go library for creating a unique hash value
+for arbitrary values in Go.
+
+This can be used to key values in a hash (for use in a map, set, etc.)
+that are complex. The most common use case is comparing two values without
+sending data across the network, caching values locally (de-dup), and so on.
+
+## Features
+
+ * Hash any arbitrary Go value, including complex types.
+
+ * Tag a struct field to ignore it and not affect the hash value.
+
+ * Tag a slice type struct field to treat it as a set where ordering
+ doesn't affect the hash code but the field itself is still taken into
+ account to create the hash value.
+
+ * Optionally, specify a custom hash function to optimize for speed, collision
+ avoidance for your data set, etc.
+
+ * Optionally, hash the output of `.String()` on structs that implement fmt.Stringer,
+ allowing effective hashing of time.Time
+
+ * Optionally, override the hashing process by implementing `Hashable`.
+
+## Installation
+
+Standard `go get`:
+
+```
+$ go get github.com/mitchellh/hashstructure/v2
+```
+
+**Note on v2:** It is highly recommended you use the "v2" release since this
+fixes some significant hash collisions issues from v1. In practice, we used
+v1 for many years in real projects at HashiCorp and never had issues, but it
+is highly dependent on the shape of the data you're hashing and how you use
+those hashes.
+
+When using v2+, you can still generate weaker v1 hashes by using the
+`FormatV1` format when calling `Hash`.
+
+## Usage & Example
+
+For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/hashstructure).
+
+A quick code example is shown below:
+
+```go
+type ComplexStruct struct {
+ Name string
+ Age uint
+ Metadata map[string]interface{}
+}
+
+v := ComplexStruct{
+ Name: "mitchellh",
+ Age: 64,
+ Metadata: map[string]interface{}{
+ "car": true,
+ "location": "California",
+ "siblings": []string{"Bob", "John"},
+ },
+}
+
+hash, err := hashstructure.Hash(v, hashstructure.FormatV2, nil)
+if err != nil {
+ panic(err)
+}
+
+fmt.Printf("%d", hash)
+// Output:
+// 2307517237273902113
+```
diff --git a/vendor/github.com/mitchellh/hashstructure/v2/errors.go b/vendor/github.com/mitchellh/hashstructure/v2/errors.go
new file mode 100644
index 00000000..44b89514
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/v2/errors.go
@@ -0,0 +1,22 @@
+package hashstructure
+
+import (
+ "fmt"
+)
+
+// ErrNotStringer is returned when there's an error with hash:"string"
+type ErrNotStringer struct {
+ Field string
+}
+
+// Error implements error for ErrNotStringer
+func (ens *ErrNotStringer) Error() string {
+ return fmt.Sprintf("hashstructure: %s has hash:\"string\" set, but does not implement fmt.Stringer", ens.Field)
+}
+
+// ErrFormat is returned when an invalid format is given to the Hash function.
+type ErrFormat struct{}
+
+func (*ErrFormat) Error() string {
+ return "format must be one of the defined Format values in the hashstructure library"
+}
diff --git a/vendor/github.com/mitchellh/hashstructure/v2/hashstructure.go b/vendor/github.com/mitchellh/hashstructure/v2/hashstructure.go
new file mode 100644
index 00000000..3dc0eb74
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/v2/hashstructure.go
@@ -0,0 +1,482 @@
+package hashstructure
+
+import (
+ "encoding/binary"
+ "fmt"
+ "hash"
+ "hash/fnv"
+ "reflect"
+ "time"
+)
+
+// HashOptions are options that are available for hashing.
+type HashOptions struct {
+ // Hasher is the hash function to use. If this isn't set, it will
+ // default to FNV.
+ Hasher hash.Hash64
+
+ // TagName is the struct tag to look at when hashing the structure.
+ // By default this is "hash".
+ TagName string
+
+ // ZeroNil is flag determining if nil pointer should be treated equal
+ // to a zero value of pointed type. By default this is false.
+ ZeroNil bool
+
+ // IgnoreZeroValue is determining if zero value fields should be
+ // ignored for hash calculation.
+ IgnoreZeroValue bool
+
+ // SlicesAsSets assumes that a `set` tag is always present for slices.
+ // Default is false (in which case the tag is used instead)
+ SlicesAsSets bool
+
+ // UseStringer will attempt to use fmt.Stringer always. If the struct
+ // doesn't implement fmt.Stringer, it'll fall back to trying usual tricks.
+ // If this is true, and the "string" tag is also set, the tag takes
+ // precedence (meaning that if the type doesn't implement fmt.Stringer, we
+ // panic)
+ UseStringer bool
+}
+
+// Format specifies the hashing process used. Different formats typically
+// generate different hashes for the same value and have different properties.
+type Format uint
+
+const (
+ // To disallow the zero value
+ formatInvalid Format = iota
+
+ // FormatV1 is the format used in v1.x of this library. This has the
+ // downsides noted in issue #18 but allows simultaneous v1/v2 usage.
+ FormatV1
+
+ // FormatV2 is the current recommended format and fixes the issues
+ // noted in FormatV1.
+ FormatV2
+
+ formatMax // so we can easily find the end
+)
+
+// Hash returns the hash value of an arbitrary value.
+//
+// If opts is nil, then default options will be used. See HashOptions
+// for the default values. The same *HashOptions value cannot be used
+// concurrently. None of the values within a *HashOptions struct are
+// safe to read/write while hashing is being done.
+//
+// The "format" is required and must be one of the format values defined
+// by this library. You should probably just use "FormatV2". This allows
+// generated hashes uses alternate logic to maintain compatibility with
+// older versions.
+//
+// Notes on the value:
+//
+// * Unexported fields on structs are ignored and do not affect the
+// hash value.
+//
+// * Adding an exported field to a struct with the zero value will change
+// the hash value.
+//
+// For structs, the hashing can be controlled using tags. For example:
+//
+// struct {
+// Name string
+// UUID string `hash:"ignore"`
+// }
+//
+// The available tag values are:
+//
+// * "ignore" or "-" - The field will be ignored and not affect the hash code.
+//
+// * "set" - The field will be treated as a set, where ordering doesn't
+// affect the hash code. This only works for slices.
+//
+// * "string" - The field will be hashed as a string, only works when the
+// field implements fmt.Stringer
+//
+func Hash(v interface{}, format Format, opts *HashOptions) (uint64, error) {
+ // Validate our format
+ if format <= formatInvalid || format >= formatMax {
+ return 0, &ErrFormat{}
+ }
+
+ // Create default options
+ if opts == nil {
+ opts = &HashOptions{}
+ }
+ if opts.Hasher == nil {
+ opts.Hasher = fnv.New64()
+ }
+ if opts.TagName == "" {
+ opts.TagName = "hash"
+ }
+
+ // Reset the hash
+ opts.Hasher.Reset()
+
+ // Create our walker and walk the structure
+ w := &walker{
+ format: format,
+ h: opts.Hasher,
+ tag: opts.TagName,
+ zeronil: opts.ZeroNil,
+ ignorezerovalue: opts.IgnoreZeroValue,
+ sets: opts.SlicesAsSets,
+ stringer: opts.UseStringer,
+ }
+ return w.visit(reflect.ValueOf(v), nil)
+}
+
+type walker struct {
+ format Format
+ h hash.Hash64
+ tag string
+ zeronil bool
+ ignorezerovalue bool
+ sets bool
+ stringer bool
+}
+
+type visitOpts struct {
+ // Flags are a bitmask of flags to affect behavior of this visit
+ Flags visitFlag
+
+ // Information about the struct containing this field
+ Struct interface{}
+ StructField string
+}
+
+var timeType = reflect.TypeOf(time.Time{})
+
+func (w *walker) visit(v reflect.Value, opts *visitOpts) (uint64, error) {
+ t := reflect.TypeOf(0)
+
+ // Loop since these can be wrapped in multiple layers of pointers
+ // and interfaces.
+ for {
+ // If we have an interface, dereference it. We have to do this up
+ // here because it might be a nil in there and the check below must
+ // catch that.
+ if v.Kind() == reflect.Interface {
+ v = v.Elem()
+ continue
+ }
+
+ if v.Kind() == reflect.Ptr {
+ if w.zeronil {
+ t = v.Type().Elem()
+ }
+ v = reflect.Indirect(v)
+ continue
+ }
+
+ break
+ }
+
+ // If it is nil, treat it like a zero.
+ if !v.IsValid() {
+ v = reflect.Zero(t)
+ }
+
+ // Binary writing can use raw ints, we have to convert to
+ // a sized-int, we'll choose the largest...
+ switch v.Kind() {
+ case reflect.Int:
+ v = reflect.ValueOf(int64(v.Int()))
+ case reflect.Uint:
+ v = reflect.ValueOf(uint64(v.Uint()))
+ case reflect.Bool:
+ var tmp int8
+ if v.Bool() {
+ tmp = 1
+ }
+ v = reflect.ValueOf(tmp)
+ }
+
+ k := v.Kind()
+
+ // We can shortcut numeric values by directly binary writing them
+ if k >= reflect.Int && k <= reflect.Complex64 {
+ // A direct hash calculation
+ w.h.Reset()
+ err := binary.Write(w.h, binary.LittleEndian, v.Interface())
+ return w.h.Sum64(), err
+ }
+
+ switch v.Type() {
+ case timeType:
+ w.h.Reset()
+ b, err := v.Interface().(time.Time).MarshalBinary()
+ if err != nil {
+ return 0, err
+ }
+
+ err = binary.Write(w.h, binary.LittleEndian, b)
+ return w.h.Sum64(), err
+ }
+
+ switch k {
+ case reflect.Array:
+ var h uint64
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ current, err := w.visit(v.Index(i), nil)
+ if err != nil {
+ return 0, err
+ }
+
+ h = hashUpdateOrdered(w.h, h, current)
+ }
+
+ return h, nil
+
+ case reflect.Map:
+ var includeMap IncludableMap
+ if opts != nil && opts.Struct != nil {
+ if v, ok := opts.Struct.(IncludableMap); ok {
+ includeMap = v
+ }
+ }
+
+ // Build the hash for the map. We do this by XOR-ing all the key
+ // and value hashes. This makes it deterministic despite ordering.
+ var h uint64
+ for _, k := range v.MapKeys() {
+ v := v.MapIndex(k)
+ if includeMap != nil {
+ incl, err := includeMap.HashIncludeMap(
+ opts.StructField, k.Interface(), v.Interface())
+ if err != nil {
+ return 0, err
+ }
+ if !incl {
+ continue
+ }
+ }
+
+ kh, err := w.visit(k, nil)
+ if err != nil {
+ return 0, err
+ }
+ vh, err := w.visit(v, nil)
+ if err != nil {
+ return 0, err
+ }
+
+ fieldHash := hashUpdateOrdered(w.h, kh, vh)
+ h = hashUpdateUnordered(h, fieldHash)
+ }
+
+ if w.format != FormatV1 {
+ // Important: read the docs for hashFinishUnordered
+ h = hashFinishUnordered(w.h, h)
+ }
+
+ return h, nil
+
+ case reflect.Struct:
+ parent := v.Interface()
+ var include Includable
+ if impl, ok := parent.(Includable); ok {
+ include = impl
+ }
+
+ if impl, ok := parent.(Hashable); ok {
+ return impl.Hash()
+ }
+
+ // If we can address this value, check if the pointer value
+ // implements our interfaces and use that if so.
+ if v.CanAddr() {
+ vptr := v.Addr()
+ parentptr := vptr.Interface()
+ if impl, ok := parentptr.(Includable); ok {
+ include = impl
+ }
+
+ if impl, ok := parentptr.(Hashable); ok {
+ return impl.Hash()
+ }
+ }
+
+ t := v.Type()
+ h, err := w.visit(reflect.ValueOf(t.Name()), nil)
+ if err != nil {
+ return 0, err
+ }
+
+ l := v.NumField()
+ for i := 0; i < l; i++ {
+ if innerV := v.Field(i); v.CanSet() || t.Field(i).Name != "_" {
+ var f visitFlag
+ fieldType := t.Field(i)
+ if fieldType.PkgPath != "" {
+ // Unexported
+ continue
+ }
+
+ tag := fieldType.Tag.Get(w.tag)
+ if tag == "ignore" || tag == "-" {
+ // Ignore this field
+ continue
+ }
+
+ if w.ignorezerovalue {
+ if innerV.IsZero() {
+ continue
+ }
+ }
+
+ // if string is set, use the string value
+ if tag == "string" || w.stringer {
+ if impl, ok := innerV.Interface().(fmt.Stringer); ok {
+ innerV = reflect.ValueOf(impl.String())
+ } else if tag == "string" {
+ // We only show this error if the tag explicitly
+ // requests a stringer.
+ return 0, &ErrNotStringer{
+ Field: v.Type().Field(i).Name,
+ }
+ }
+ }
+
+ // Check if we implement includable and check it
+ if include != nil {
+ incl, err := include.HashInclude(fieldType.Name, innerV)
+ if err != nil {
+ return 0, err
+ }
+ if !incl {
+ continue
+ }
+ }
+
+ switch tag {
+ case "set":
+ f |= visitFlagSet
+ }
+
+ kh, err := w.visit(reflect.ValueOf(fieldType.Name), nil)
+ if err != nil {
+ return 0, err
+ }
+
+ vh, err := w.visit(innerV, &visitOpts{
+ Flags: f,
+ Struct: parent,
+ StructField: fieldType.Name,
+ })
+ if err != nil {
+ return 0, err
+ }
+
+ fieldHash := hashUpdateOrdered(w.h, kh, vh)
+ h = hashUpdateUnordered(h, fieldHash)
+ }
+
+ if w.format != FormatV1 {
+ // Important: read the docs for hashFinishUnordered
+ h = hashFinishUnordered(w.h, h)
+ }
+ }
+
+ return h, nil
+
+ case reflect.Slice:
+ // We have two behaviors here. If it isn't a set, then we just
+ // visit all the elements. If it is a set, then we do a deterministic
+ // hash code.
+ var h uint64
+ var set bool
+ if opts != nil {
+ set = (opts.Flags & visitFlagSet) != 0
+ }
+ l := v.Len()
+ for i := 0; i < l; i++ {
+ current, err := w.visit(v.Index(i), nil)
+ if err != nil {
+ return 0, err
+ }
+
+ if set || w.sets {
+ h = hashUpdateUnordered(h, current)
+ } else {
+ h = hashUpdateOrdered(w.h, h, current)
+ }
+ }
+
+ if set && w.format != FormatV1 {
+ // Important: read the docs for hashFinishUnordered
+ h = hashFinishUnordered(w.h, h)
+ }
+
+ return h, nil
+
+ case reflect.String:
+ // Directly hash
+ w.h.Reset()
+ _, err := w.h.Write([]byte(v.String()))
+ return w.h.Sum64(), err
+
+ default:
+ return 0, fmt.Errorf("unknown kind to hash: %s", k)
+ }
+
+}
+
+func hashUpdateOrdered(h hash.Hash64, a, b uint64) uint64 {
+ // For ordered updates, use a real hash function
+ h.Reset()
+
+ // We just panic if the binary writes fail because we are writing
+ // an int64 which should never be fail-able.
+ e1 := binary.Write(h, binary.LittleEndian, a)
+ e2 := binary.Write(h, binary.LittleEndian, b)
+ if e1 != nil {
+ panic(e1)
+ }
+ if e2 != nil {
+ panic(e2)
+ }
+
+ return h.Sum64()
+}
+
+func hashUpdateUnordered(a, b uint64) uint64 {
+ return a ^ b
+}
+
+// After mixing a group of unique hashes with hashUpdateUnordered, it's always
+// necessary to call hashFinishUnordered. Why? Because hashUpdateUnordered
+// is a simple XOR, and calling hashUpdateUnordered on hashes produced by
+// hashUpdateUnordered can effectively cancel out a previous change to the hash
+// result if the same hash value appears later on. For example, consider:
+//
+// hashUpdateUnordered(hashUpdateUnordered("A", "B"), hashUpdateUnordered("A", "C")) =
+// H("A") ^ H("B")) ^ (H("A") ^ H("C")) =
+// (H("A") ^ H("A")) ^ (H("B") ^ H(C)) =
+// H(B) ^ H(C) =
+// hashUpdateUnordered(hashUpdateUnordered("Z", "B"), hashUpdateUnordered("Z", "C"))
+//
+// hashFinishUnordered "hardens" the result, so that encountering partially
+// overlapping input data later on in a different context won't cancel out.
+func hashFinishUnordered(h hash.Hash64, a uint64) uint64 {
+ h.Reset()
+
+ // We just panic if the writes fail
+ e1 := binary.Write(h, binary.LittleEndian, a)
+ if e1 != nil {
+ panic(e1)
+ }
+
+ return h.Sum64()
+}
+
+// visitFlag is used as a bitmask for affecting visit behavior
+type visitFlag uint
+
+const (
+ visitFlagInvalid visitFlag = iota
+ visitFlagSet = iota << 1
+)
diff --git a/vendor/github.com/mitchellh/hashstructure/v2/include.go b/vendor/github.com/mitchellh/hashstructure/v2/include.go
new file mode 100644
index 00000000..702d3541
--- /dev/null
+++ b/vendor/github.com/mitchellh/hashstructure/v2/include.go
@@ -0,0 +1,22 @@
+package hashstructure
+
+// Includable is an interface that can optionally be implemented by
+// a struct. It will be called for each field in the struct to check whether
+// it should be included in the hash.
+type Includable interface {
+ HashInclude(field string, v interface{}) (bool, error)
+}
+
+// IncludableMap is an interface that can optionally be implemented by
+// a struct. It will be called when a map-type field is found to ask the
+// struct if the map item should be included in the hash.
+type IncludableMap interface {
+ HashIncludeMap(field string, k, v interface{}) (bool, error)
+}
+
+// Hashable is an interface that can optionally be implemented by a struct
+// to override the hash value. This value will override the hash value for
+// the entire struct. Entries in the struct will not be hashed.
+type Hashable interface {
+ Hash() (uint64, error)
+}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 598e5733..4ac65f09 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -511,6 +511,9 @@ github.com/miekg/pkcs11
# github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7
## explicit
github.com/mitchellh/go-wordwrap
+# github.com/mitchellh/hashstructure/v2 v2.0.2
+## explicit; go 1.14
+github.com/mitchellh/hashstructure/v2
# github.com/mitchellh/mapstructure v1.5.0
## explicit; go 1.14
# github.com/moby/buildkit v0.14.1