Compare commits

..

9 Commits

Author SHA1 Message Date
Tõnis Tiigi
39db6159f9 Merge pull request #2503 from tonistiigi/20240606-update-buildkit
update buildkit to v0.14.0-rc2
2024-06-06 16:30:31 -07:00
Tonis Tiigi
922328cbaf build: update lint fallback image to v1.8.0-rc2
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
2024-06-06 16:18:43 -07:00
Tonis Tiigi
aa0f90fdd6 vendor: update buildkit to v0.14.0-rc2
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
2024-06-06 16:17:17 -07:00
CrazyMax
82b6826cd7 Merge pull request #2500 from dvdksn/doc-rawjson
docs: mention rawjson progress output mode
2024-06-06 17:26:14 +02:00
David Karlsson
1e3aec1ae2 docs: mention rawjson progress output mode
Signed-off-by: David Karlsson <35727626+dvdksn@users.noreply.github.com>
2024-06-06 16:51:42 +02:00
CrazyMax
cfef22ddf0 Merge pull request #2502 from thaJeztah/bump_compose_go
vendor: github.com/compose-spec/compose-go/v2 v2.1.2
2024-06-06 15:31:51 +02:00
Sebastiaan van Stijn
9e5ba66553 vendor: github.com/compose-spec/compose-go/v2 v2.1.2
Replaces uses of the github.com/mitchellh/mapstructure module, which
was deprecated by the owner and moved to new maintainership at
github.com/go-viper/mapstructure.

The old module is still referenced as an indirect dependency (through
docker/cli and theupdateframework/notary), but is not used in code, and
should eventually go away.

full diff: https://github.com/compose-spec/compose-go/compare/v2.1.1...v2.1.2

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
2024-06-06 12:44:51 +02:00
CrazyMax
747b75a217 Merge pull request #2497 from crazy-max/fix-k8s-kubeconfig
k8s: fix concurrent kubeconfig access when loading nodes
2024-06-04 12:10:44 +02:00
CrazyMax
945e774a02 k8s: fix concurrent kubeconfig access when loading nodes
Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
2024-06-03 16:16:24 +02:00
97 changed files with 2256 additions and 871 deletions

View File

@@ -52,9 +52,8 @@ var (
 )
 const (
 	printFallbackImage = "docker/dockerfile:1.5@sha256:dbbd5e059e8a07ff7ea6233b213b36aa516b4c53c645f1817a4dd18b83cbea56"
-	// moby/buildkit#3d789eb740a93ac814b078fd752307e2a8da5b84
-	printLintFallbackImage = "docker.io/docker/dockerfile-upstream:master@sha256:ea5a8efbaf785bdfe6c7ad74f0b11849df42d62861b218429bea17bb078df6ca"
+	printLintFallbackImage = "docker.io/docker/dockerfile-upstream:1.8.0-rc2@sha256:515538ca94186029d466cf4c10c61b5147e849c592955e3a78922e24595c63a9"
 )
 type Options struct {

View File

@@ -699,7 +699,7 @@ type commonFlags struct {
 func commonBuildFlags(options *commonFlags, flags *pflag.FlagSet) {
 	options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
-	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
+	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
 	options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
 	flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to a file")
 }

View File

@@ -80,7 +80,7 @@ func RootCmd(dockerCli command.Cli, children ...DebuggableCmd) *cobra.Command {
 	flags.StringVar(&controlOptions.Root, "root", "", "Specify root directory of server to connect for the monitor")
 	flags.BoolVar(&controlOptions.Detach, "detach", runtime.GOOS == "linux", "Detach buildx server for the monitor (supported only on linux)")
 	flags.StringVar(&controlOptions.ServerConfig, "server-config", "", "Specify buildx server config file for the monitor (used only when launching new server)")
-	flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty") for the monitor. Use plain to show container output`)
+	flags.StringVar(&progressMode, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson") for the monitor. Use plain to show container output`)
 	cobrautil.MarkFlagsExperimental(flags, "invoke", "on", "root", "detach", "server-config")

View File

@@ -126,6 +126,6 @@ func dialStdioCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 	flags := cmd.Flags()
 	flags.StringVar(&opts.platform, "platform", os.Getenv("DOCKER_DEFAULT_PLATFORM"), "Target platform: this is used for node selection")
-	flags.StringVar(&opts.progress, "progress", "quiet", "Set type of progress output (auto, plain, tty).")
+	flags.StringVar(&opts.progress, "progress", "quiet", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
 	return cmd
 }

View File

@@ -282,7 +282,7 @@ func createCmd(dockerCli command.Cli, opts RootOptions) *cobra.Command {
 	flags.StringArrayVarP(&options.tags, "tag", "t", []string{}, "Set reference for new image")
 	flags.BoolVar(&options.dryrun, "dry-run", false, "Show final image instead of pushing")
 	flags.BoolVar(&options.actionAppend, "append", false, "Append to existing manifest")
-	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
+	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty", "rawjson"). Use plain to show container output`)
 	flags.StringArrayVarP(&options.annotations, "annotation", "", []string{}, "Add annotation to the image")
 	flags.BoolVar(&options.preferIndex, "prefer-index", true, "When only a single source is specified, prefer outputting an image index or manifest list instead of performing a carbon copy")

View File

@@ -13,20 +13,20 @@ Build from a file
 ### Options

 | Name                                 | Type          | Default | Description |
-|:------------------------------------|:--------------|:--------|:-----------------------------------------------------------------------------------------|
+|:------------------------------------|:--------------|:--------|:----------------------------------------------------------------------------------------------------|
 | [`--builder`](#builder)             | `string`      |         | Override the configured builder instance |
 | [`-f`](#file), [`--file`](#file)    | `stringArray` |         | Build definition file |
 | `--load`                            |               |         | Shorthand for `--set=*.output=type=docker` |
 | [`--metadata-file`](#metadata-file) | `string`      |         | Write build result metadata to a file |
 | [`--no-cache`](#no-cache)           |               |         | Do not use cache when building the image |
 | [`--print`](#print)                 |               |         | Print the options without building |
-| [`--progress`](#progress)           | `string`      | `auto`  | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
+| [`--progress`](#progress)           | `string`      | `auto`  | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
 | [`--provenance`](#provenance)       | `string`      |         | Shorthand for `--set=*.attest=type=provenance` |
 | [`--pull`](#pull)                   |               |         | Always attempt to pull all referenced images |
 | `--push`                            |               |         | Shorthand for `--set=*.output=type=registry` |
 | [`--sbom`](#sbom)                   | `string`      |         | Shorthand for `--set=*.attest=type=sbom` |
 <!---MARKER_GEN_END-->

View File

@@ -38,7 +38,7 @@ Start a build
 | [`--no-cache-filter`](#no-cache-filter) | `stringArray` |        | Do not cache specified stages |
 | [`-o`](#output), [`--output`](#output)  | `stringArray` |        | Output destination (format: `type=local,dest=path`) |
 | [`--platform`](#platform)               | `stringArray` |        | Set target platform for build |
-| [`--progress`](#progress)               | `string`      | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
+| [`--progress`](#progress)               | `string`      | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
 | [`--provenance`](#provenance)           | `string`      |        | Shorthand for `--attest=type=provenance` |
 | `--pull`                                |               |        | Always attempt to pull all referenced images |
 | [`--push`](#push)                       |               |        | Shorthand for `--output=type=registry` |
@@ -551,8 +551,8 @@ $ docker buildx build --platform=darwin .
 --progress=VALUE
 ```
-Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container
-output (default "auto").
+Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use `plain` to show container
+output (default `auto`).

 > **Note**
 >
@@ -578,6 +578,9 @@ $ docker buildx build --load --progress=plain .
 > Check also the [`BUILDKIT_COLORS`](https://docs.docker.com/build/building/variables/#buildkit_colors)
 > environment variable for modifying the colors of the terminal output.

+The `rawjson` output marshals the solve status events from BuildKit to JSON lines.
+This mode is designed to be read by an external program.
+
 ### <a name="provenance"></a> Create provenance attestations (--provenance)

 Shorthand for [`--attest=type=provenance`](#attest), used to configure
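As context for the `rawjson` note added in this file, here is a minimal consumer sketch. It assumes each progress line is one JSON-marshaled BuildKit solve-status event on the build's stderr stream; the exact field names follow BuildKit's `client.SolveStatus` type and should be verified against real output.

```go
package main

import (
	"bufio"
	"encoding/json"
	"fmt"
	"os"
)

// Reads JSON lines (e.g. from `docker buildx build --progress=rawjson . 2>&1`)
// on stdin and prints the top-level keys of each solve status event.
func main() {
	sc := bufio.NewScanner(os.Stdin)
	sc.Buffer(make([]byte, 0, 1<<20), 1<<20) // status events can exceed the default token size
	for sc.Scan() {
		var event map[string]any
		if err := json.Unmarshal(sc.Bytes(), &event); err != nil {
			fmt.Fprintln(os.Stderr, "skipping malformed line:", err)
			continue
		}
		for k := range event {
			fmt.Println(k)
		}
	}
	if err := sc.Err(); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```

Piping `docker buildx build --progress=rawjson . 2>&1` into a program like this is the kind of external consumption the note describes.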

View File

@@ -12,15 +12,15 @@ Start debugger (EXPERIMENTAL)
 ### Options

 | Name              | Type     | Default | Description |
-|:------------------|:---------|:--------|:---------------------------------------------------------------------------------------------------------|
+|:------------------|:---------|:--------|:--------------------------------------------------------------------------------------------------------------------|
 | `--builder`       | `string` |         | Override the configured builder instance |
 | `--detach`        | `bool`   | `true`  | Detach buildx server for the monitor (supported only on linux) (EXPERIMENTAL) |
 | `--invoke`        | `string` |         | Launch a monitor with executing specified command (EXPERIMENTAL) |
 | `--on`            | `string` | `error` | When to launch the monitor ([always, error]) (EXPERIMENTAL) |
-| `--progress`      | `string` | `auto`  | Set type of progress output (`auto`, `plain`, `tty`) for the monitor. Use plain to show container output |
+| `--progress`      | `string` | `auto`  | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`) for the monitor. Use plain to show container output |
 | `--root`          | `string` |         | Specify root directory of server to connect for the monitor (EXPERIMENTAL) |
 | `--server-config` | `string` |         | Specify buildx server config file for the monitor (used only when launching new server) (EXPERIMENTAL) |
 <!---MARKER_GEN_END-->

View File

@@ -34,7 +34,7 @@ Start a build
 | `--no-cache-filter` | `stringArray` |        | Do not cache specified stages |
 | `-o`, `--output`    | `stringArray` |        | Output destination (format: `type=local,dest=path`) |
 | `--platform`        | `stringArray` |        | Set target platform for build |
-| `--progress`        | `string`      | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
+| `--progress`        | `string`      | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
 | `--provenance`      | `string`      |        | Shorthand for `--attest=type=provenance` |
 | `--pull`            |               |        | Always attempt to pull all referenced images |
 | `--push`            |               |        | Shorthand for `--output=type=registry` |

View File

@@ -5,11 +5,11 @@ Proxy current stdio streams to builder instance
 ### Options

 | Name         | Type     | Default | Description |
-|:-------------|:---------|:--------|:-------------------------------------------------|
+|:-------------|:---------|:--------|:----------------------------------------------------------------------------------------------------|
 | `--builder`  | `string` |         | Override the configured builder instance |
 | `--platform` | `string` |         | Target platform: this is used for node selection |
-| `--progress` | `string` | `quiet` | Set type of progress output (auto, plain, tty). |
+| `--progress` | `string` | `quiet` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
 <!---MARKER_GEN_END-->

View File

@@ -17,7 +17,7 @@ Create a new image based on source images
 | [`--dry-run`](#dry-run)          |               |        | Show final image instead of pushing |
 | [`-f`](#file), [`--file`](#file) | `stringArray` |        | Read source descriptor from file |
 | `--prefer-index`                 | `bool`        | `true` | When only a single source is specified, prefer outputting an image index or manifest list instead of performing a carbon copy |
-| `--progress`                     | `string`      | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
+| `--progress`                     | `string`      | `auto` | Set type of progress output (`auto`, `plain`, `tty`, `rawjson`). Use plain to show container output |
 | [`-t`](#tag), [`--tag`](#tag)    | `stringArray` |        | Set reference for new image |

View File

@@ -9,8 +9,8 @@ contexts:
     cluster: test-cluster
     user: test-user
     namespace: zoinx
-  name: test
-current-context: test
+  name: k3s
+current-context: k3s
 kind: Config
 preferences: {}
 users:

View File

@@ -167,11 +167,12 @@ func NewKubernetesConfig(configPath string) clientcmd.ClientConfig {
 // ConfigFromEndpoint loads kubernetes config from endpoint
 func ConfigFromEndpoint(endpointName string, s store.Reader) (clientcmd.ClientConfig, error) {
 	if strings.HasPrefix(endpointName, "kubernetes://") {
+		rules := clientcmd.NewDefaultClientConfigLoadingRules()
 		u, _ := url.Parse(endpointName)
 		if kubeconfig := u.Query().Get("kubeconfig"); kubeconfig != "" {
-			_ = os.Setenv(clientcmd.RecommendedConfigPathEnvVar, kubeconfig)
+			rules.Precedence = append(rules.Precedence, kubeconfig)
+			rules.ExplicitPath = kubeconfig
 		}
-		rules := clientcmd.NewDefaultClientConfigLoadingRules()
 		apiConfig, err := rules.Load()
 		if err != nil {
 			return nil, err

View File

@@ -1,20 +1,35 @@
 package context

 import (
-	"os"
 	"testing"

 	"github.com/docker/cli/cli/command"
+	"github.com/docker/cli/cli/config"
+	"github.com/docker/cli/cli/context/store"
 	cliflags "github.com/docker/cli/cli/flags"
 	"github.com/stretchr/testify/assert"
 	"github.com/stretchr/testify/require"
 )

 func TestDefaultContextInitializer(t *testing.T) {
-	os.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
-	defer os.Unsetenv("KUBECONFIG")
+	t.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
 	ctx, err := command.ResolveDefaultContext(&cliflags.ClientOptions{}, command.DefaultContextStoreConfig())
 	require.NoError(t, err)
 	assert.Equal(t, "default", ctx.Meta.Name)
 	assert.Equal(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
 }
+
+func TestConfigFromEndpoint(t *testing.T) {
+	t.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
+	cfg, err := ConfigFromEndpoint(
+		"kubernetes:///buildx-test-4c972a3f9d369614b40f28a281790c7e?deployment=buildkit-4c2ed3ed-970f-4f3d-a6df-a4fcbab4d5cf-d9d73&kubeconfig=.%2Ffixtures%2Fk3s-kubeconfig",
+		store.New(config.ContextStoreDir(), command.DefaultContextStoreConfig()),
+	)
+	require.NoError(t, err)
+	rawcfg, err := cfg.RawConfig()
+	require.NoError(t, err)
+	ctxcfg := "k3s"
+	if _, ok := rawcfg.Contexts[ctxcfg]; !ok {
+		t.Errorf("Context config %q not found", ctxcfg)
+	}
+}

go.mod (14 changes)
View File

@@ -6,17 +6,17 @@ require (
 	github.com/Masterminds/semver/v3 v3.2.1
 	github.com/Microsoft/go-winio v0.6.2
 	github.com/aws/aws-sdk-go-v2/config v1.26.6
-	github.com/compose-spec/compose-go/v2 v2.1.1
+	github.com/compose-spec/compose-go/v2 v2.1.2
 	github.com/containerd/console v1.0.4
-	github.com/containerd/containerd v1.7.17
+	github.com/containerd/containerd v1.7.18
 	github.com/containerd/continuity v0.4.3
 	github.com/containerd/log v0.1.0
 	github.com/containerd/typeurl/v2 v2.1.1
 	github.com/creack/pty v1.1.18
 	github.com/distribution/reference v0.5.0
-	github.com/docker/cli v26.1.3+incompatible
+	github.com/docker/cli v26.1.4+incompatible
 	github.com/docker/cli-docs-tool v0.7.0
-	github.com/docker/docker v26.1.3+incompatible
+	github.com/docker/docker v26.1.4+incompatible
 	github.com/docker/go-units v0.5.0
 	github.com/gofrs/flock v0.8.1
 	github.com/gogo/protobuf v1.3.2
@@ -26,7 +26,7 @@ require (
 	github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992
 	github.com/hashicorp/hcl/v2 v2.20.1
 	github.com/in-toto/in-toto-golang v0.5.0
-	github.com/moby/buildkit v0.14.0-rc1
+	github.com/moby/buildkit v0.14.0-rc2
 	github.com/moby/sys/mountinfo v0.7.1
 	github.com/moby/sys/signal v0.7.0
 	github.com/morikuni/aec v1.0.0
@@ -45,7 +45,7 @@ require (
 	go.opentelemetry.io/otel/metric v1.21.0
 	go.opentelemetry.io/otel/sdk v1.21.0
 	go.opentelemetry.io/otel/trace v1.21.0
-	golang.org/x/mod v0.14.0
+	golang.org/x/mod v0.17.0
 	golang.org/x/sync v0.6.0
 	golang.org/x/sys v0.18.0
 	golang.org/x/term v0.18.0
@@ -79,6 +79,7 @@ require (
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cenkalti/backoff/v4 v4.2.1 // indirect
 	github.com/cespare/xxhash/v2 v2.2.0 // indirect
+	github.com/containerd/errdefs v0.1.0 // indirect
 	github.com/containerd/ttrpc v1.2.4 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/docker/distribution v2.8.2+incompatible // indirect
@@ -94,6 +95,7 @@ require (
 	github.com/go-openapi/jsonpointer v0.19.6 // indirect
 	github.com/go-openapi/jsonreference v0.20.2 // indirect
 	github.com/go-openapi/swag v0.22.3 // indirect
+	github.com/go-viper/mapstructure/v2 v2.0.0 // indirect
 	github.com/gogo/googleapis v1.4.1 // indirect
 	github.com/google/gnostic-models v0.6.8 // indirect
 	github.com/google/go-cmp v0.6.0 // indirect

go.sum (28 changes)
View File

@@ -84,16 +84,18 @@ github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4 h1:/inchEIKaYC1Akx+H+g
 github.com/cncf/xds/go v0.0.0-20230607035331-e9ce68804cb4/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
-github.com/compose-spec/compose-go/v2 v2.1.1 h1:tKuYJwAVgxIryRrsvWJSf1kNviVOQVVqwyHsV6YoIUc=
-github.com/compose-spec/compose-go/v2 v2.1.1/go.mod h1:bEPizBkIojlQ20pi2vNluBa58tevvj0Y18oUSHPyfdc=
+github.com/compose-spec/compose-go/v2 v2.1.2 h1:N2XmNYg5jHNBaU+4/zSAe2UrZLq7Kkp1eSsOHfAHbxQ=
+github.com/compose-spec/compose-go/v2 v2.1.2/go.mod h1:NJGRGazJfh0tD7d13h66KDVvyOHK49Wil2CIhoffiD0=
 github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
 github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
 github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
 github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
-github.com/containerd/containerd v1.7.17 h1:KjNnn0+tAVQHAoaWRjmdak9WlvnFR/8rU1CHHy8Rm2A=
-github.com/containerd/containerd v1.7.17/go.mod h1:vK+hhT4TIv2uejlcDlbVIc8+h/BqtKLIyNrtCZol8lI=
+github.com/containerd/containerd v1.7.18 h1:jqjZTQNfXGoEaZdW1WwPU0RqSn1Bm2Ay/KJPUuO8nao=
+github.com/containerd/containerd v1.7.18/go.mod h1:IYEk9/IO6wAPUz2bCMVUbsfXjzw5UNP5fLz4PsUygQ4=
 github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
 github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
+github.com/containerd/errdefs v0.1.0 h1:m0wCRBiu1WJT/Fr+iOoQHMQS/eP5myQ8lCv4Dz5ZURM=
+github.com/containerd/errdefs v0.1.0/go.mod h1:YgWiiHtLmSeBrvpw+UfPijzbLaB77mEG1WwJTDETIV0=
 github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
 github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
 github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
@@ -117,15 +119,15 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
 github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v26.1.3+incompatible h1:bUpXT/N0kDE3VUHI2r5VMsYQgi38kYuoC0oL9yt3lqc=
-github.com/docker/cli v26.1.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v26.1.4+incompatible h1:I8PHdc0MtxEADqYJZvhBrW9bo8gawKwwenxRM7/rLu8=
+github.com/docker/cli v26.1.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli-docs-tool v0.7.0 h1:M2Da98Unz2kz3A5d4yeSGbhyOge2mfYSNjAFt01Rw0M=
 github.com/docker/cli-docs-tool v0.7.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
 github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v26.1.3+incompatible h1:lLCzRbrVZrljpVNobJu1J2FHk8V0s4BawoZippkc+xo=
-github.com/docker/docker v26.1.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v26.1.4+incompatible h1:vuTpXDuoga+Z38m1OZHzl7NKisKWaWlhjQk7IDPSLsU=
+github.com/docker/docker v26.1.4+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
 github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
 github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -174,6 +176,8 @@ github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572 h1:tfuBGBXKqDEe
 github.com/go-task/slim-sprig v0.0.0-20230315185526-52ccab3ef572/go.mod h1:9Pwr4B2jHnOSGXyyzV8ROjYa2ojvAY6HCGYYfMoC3Ls=
 github.com/go-test/deep v1.0.3 h1:ZrJSEWsXzPOxaZnFteGEfooLba+ju3FYIbOrS+rQd68=
 github.com/go-test/deep v1.0.3/go.mod h1:wGDj63lr65AM2AQyKZd/NYHGb0R+1RLqB8NKt3aSFNA=
+github.com/go-viper/mapstructure/v2 v2.0.0 h1:dhn8MZ1gZ0mzeodTG3jt5Vj/o87xZKuNAprG2mQfMfc=
+github.com/go-viper/mapstructure/v2 v2.0.0/go.mod h1:oJDH3BJKyqBA2TXFhDsKDGDTlndYOZ6rGS0BRZIxGhM=
 github.com/gofrs/flock v0.8.1 h1:+gYjHKf32LDeiEEFhQaotPbLuUXjY5ZqxKgXy7n59aw=
 github.com/gofrs/flock v0.8.1/go.mod h1:F1TvTiK9OcQqauNUHlbJvyl9Qa1QvF/gOUDKA14jxHU=
 github.com/gogo/googleapis v1.4.1 h1:1Yx4Myt7BxzvUr5ldGSbwYiZG6t9wGBZ+8/fX3Wvtq0=
@@ -298,8 +302,8 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
 github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
-github.com/moby/buildkit v0.14.0-rc1 h1:pdnGXXT0Wu480PMQzyFUBN/LgtfIwlYoLlT7jba1IVQ=
-github.com/moby/buildkit v0.14.0-rc1/go.mod h1:J3TW5gH3fc2BVrLnqxnGGQauChFwCzhv6FPkBKTGciU=
+github.com/moby/buildkit v0.14.0-rc2 h1:qvl0hOKeyAWReOkksNtstQjPNaAD4jN3Dvq4r7slqYM=
+github.com/moby/buildkit v0.14.0-rc2/go.mod h1:/ZJNHNVso1nf063XlDhEkNEcRNW19utVpUKixCUo9Ks=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
 github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
@@ -502,8 +506,8 @@ golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3/go.mod h1:idGWGoKP1toJGkd5/ig9ZLuPcZBC3ewk7SzmH0uou08=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.14.0 h1:dGoOF9QVLYng8IHTm7BAyWqCqSheQ5pYWGhzW00YJr0=
-golang.org/x/mod v0.14.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
+golang.org/x/mod v0.17.0 h1:zY54UmvipHiNd+pm+m0x9KhZ9hl1/7QNMyxXbc6ICqA=
+golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
 golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=

View File

@@ -215,14 +215,16 @@ func WithLoadOptions(loadOptions ...func(*loader.Options)) ProjectOptionsFn {
 // WithDefaultProfiles uses the provided profiles (if any), and falls back to
 // profiles specified via the COMPOSE_PROFILES environment variable otherwise.
-func WithDefaultProfiles(profile ...string) ProjectOptionsFn {
-	if len(profile) == 0 {
-		for _, s := range strings.Split(os.Getenv(consts.ComposeProfiles), ",") {
-			profile = append(profile, strings.TrimSpace(s))
+func WithDefaultProfiles(profiles ...string) ProjectOptionsFn {
+	return func(o *ProjectOptions) error {
+		if len(profiles) == 0 {
+			for _, s := range strings.Split(o.Environment[consts.ComposeProfiles], ",") {
+				profiles = append(profiles, strings.TrimSpace(s))
+			}
 		}
+		o.loadOptions = append(o.loadOptions, loader.WithProfiles(profiles))
+		return nil
 	}
-	return WithProfiles(profile)
 }

 // WithProfiles sets profiles to be activated

View File

@@ -39,7 +39,7 @@ import (
 	"github.com/compose-spec/compose-go/v2/tree"
 	"github.com/compose-spec/compose-go/v2/types"
 	"github.com/compose-spec/compose-go/v2/validation"
-	"github.com/mitchellh/mapstructure"
+	"github.com/go-viper/mapstructure/v2"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/exp/slices"
 	"gopkg.in/yaml.v3"

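The import swap above is intended to be source-compatible: github.com/go-viper/mapstructure/v2 keeps the mitchellh decoding API, so compose-go's call sites continue to work. A minimal sketch under that assumption (the `service` struct and input map are illustrative, not from this changeset):

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

// service is an illustrative target; compose-go decodes YAML-derived maps
// into its typed structs in the same way.
type service struct {
	Image string   `mapstructure:"image"`
	Ports []string `mapstructure:"ports"`
}

func main() {
	raw := map[string]any{
		"image": "nginx:alpine",
		"ports": []string{"8080:80"},
	}
	var svc service
	if err := mapstructure.Decode(raw, &svc); err != nil {
		panic(err)
	}
	fmt.Printf("%+v\n", svc)
}
```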
View File

@@ -206,7 +206,7 @@ func envFileIndexer(y any, p tree.Path) (string, error) {
 		if pathValue, ok := value["path"]; ok {
 			return pathValue.(string), nil
 		}
-		return "", fmt.Errorf("environment path attribut %s is missing", p)
+		return "", fmt.Errorf("environment path attribute %s is missing", p)
 	}
 	return "", nil
 }

View File

@@ -21,7 +21,7 @@ import (
 	"github.com/compose-spec/compose-go/v2/tree"
 	"github.com/compose-spec/compose-go/v2/types"
-	"github.com/mitchellh/mapstructure"
+	"github.com/go-viper/mapstructure/v2"
 )

 func transformPorts(data any, p tree.Path, ignoreParseError bool) (any, error) {

View File

@@ -21,7 +21,7 @@ import (
 	"runtime"
 	"strings"
-	"github.com/mitchellh/mapstructure"
+	"github.com/go-viper/mapstructure/v2"
 )

 var (

View File

@@ -29,7 +29,7 @@ import (
 	"strconv"
 	"sync"
-	"github.com/containerd/containerd/log"
+	"github.com/containerd/log"
 	"github.com/klauspost/compress/zstd"
 )

View File

@@ -24,9 +24,9 @@ import (
 	"sync"
 	"time"

-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/pkg/randutil"
+	"github.com/containerd/errdefs"
+	"github.com/containerd/log"
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

View File

@@ -21,7 +21,7 @@ import (
 	"sync"
 	"time"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 )

 // Handles locking references

View File

@@ -22,7 +22,7 @@ import (
 	"os"
 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 )

 // readerat implements io.ReaderAt in a completely stateless manner by opening

View File

@@ -28,10 +28,10 @@ import (
 	"time"

 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/filters"
-	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/pkg/randutil"
+	"github.com/containerd/errdefs"
+	"github.com/containerd/log"
 	"github.com/sirupsen/logrus"

 	"github.com/opencontainers/go-digest"

View File

@@ -27,8 +27,8 @@ import (
 	"time"

 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/log"
+	"github.com/containerd/errdefs"
+	"github.com/containerd/log"
 	"github.com/opencontainers/go-digest"
 )

View File

@@ -22,9 +22,9 @@ import (
 	contentapi "github.com/containerd/containerd/api/services/content/v1"
 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/protobuf"
 	protobuftypes "github.com/containerd/containerd/protobuf/types"
+	"github.com/containerd/errdefs"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

View File

@@ -23,8 +23,8 @@ import (
 	contentapi "github.com/containerd/containerd/api/services/content/v1"
 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/protobuf"
+	"github.com/containerd/errdefs"
 	digest "github.com/opencontainers/go-digest"
 )

View File

@@ -0,0 +1,116 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with fmt.Errorf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
//
// The functions ToGRPC and FromGRPC can be used to map server-side and
// client-side errors to the correct types.
package errdefs
import (
"github.com/containerd/errdefs"
)
// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these errors classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// For the most part, we just try to provide local grpc errors. Most conditions
// map very well to those defined by grpc.
var (
ErrUnknown = errdefs.ErrUnknown
ErrInvalidArgument = errdefs.ErrInvalidArgument
ErrNotFound = errdefs.ErrNotFound
ErrAlreadyExists = errdefs.ErrAlreadyExists
ErrFailedPrecondition = errdefs.ErrFailedPrecondition
ErrUnavailable = errdefs.ErrUnavailable
ErrNotImplemented = errdefs.ErrNotImplemented
)
// IsInvalidArgument returns true if the error is due to an invalid argument
func IsInvalidArgument(err error) bool {
return errdefs.IsInvalidArgument(err)
}
// IsNotFound returns true if the error is due to a missing object
func IsNotFound(err error) bool {
return errdefs.IsNotFound(err)
}
// IsAlreadyExists returns true if the error is due to an already existing
// metadata item
func IsAlreadyExists(err error) bool {
return errdefs.IsAlreadyExists(err)
}
// IsFailedPrecondition returns true if an operation could not proceed to the
// lack of a particular condition
func IsFailedPrecondition(err error) bool {
return errdefs.IsFailedPrecondition(err)
}
// IsUnavailable returns true if the error is due to a resource being unavailable
func IsUnavailable(err error) bool {
return errdefs.IsUnavailable(err)
}
// IsNotImplemented returns true if the error is due to not being implemented
func IsNotImplemented(err error) bool {
return errdefs.IsNotImplemented(err)
}
// IsCanceled returns true if the error is due to `context.Canceled`.
func IsCanceled(err error) bool {
return errdefs.IsCanceled(err)
}
// IsDeadlineExceeded returns true if the error is due to
// `context.DeadlineExceeded`.
func IsDeadlineExceeded(err error) bool {
return errdefs.IsDeadlineExceeded(err)
}
// ToGRPC will attempt to map the backend containerd error into a grpc error,
// using the original error message as a description.
//
// Further information may be extracted from certain errors depending on their
// type.
//
// If the error is unmapped, the original error will be returned to be handled
// by the regular grpc error handling stack.
func ToGRPC(err error) error {
return errdefs.ToGRPC(err)
}
// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
func ToGRPCf(err error, format string, args ...interface{}) error {
return errdefs.ToGRPCf(err, format, args...)
}
// FromGRPC returns the underlying error from a grpc service based on the grpc error code
func FromGRPC(err error) error {
return errdefs.FromGRPC(err)
}
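A usage sketch for the forwarding package above: the re-exported sentinels still compose with `fmt.Errorf` and `%w`, so callers can classify wrapped errors; the `findImage` helper and image name are illustrative only, not part of this changeset.

```go
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
)

// findImage is a hypothetical lookup that wraps the ErrNotFound sentinel
// with request context while keeping the error class detectable.
func findImage(name string) error {
	return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound)
}

func main() {
	if err := findImage("docker.io/library/missing:latest"); errdefs.IsNotFound(err) {
		fmt.Println("not found:", err)
	}
}
```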

View File

@@ -70,7 +70,7 @@ package filters
 import (
 	"regexp"
-	"github.com/containerd/containerd/log"
+	"github.com/containerd/log"
 )

 // Filter matches specific resources based the provided filter

View File

@@ -20,7 +20,7 @@ import (
 	"fmt"
 	"io"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 )

 /*

View File

@@ -28,7 +28,7 @@ import (
 	"fmt"
 	"regexp"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 )

 const (

View File

@@ -27,10 +27,10 @@ import (
 	"strings"

 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/labels"
 	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/errdefs"
 	"github.com/containerd/log"
 	digest "github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go"

View File

@@ -29,11 +29,11 @@ import (
 	"github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/labels"
-	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/errdefs"
+	"github.com/containerd/log"
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"

View File

@@ -23,8 +23,8 @@ import (
 	"sort"

 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/errdefs"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/sync/semaphore"

View File

@@ -24,9 +24,9 @@ import (
 	"time"

 	"github.com/containerd/containerd/content"
-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
+	"github.com/containerd/errdefs"
+	"github.com/containerd/log"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )
@@ -268,6 +268,9 @@ func Platforms(ctx context.Context, provider content.Provider, image ocispec.Des
 	var platformSpecs []ocispec.Platform
 	return platformSpecs, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 		if desc.Platform != nil {
+			if desc.Platform.OS == "unknown" || desc.Platform.Architecture == "unknown" {
+				return nil, ErrSkipDesc
+			}
 			platformSpecs = append(platformSpecs, *desc.Platform)
 			return nil, ErrSkipDesc
 		}

View File

@@ -22,7 +22,7 @@ import (
 	"sort"
 	"strings"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

View File

@@ -19,7 +19,7 @@ package labels
 import (
 	"fmt"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 )

 const (

View File

@@ -1,149 +0,0 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package log
import (
"context"
"github.com/containerd/log"
)
// G is a shorthand for [GetLogger].
//
// Deprecated: use [log.G].
var G = log.G
// L is an alias for the standard logger.
//
// Deprecated: use [log.L].
var L = log.L
// Fields type to pass to "WithFields".
//
// Deprecated: use [log.Fields].
type Fields = log.Fields
// Entry is a logging entry.
//
// Deprecated: use [log.Entry].
type Entry = log.Entry
// RFC3339NanoFixed is [time.RFC3339Nano] with nanoseconds padded using
// zeros to ensure the formatted time is always the same number of
// characters.
//
// Deprecated: use [log.RFC3339NanoFixed].
const RFC3339NanoFixed = log.RFC3339NanoFixed
// Level is a logging level.
//
// Deprecated: use [log.Level].
type Level = log.Level
// Supported log levels.
const (
// TraceLevel level.
//
// Deprecated: use [log.TraceLevel].
TraceLevel Level = log.TraceLevel
// DebugLevel level.
//
// Deprecated: use [log.DebugLevel].
DebugLevel Level = log.DebugLevel
// InfoLevel level.
//
// Deprecated: use [log.InfoLevel].
InfoLevel Level = log.InfoLevel
// WarnLevel level.
//
// Deprecated: use [log.WarnLevel].
WarnLevel Level = log.WarnLevel
// ErrorLevel level
//
// Deprecated: use [log.ErrorLevel].
ErrorLevel Level = log.ErrorLevel
// FatalLevel level.
//
// Deprecated: use [log.FatalLevel].
FatalLevel Level = log.FatalLevel
// PanicLevel level.
//
// Deprecated: use [log.PanicLevel].
PanicLevel Level = log.PanicLevel
)
// SetLevel sets log level globally. It returns an error if the given
// level is not supported.
//
// Deprecated: use [log.SetLevel].
func SetLevel(level string) error {
return log.SetLevel(level)
}
// GetLevel returns the current log level.
//
// Deprecated: use [log.GetLevel].
func GetLevel() log.Level {
return log.GetLevel()
}
// OutputFormat specifies a log output format.
//
// Deprecated: use [log.OutputFormat].
type OutputFormat = log.OutputFormat
// Supported log output formats.
const (
// TextFormat represents the text logging format.
//
// Deprecated: use [log.TextFormat].
TextFormat log.OutputFormat = "text"
// JSONFormat represents the JSON logging format.
//
// Deprecated: use [log.JSONFormat].
JSONFormat log.OutputFormat = "json"
)
// SetFormat sets the log output format.
//
// Deprecated: use [log.SetFormat].
func SetFormat(format OutputFormat) error {
return log.SetFormat(format)
}
// WithLogger returns a new context with the provided logger. Use in
// combination with logger.WithField(s) for great effect.
//
// Deprecated: use [log.WithLogger].
func WithLogger(ctx context.Context, logger *log.Entry) context.Context {
return log.WithLogger(ctx, logger)
}
// GetLogger retrieves the current logger from the context. If no logger is
// available, the default logger is returned.
//
// Deprecated: use [log.GetLogger].
func GetLogger(ctx context.Context) *log.Entry {
return log.GetLogger(ctx)
}

View File

@@ -21,8 +21,8 @@ import (
 	"fmt"
 	"os"

-	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/identifiers"
+	"github.com/containerd/errdefs"
 )

 const (

View File

@@ -20,7 +20,7 @@ import (
 	"runtime"
 	"sync"
-	"github.com/containerd/containerd/log"
+	"github.com/containerd/log"
 )

 // Present the ARM instruction set architecture, eg: v7, v8

View File

@@ -24,7 +24,7 @@ import (
 	"runtime"
 	"strings"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 	"golang.org/x/sys/unix"
 )

View File

@@ -22,7 +22,7 @@ import (
 	"fmt"
 	"runtime"
-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 )

 func getCPUVariant() (string, error) {

View File

@@ -116,7 +116,7 @@ import (
 	specs "github.com/opencontainers/image-spec/specs-go/v1"

-	"github.com/containerd/containerd/errdefs"
+	"github.com/containerd/errdefs"
 )

 var (

View File

@@ -26,9 +26,9 @@ import (
 	"strings"
 	"time"

-	"github.com/containerd/containerd/log"
 	remoteserrors "github.com/containerd/containerd/remotes/errors"
 	"github.com/containerd/containerd/version"
+	"github.com/containerd/log"
 )

 var (

View File

@@ -25,10 +25,10 @@ import (
 	"strings"
 	"sync"

-	"github.com/containerd/containerd/errdefs"
-	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/remotes/docker/auth"
 	remoteerrors "github.com/containerd/containerd/remotes/errors"
+	"github.com/containerd/errdefs"
+	"github.com/containerd/log"
 )

 type dockerAuthorizer struct {

View File

@@ -24,8 +24,8 @@ import (
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/images"
-	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/remotes"
+	"github.com/containerd/log"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

View File

@@ -24,7 +24,7 @@ import (
 	fuzz "github.com/AdaLogics/go-fuzz-headers"
 	"github.com/containerd/containerd/content/local"
-	"github.com/containerd/containerd/log"
+	"github.com/containerd/log"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/sirupsen/logrus"
 )

View File

@@ -26,9 +26,9 @@ import (
 "net/url"
 "strings"
-"github.com/containerd/containerd/errdefs"
 "github.com/containerd/containerd/images"
-"github.com/containerd/containerd/log"
+"github.com/containerd/errdefs"
+"github.com/containerd/log"
 digest "github.com/opencontainers/go-digest"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

View File

@@ -25,8 +25,8 @@ import (
 "github.com/containerd/containerd/content"
 "github.com/containerd/containerd/images"
 "github.com/containerd/containerd/labels"
-"github.com/containerd/containerd/log"
 "github.com/containerd/containerd/reference"
+"github.com/containerd/log"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

View File

@@ -21,8 +21,8 @@ import (
 "fmt"
 "io"
-"github.com/containerd/containerd/errdefs"
-"github.com/containerd/containerd/log"
+"github.com/containerd/errdefs"
+"github.com/containerd/log"
 )
 const maxRetry = 3

View File

@@ -29,11 +29,11 @@ import (
 "time"
 "github.com/containerd/containerd/content"
-"github.com/containerd/containerd/errdefs"
 "github.com/containerd/containerd/images"
-"github.com/containerd/containerd/log"
 "github.com/containerd/containerd/remotes"
 remoteserrors "github.com/containerd/containerd/remotes/errors"
+"github.com/containerd/errdefs"
+"github.com/containerd/log"
 digest "github.com/opencontainers/go-digest"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

View File

@@ -28,15 +28,15 @@ import (
 "path"
 "strings"
-"github.com/containerd/containerd/errdefs"
 "github.com/containerd/containerd/images"
-"github.com/containerd/containerd/log"
 "github.com/containerd/containerd/reference"
 "github.com/containerd/containerd/remotes"
 "github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
 remoteerrors "github.com/containerd/containerd/remotes/errors"
 "github.com/containerd/containerd/tracing"
 "github.com/containerd/containerd/version"
+"github.com/containerd/errdefs"
+"github.com/containerd/log"
 "github.com/opencontainers/go-digest"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 )

View File

@@ -34,11 +34,11 @@ import (
 "github.com/containerd/containerd/archive/compression"
 "github.com/containerd/containerd/content"
-"github.com/containerd/containerd/errdefs"
 "github.com/containerd/containerd/images"
 "github.com/containerd/containerd/labels"
-"github.com/containerd/containerd/log"
 "github.com/containerd/containerd/remotes"
+"github.com/containerd/errdefs"
+"github.com/containerd/log"
 digest "github.com/opencontainers/go-digest"
 specs "github.com/opencontainers/image-spec/specs-go"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"

View File

@@ -21,7 +21,7 @@ import (
 "sync"
 "github.com/containerd/containerd/content"
-"github.com/containerd/containerd/errdefs"
+"github.com/containerd/errdefs"
 "github.com/moby/locker"
 )

View File

@@ -26,11 +26,11 @@ import (
 "sync"
 "github.com/containerd/containerd/content"
-"github.com/containerd/containerd/errdefs"
 "github.com/containerd/containerd/images"
 "github.com/containerd/containerd/labels"
-"github.com/containerd/containerd/log"
 "github.com/containerd/containerd/platforms"
+"github.com/containerd/errdefs"
+"github.com/containerd/log"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 "golang.org/x/sync/semaphore"
 )

View File

@@ -24,10 +24,10 @@ import (
 api "github.com/containerd/containerd/api/services/content/v1"
 "github.com/containerd/containerd/content"
-"github.com/containerd/containerd/errdefs"
-"github.com/containerd/containerd/log"
 "github.com/containerd/containerd/protobuf"
 ptypes "github.com/containerd/containerd/protobuf/types"
+"github.com/containerd/errdefs"
+"github.com/containerd/log"
 digest "github.com/opencontainers/go-digest"
 ocispec "github.com/opencontainers/image-spec/specs-go/v1"
 "google.golang.org/grpc"

View File

@@ -23,7 +23,7 @@ var (
 Package = "github.com/containerd/containerd"
 // Version holds the complete version number. Filled in at linking time.
-Version = "1.7.17+unknown"
+Version = "1.7.18+unknown"
 // Revision is filled with the VCS (e.g. git) revision being used to build
 // the program at linking time.

191
vendor/github.com/containerd/errdefs/LICENSE generated vendored Normal file
View File

@@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright The containerd Authors
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

13
vendor/github.com/containerd/errdefs/README.md generated vendored Normal file
View File

@@ -0,0 +1,13 @@
# errdefs
A Go package for defining and checking common containerd errors.
## Project details
**errdefs** is a containerd sub-project, licensed under the [Apache 2.0 license](./LICENSE).
As a containerd sub-project, you will find the:
* [Project governance](https://github.com/containerd/project/blob/main/GOVERNANCE.md),
* [Maintainers](https://github.com/containerd/project/blob/main/MAINTAINERS),
* and [Contributing guidelines](https://github.com/containerd/project/blob/main/CONTRIBUTING.md)
information in our [`containerd/project`](https://github.com/containerd/project) repository.
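
As an editorial illustration (not part of the vendored README above): the package is typically consumed through its sentinel errors and matching helpers. The sketch below assumes the well-known `ErrNotFound`/`IsNotFound` pair; the `lookup` helper and image name are made up for the example, and `%w` wrapping keeps the sentinel detectable through the error chain.

```go
package main

import (
	"fmt"

	"github.com/containerd/errdefs"
)

// lookup is a made-up helper that fails with the package's NotFound sentinel.
func lookup(name string) error {
	// %w keeps the sentinel in the error chain, so errdefs.IsNotFound still matches.
	return fmt.Errorf("image %q: %w", name, errdefs.ErrNotFound)
}

func main() {
	if err := lookup("busybox:latest"); errdefs.IsNotFound(err) {
		fmt.Println("not found:", err)
	}
}
```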

View File

@@ -6,6 +6,7 @@ import (
 "github.com/docker/cli/cli-plugins/hooks"
 "github.com/docker/cli/cli/command"
+"github.com/sirupsen/logrus"
 "github.com/spf13/cobra"
 "github.com/spf13/pflag"
 )
@@ -92,11 +93,35 @@ func invokeAndCollectHooks(dockerCli command.Cli, rootCmd, subCmd *cobra.Command
         if err != nil {
             continue
         }
-        nextSteps = append(nextSteps, processedHook...)
+
+        var appended bool
+        nextSteps, appended = appendNextSteps(nextSteps, processedHook)
+        if !appended {
+            logrus.Debugf("Plugin %s responded with an empty hook message %q. Ignoring.", pluginName, string(hookReturn))
+        }
     }
     return nextSteps
 }

+// appendNextSteps appends the processed hook output to the nextSteps slice.
+// If the processed hook output is empty, it is not appended.
+// Empty lines are not stripped if there's at least one non-empty line.
+func appendNextSteps(nextSteps []string, processed []string) ([]string, bool) {
+    empty := true
+    for _, l := range processed {
+        if strings.TrimSpace(l) != "" {
+            empty = false
+            break
+        }
+    }
+
+    if empty {
+        return nextSteps, false
+    }
+
+    return append(nextSteps, processed...), true
+}
+
 // pluginMatch takes a plugin configuration and a string representing the
 // command being executed (such as 'image ls' the root 'docker' is omitted)
 // and, if the configuration includes a hook for the invoked command, returns

View File

@@ -1,5 +1,7 @@
 package formatter

+import "encoding/json"
+
 const (
     // ClientContextTableFormat is the default client context format.
     ClientContextTableFormat = "table {{.Name}}{{if .Current}} *{{end}}\t{{.Description}}\t{{.DockerEndpoint}}\t{{.Error}}"
@@ -28,6 +30,13 @@ type ClientContext struct {
     DockerEndpoint string
     Current        bool
     Error          string
+
+    // ContextType is a temporary field for compatibility with
+    // Visual Studio, which depends on this from the "cloud integration"
+    // wrapper.
+    //
+    // Deprecated: this type is only for backward-compatibility. Do not use.
+    ContextType string `json:"ContextType,omitempty"`
 }

 // ClientContextWrite writes formatted contexts using the Context
@@ -60,6 +69,13 @@ func newClientContextContext() *clientContextContext {
 }

 func (c *clientContextContext) MarshalJSON() ([]byte, error) {
+    if c.c.ContextType != "" {
+        // We only have ContextType set for plain "json" or "{{json .}}" formatting,
+        // so we should be able to just use the default json.Marshal with no
+        // special handling.
+        return json.Marshal(c.c)
+    }
+    // FIXME(thaJeztah): why do we need a special marshal function here?
     return MarshalJSON(c)
 }

View File

@@ -2179,72 +2179,129 @@ definitions:
 type: "object"
 properties:
 Name:
+description: |
+Name of the network.
 type: "string"
+example: "my_network"
 Id:
+description: |
+ID that uniquely identifies a network on a single machine.
 type: "string"
+example: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
 Created:
+description: |
+Date and time at which the network was created in
+[RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format with nano-seconds.
 type: "string"
 format: "dateTime"
+example: "2016-10-19T04:33:30.360899459Z"
 Scope:
+description: |
+The level at which the network exists (e.g. `swarm` for cluster-wide
+or `local` for machine level)
 type: "string"
+example: "local"
 Driver:
+description: |
+The name of the driver used to create the network (e.g. `bridge`,
+`overlay`).
 type: "string"
+example: "overlay"
 EnableIPv6:
+description: |
+Whether the network was created with IPv6 enabled.
 type: "boolean"
+example: false
 IPAM:
 $ref: "#/definitions/IPAM"
 Internal:
+description: |
+Whether the network is created to only allow internal networking
+connectivity.
 type: "boolean"
+default: false
+example: false
 Attachable:
+description: |
+Wheter a global / swarm scope network is manually attachable by regular
+containers from workers in swarm mode.
 type: "boolean"
+default: false
+example: false
 Ingress:
+description: |
+Whether the network is providing the routing-mesh for the swarm cluster.
 type: "boolean"
+default: false
+example: false
+ConfigFrom:
+$ref: "#/definitions/ConfigReference"
+ConfigOnly:
+description: |
+Whether the network is a config-only network. Config-only networks are
+placeholder networks for network configurations to be used by other
+networks. Config-only networks cannot be used directly to run containers
+or services.
+type: "boolean"
+default: false
 Containers:
+description: |
+Contains endpoints attached to the network.
 type: "object"
 additionalProperties:
 $ref: "#/definitions/NetworkContainer"
+example:
+19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
+Name: "test"
+EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
+MacAddress: "02:42:ac:13:00:02"
+IPv4Address: "172.19.0.2/16"
+IPv6Address: ""
 Options:
+description: |
+Network-specific options uses when creating the network.
 type: "object"
 additionalProperties:
 type: "string"
+example:
+com.docker.network.bridge.default_bridge: "true"
+com.docker.network.bridge.enable_icc: "true"
+com.docker.network.bridge.enable_ip_masquerade: "true"
+com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+com.docker.network.bridge.name: "docker0"
+com.docker.network.driver.mtu: "1500"
 Labels:
+description: "User-defined key/value metadata."
 type: "object"
 additionalProperties:
 type: "string"
 example:
-Name: "net01"
-Id: "7d86d31b1478e7cca9ebed7e73aa0fdeec46c5ca29497431d3007d2d9e15ed99"
-Created: "2016-10-19T04:33:30.360899459Z"
-Scope: "local"
-Driver: "bridge"
-EnableIPv6: false
-IPAM:
-Driver: "default"
-Config:
-- Subnet: "172.19.0.0/16"
-Gateway: "172.19.0.1"
-Options:
-foo: "bar"
-Internal: false
-Attachable: false
-Ingress: false
-Containers:
-19a4d5d687db25203351ed79d478946f861258f018fe384f229f2efa4b23513c:
-Name: "test"
-EndpointID: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
-MacAddress: "02:42:ac:13:00:02"
-IPv4Address: "172.19.0.2/16"
-IPv6Address: ""
-Options:
-com.docker.network.bridge.default_bridge: "true"
-com.docker.network.bridge.enable_icc: "true"
-com.docker.network.bridge.enable_ip_masquerade: "true"
-com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
-com.docker.network.bridge.name: "docker0"
-com.docker.network.driver.mtu: "1500"
-Labels:
-com.example.some-label: "some-value"
-com.example.some-other-label: "some-other-value"
+com.example.some-label: "some-value"
+com.example.some-other-label: "some-other-value"
+Peers:
+description: |
+List of peer nodes for an overlay network. This field is only present
+for overlay networks, and omitted for other network types.
+type: "array"
+items:
+$ref: "#/definitions/PeerInfo"
+x-nullable: true
+# TODO: Add Services (only present when "verbose" is set).
+ConfigReference:
+description: |
+The config-only network source to provide the configuration for
+this network.
+type: "object"
+properties:
+Network:
+description: |
+The name of the config-only network that provides the network's
+configuration. The specified network must be an existing config-only
+network. Only network names are allowed, not network IDs.
+type: "string"
+example: "config_only_network_01"
 IPAM:
 type: "object"
 properties:
@@ -2252,6 +2309,7 @@ definitions:
 description: "Name of the IPAM driver to use."
 type: "string"
 default: "default"
+example: "default"
 Config:
 description: |
 List of IPAM configuration options, specified as a map:
@@ -2267,16 +2325,21 @@ definitions:
 type: "object"
 additionalProperties:
 type: "string"
+example:
+foo: "bar"
 IPAMConfig:
 type: "object"
 properties:
 Subnet:
 type: "string"
+example: "172.20.0.0/16"
 IPRange:
 type: "string"
+example: "172.20.10.0/24"
 Gateway:
 type: "string"
+example: "172.20.10.11"
 AuxiliaryAddresses:
 type: "object"
 additionalProperties:
@@ -2287,14 +2350,35 @@ definitions:
 properties:
 Name:
 type: "string"
+example: "container_1"
 EndpointID:
 type: "string"
+example: "628cadb8bcb92de107b2a1e516cbffe463e321f548feb37697cce00ad694f21a"
 MacAddress:
 type: "string"
+example: "02:42:ac:13:00:02"
 IPv4Address:
 type: "string"
+example: "172.19.0.2/16"
 IPv6Address:
 type: "string"
+example: ""
+PeerInfo:
+description: |
+PeerInfo represents one peer of an overlay network.
+type: "object"
+properties:
+Name:
+description:
+ID of the peer-node in the Swarm cluster.
+type: "string"
+example: "6869d7c1732b"
+IP:
+description:
+IP-address of the peer-node in the Swarm cluster.
+type: "string"
+example: "10.133.77.91"
 BuildInfo:
 type: "object"
@@ -10104,14 +10188,22 @@ paths:
 Name:
 description: "The network's name."
 type: "string"
+example: "my_network"
 CheckDuplicate:
 description: |
 Deprecated: CheckDuplicate is now always enabled.
 type: "boolean"
+example: true
 Driver:
 description: "Name of the network driver plugin to use."
 type: "string"
 default: "bridge"
+example: "bridge"
+Scope:
+description: |
+The level at which the network exists (e.g. `swarm` for cluster-wide
+or `local` for machine level).
+type: "string"
 Internal:
 description: "Restrict external access to the network."
 type: "boolean"
@@ -10120,55 +10212,55 @@ paths:
 Globally scoped network is manually attachable by regular
 containers from workers in swarm mode.
 type: "boolean"
+example: true
 Ingress:
 description: |
 Ingress network is the network which provides the routing-mesh
 in swarm mode.
 type: "boolean"
+example: false
+ConfigOnly:
+description: |
+Creates a config-only network. Config-only networks are placeholder
+networks for network configurations to be used by other networks.
+Config-only networks cannot be used directly to run containers
+or services.
+type: "boolean"
+default: false
+example: false
+ConfigFrom:
+description: |
+Specifies the source which will provide the configuration for
+this network. The specified network must be an existing
+config-only network; see ConfigOnly.
+$ref: "#/definitions/ConfigReference"
 IPAM:
 description: "Optional custom IP scheme for the network."
 $ref: "#/definitions/IPAM"
 EnableIPv6:
 description: "Enable IPv6 on the network."
 type: "boolean"
+example: true
 Options:
 description: "Network specific options to be used by the drivers."
 type: "object"
 additionalProperties:
 type: "string"
+example:
+com.docker.network.bridge.default_bridge: "true"
+com.docker.network.bridge.enable_icc: "true"
+com.docker.network.bridge.enable_ip_masquerade: "true"
+com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
+com.docker.network.bridge.name: "docker0"
+com.docker.network.driver.mtu: "1500"
 Labels:
 description: "User-defined key/value metadata."
 type: "object"
 additionalProperties:
 type: "string"
 example:
-Name: "isolated_nw"
-CheckDuplicate: false
-Driver: "bridge"
-EnableIPv6: true
-IPAM:
-Driver: "default"
-Config:
-- Subnet: "172.20.0.0/16"
-IPRange: "172.20.10.0/24"
-Gateway: "172.20.10.11"
-- Subnet: "2001:db8:abcd::/64"
-Gateway: "2001:db8:abcd::1011"
-Options:
-foo: "bar"
-Internal: true
-Attachable: false
-Ingress: false
-Options:
-com.docker.network.bridge.default_bridge: "true"
-com.docker.network.bridge.enable_icc: "true"
-com.docker.network.bridge.enable_ip_masquerade: "true"
-com.docker.network.bridge.host_binding_ipv4: "0.0.0.0"
-com.docker.network.bridge.name: "docker0"
-com.docker.network.driver.mtu: "1500"
-Labels:
-com.example.some-label: "some-value"
-com.example.some-other-label: "some-other-value"
+com.example.some-label: "some-value"
+com.example.some-other-label: "some-other-value"
 tags: ["Network"]
/networks/{id}/connect: /networks/{id}/connect:

View File

@@ -457,24 +457,24 @@ type EndpointResource struct {
 type NetworkCreate struct {
     // Deprecated: CheckDuplicate is deprecated since API v1.44, but it defaults to true when sent by the client
    // package to older daemons.
     CheckDuplicate bool `json:",omitempty"`
-    Driver     string
-    Scope      string
-    EnableIPv6 bool
-    IPAM       *network.IPAM
-    Internal   bool
-    Attachable bool
-    Ingress    bool
-    ConfigOnly bool
-    ConfigFrom *network.ConfigReference
-    Options    map[string]string
-    Labels     map[string]string
+    Driver     string                   // Driver is the driver-name used to create the network (e.g. `bridge`, `overlay`)
+    Scope      string                   // Scope describes the level at which the network exists (e.g. `swarm` for cluster-wide or `local` for machine level).
+    EnableIPv6 bool                     // EnableIPv6 represents whether to enable IPv6.
+    IPAM       *network.IPAM            // IPAM is the network's IP Address Management.
+    Internal   bool                     // Internal represents if the network is used internal only.
+    Attachable bool                     // Attachable represents if the global scope is manually attachable by regular containers from workers in swarm mode.
+    Ingress    bool                     // Ingress indicates the network is providing the routing-mesh for the swarm cluster.
+    ConfigOnly bool                     // ConfigOnly creates a config-only network. Config-only networks are place-holder networks for network configurations to be used by other networks. ConfigOnly networks cannot be used directly to run containers or services.
+    ConfigFrom *network.ConfigReference // ConfigFrom specifies the source which will provide the configuration for this network. The specified network must be a config-only network; see [NetworkCreate.ConfigOnly].
+    Options    map[string]string        // Options specifies the network-specific options to use for when creating the network.
+    Labels     map[string]string        // Labels holds metadata specific to the network being created.
 }

 // NetworkCreateRequest is the request message sent to the server for network create call.
 type NetworkCreateRequest struct {
     NetworkCreate
-    Name string
+    Name string // Name is the requested name of the network.
 }

 // NetworkCreateResponse is the response message sent by the server for network create call

View File

@@ -0,0 +1,18 @@
root = true
[*]
charset = utf-8
end_of_line = lf
indent_size = 4
indent_style = space
insert_final_newline = true
trim_trailing_whitespace = true
[*.go]
indent_style = tab
[{Makefile,*.mk}]
indent_style = tab
[*.nix]
indent_size = 2

4
vendor/github.com/go-viper/mapstructure/v2/.envrc generated vendored Normal file
View File

@@ -0,0 +1,4 @@
if ! has nix_direnv_version || ! nix_direnv_version 3.0.4; then
source_url "https://raw.githubusercontent.com/nix-community/nix-direnv/3.0.4/direnvrc" "sha256-DzlYZ33mWF/Gs8DDeyjr8mnVmQGx7ASYqA5WlxwvBG4="
fi
use flake . --impure

View File

@@ -0,0 +1,6 @@
/.devenv/
/.direnv/
/.pre-commit-config.yaml
/bin/
/build/
/var/

View File

@@ -0,0 +1,23 @@
run:
timeout: 5m
linters-settings:
gci:
sections:
- standard
- default
- prefix(github.com/go-viper/mapstructure)
golint:
min-confidence: 0
goimports:
local-prefixes: github.com/go-viper/maptstructure
linters:
disable-all: true
enable:
- gci
- gofmt
- gofumpt
- goimports
- staticcheck
# - stylecheck

View File

@@ -1,3 +1,11 @@
+> [!WARNING]
+> As of v2 of this library, change log can be found in GitHub releases.
+
+## 1.5.1
+
+* Wrap errors so they're compatible with `errors.Is` and `errors.As` [GH-282]
+* Fix map of slices not decoding properly in certain cases. [GH-266]
+
 ## 1.5.0

 * New option `IgnoreUntaggedFields` to ignore decoding to any fields

80
vendor/github.com/go-viper/mapstructure/v2/README.md generated vendored Normal file
View File

@@ -0,0 +1,80 @@
# mapstructure
[![GitHub Workflow Status](https://img.shields.io/github/actions/workflow/status/go-viper/mapstructure/ci.yaml?branch=main&style=flat-square)](https://github.com/go-viper/mapstructure/actions?query=workflow%3ACI)
[![go.dev reference](https://img.shields.io/badge/go.dev-reference-007d9c?logo=go&logoColor=white&style=flat-square)](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2)
![Go Version](https://img.shields.io/badge/go%20version-%3E=1.18-61CFDD.svg?style=flat-square)
mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.
This library is most useful when decoding values from some data stream (JSON,
Gob, etc.) where you don't _quite_ know the structure of the underlying data
until you read a part of it. You can therefore read a `map[string]interface{}`
and use this library to decode it into the proper underlying native Go
structure.
## Installation
```shell
go get github.com/go-viper/mapstructure/v2
```
## Migrating from `github.com/mitchellh/mapstructure`
[@mitchellh](https://github.com/mitchellh) announced his intent to archive some of his unmaintained projects (see [here](https://gist.github.com/mitchellh/90029601268e59a29e64e55bab1c5bdc) and [here](https://github.com/mitchellh/mapstructure/issues/349)). This repository achieved the "blessed fork" status.
You can migrate to this package by changing your import paths in your Go files to `github.com/go-viper/mapstructure/v2`.
The API is the same, so you don't need to change anything else.
Here is a script that can help you with the migration:
```shell
sed -i 's/github.com\/mitchellh\/mapstructure/github.com\/go-viper\/mapstructure\/v2/g' $(find . -type f -name '*.go')
```
If you need more time to migrate your code, that is absolutely fine.
Some of the latest fixes are backported to the v1 release branch of this package, so you can use the Go modules `replace` feature until you are ready to migrate:
```shell
replace github.com/mitchellh/mapstructure => github.com/go-viper/mapstructure v1.6.0
```
## Usage & Example
For usage and examples see the [documentation](https://pkg.go.dev/mod/github.com/go-viper/mapstructure/v2).
The `Decode` function has examples associated with it there.
## But Why?!
Go offers fantastic standard libraries for decoding formats such as JSON.
The standard method is to have a struct pre-created, and populate that struct
from the bytes of the encoded format. This is great, but the problem is if
you have configuration or an encoding that changes slightly depending on
specific fields. For example, consider this JSON:
```json
{
"type": "person",
"name": "Mitchell"
}
```
Perhaps we can't populate a specific structure without first reading
the "type" field from the JSON. We could always do two passes over the
decoding of the JSON (reading the "type" first, and the rest later).
However, it is much simpler to just decode this into a `map[string]interface{}`
structure, read the "type" key, then use something like this library
to decode it into the proper structure.
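
To make the two-pass idea above concrete, here is a minimal sketch (an editorial illustration, not part of the upstream README) that first unmarshals into a generic map, inspects the "type" key, and only then decodes into a struct with the default decoder settings:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Person struct {
	Name string
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map so the "type" field can be inspected.
	var generic map[string]interface{}
	if err := json.Unmarshal(raw, &generic); err != nil {
		panic(err)
	}

	// Second pass: now that the type is known, decode the same map into the
	// concrete structure.
	if generic["type"] == "person" {
		var p Person
		if err := mapstructure.Decode(generic, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	}
}
```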
## Credits
Mapstructure was originally created by [@mitchellh](https://github.com/mitchellh).
This is a maintained fork of the original library.
Read more about the reasons for the fork [here](https://github.com/mitchellh/mapstructure/issues/349).
## License
The project is licensed under the [MIT License](LICENSE).

View File

@@ -0,0 +1,577 @@
package mapstructure
import (
"encoding"
"errors"
"fmt"
"net"
"net/netip"
"reflect"
"strconv"
"strings"
"time"
)
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
// Create variables here so we can reference them with the reflect pkg
var f1 DecodeHookFuncType
var f2 DecodeHookFuncKind
var f3 DecodeHookFuncValue
// Fill in the variables into this interface and the rest is done
// automatically using the reflect package.
potential := []interface{}{f1, f2, f3}
v := reflect.ValueOf(h)
vt := v.Type()
for _, raw := range potential {
pt := reflect.ValueOf(raw).Type()
if vt.ConvertibleTo(pt) {
return v.Convert(pt).Interface()
}
}
return nil
}
// DecodeHookExec executes the given decode hook. This should be used
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
// that took reflect.Kind instead of reflect.Type.
func DecodeHookExec(
raw DecodeHookFunc,
from reflect.Value, to reflect.Value,
) (interface{}, error) {
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return f(from.Type(), to.Type(), from.Interface())
case DecodeHookFuncKind:
return f(from.Kind(), to.Kind(), from.Interface())
case DecodeHookFuncValue:
return f(from, to)
default:
return nil, errors.New("invalid decode hook signature")
}
}
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
// automatically composes multiple DecodeHookFuncs.
//
// The composed funcs are called in order, with the result of the
// previous transformation.
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
var err error
data := f.Interface()
newFrom := f
for _, f1 := range fs {
data, err = DecodeHookExec(f1, newFrom, t)
if err != nil {
return nil, err
}
newFrom = reflect.ValueOf(data)
}
return data, nil
}
}
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
return func(a, b reflect.Value) (interface{}, error) {
var allErrs string
var out interface{}
var err error
for _, f := range ff {
out, err = DecodeHookExec(f, a, b)
if err != nil {
allErrs += err.Error() + "\n"
continue
}
return out, nil
}
return nil, errors.New(allErrs)
}
}
// StringToSliceHookFunc returns a DecodeHookFunc that converts
// string to []string by splitting on the given sep.
func StringToSliceHookFunc(sep string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.SliceOf(f) {
return data, nil
}
raw := data.(string)
if raw == "" {
return []string{}, nil
}
return strings.Split(raw, sep), nil
}
}
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
// strings to time.Duration.
func StringToTimeDurationHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Duration(5)) {
return data, nil
}
// Convert it by parsing
return time.ParseDuration(data.(string))
}
}
// StringToIPHookFunc returns a DecodeHookFunc that converts
// strings to net.IP
func StringToIPHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IP{}) {
return data, nil
}
// Convert it by parsing
ip := net.ParseIP(data.(string))
if ip == nil {
return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
}
return ip, nil
}
}
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
// strings to net.IPNet
func StringToIPNetHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IPNet{}) {
return data, nil
}
// Convert it by parsing
_, net, err := net.ParseCIDR(data.(string))
return net, err
}
}
// StringToTimeHookFunc returns a DecodeHookFunc that converts
// strings to time.Time.
func StringToTimeHookFunc(layout string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Time{}) {
return data, nil
}
// Convert it by parsing
return time.Parse(layout, data.(string))
}
}
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
// the decoder.
//
// Note that this is significantly different from the WeaklyTypedInput option
// of the DecoderConfig.
func WeaklyTypedHook(
f reflect.Kind,
t reflect.Kind,
data interface{},
) (interface{}, error) {
dataVal := reflect.ValueOf(data)
switch t {
case reflect.String:
switch f {
case reflect.Bool:
if dataVal.Bool() {
return "1", nil
}
return "0", nil
case reflect.Float32:
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
case reflect.Int:
return strconv.FormatInt(dataVal.Int(), 10), nil
case reflect.Slice:
dataType := dataVal.Type()
elemKind := dataType.Elem().Kind()
if elemKind == reflect.Uint8 {
return string(dataVal.Interface().([]uint8)), nil
}
case reflect.Uint:
return strconv.FormatUint(dataVal.Uint(), 10), nil
}
}
return data, nil
}
func RecursiveStructToMapHookFunc() DecodeHookFunc {
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
if f.Kind() != reflect.Struct {
return f.Interface(), nil
}
var i interface{} = struct{}{}
if t.Type() != reflect.TypeOf(&i).Elem() {
return f.Interface(), nil
}
m := make(map[string]interface{})
t.Set(reflect.ValueOf(m))
return f.Interface(), nil
}
}
// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
// strings to the UnmarshalText function, when the target type
// implements the encoding.TextUnmarshaler interface
func TextUnmarshallerHookFunc() DecodeHookFuncType {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
result := reflect.New(t).Interface()
unmarshaller, ok := result.(encoding.TextUnmarshaler)
if !ok {
return data, nil
}
str, ok := data.(string)
if !ok {
str = reflect.Indirect(reflect.ValueOf(&data)).Elem().String()
}
if err := unmarshaller.UnmarshalText([]byte(str)); err != nil {
return nil, err
}
return result, nil
}
}
// StringToNetIPAddrHookFunc returns a DecodeHookFunc that converts
// strings to netip.Addr.
func StringToNetIPAddrHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(netip.Addr{}) {
return data, nil
}
// Convert it by parsing
return netip.ParseAddr(data.(string))
}
}
// StringToNetIPAddrPortHookFunc returns a DecodeHookFunc that converts
// strings to netip.AddrPort.
func StringToNetIPAddrPortHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{},
) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(netip.AddrPort{}) {
return data, nil
}
// Convert it by parsing
return netip.ParseAddrPort(data.(string))
}
}
// StringToBasicTypeHookFunc returns a DecodeHookFunc that converts
// strings to basic types.
// int8, uint8, int16, uint16, int32, uint32, int64, uint64, int, uint, float32, float64, bool, byte, rune, complex64, complex128
func StringToBasicTypeHookFunc() DecodeHookFunc {
return ComposeDecodeHookFunc(
StringToInt8HookFunc(),
StringToUint8HookFunc(),
StringToInt16HookFunc(),
StringToUint16HookFunc(),
StringToInt32HookFunc(),
StringToUint32HookFunc(),
StringToInt64HookFunc(),
StringToUint64HookFunc(),
StringToIntHookFunc(),
StringToUintHookFunc(),
StringToFloat32HookFunc(),
StringToFloat64HookFunc(),
StringToBoolHookFunc(),
// byte and rune are aliases for uint8 and int32 respectively
// StringToByteHookFunc(),
// StringToRuneHookFunc(),
StringToComplex64HookFunc(),
StringToComplex128HookFunc(),
)
}
// StringToInt8HookFunc returns a DecodeHookFunc that converts
// strings to int8.
func StringToInt8HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int8 {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 8)
return int8(i64), err
}
}
// StringToUint8HookFunc returns a DecodeHookFunc that converts
// strings to uint8.
func StringToUint8HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint8 {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 8)
return uint8(u64), err
}
}
// StringToInt16HookFunc returns a DecodeHookFunc that converts
// strings to int16.
func StringToInt16HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int16 {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 16)
return int16(i64), err
}
}
// StringToUint16HookFunc returns a DecodeHookFunc that converts
// strings to uint16.
func StringToUint16HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint16 {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 16)
return uint16(u64), err
}
}
// StringToInt32HookFunc returns a DecodeHookFunc that converts
// strings to int32.
func StringToInt32HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int32 {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 32)
return int32(i64), err
}
}
// StringToUint32HookFunc returns a DecodeHookFunc that converts
// strings to uint32.
func StringToUint32HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint32 {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 32)
return uint32(u64), err
}
}
// StringToInt64HookFunc returns a DecodeHookFunc that converts
// strings to int64.
func StringToInt64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int64 {
return data, nil
}
// Convert it by parsing
return strconv.ParseInt(data.(string), 0, 64)
}
}
// StringToUint64HookFunc returns a DecodeHookFunc that converts
// strings to uint64.
func StringToUint64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint64 {
return data, nil
}
// Convert it by parsing
return strconv.ParseUint(data.(string), 0, 64)
}
}
// StringToIntHookFunc returns a DecodeHookFunc that converts
// strings to int.
func StringToIntHookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Int {
return data, nil
}
// Convert it by parsing
i64, err := strconv.ParseInt(data.(string), 0, 0)
return int(i64), err
}
}
// StringToUintHookFunc returns a DecodeHookFunc that converts
// strings to uint.
func StringToUintHookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Uint {
return data, nil
}
// Convert it by parsing
u64, err := strconv.ParseUint(data.(string), 0, 0)
return uint(u64), err
}
}
// StringToFloat32HookFunc returns a DecodeHookFunc that converts
// strings to float32.
func StringToFloat32HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Float32 {
return data, nil
}
// Convert it by parsing
f64, err := strconv.ParseFloat(data.(string), 32)
return float32(f64), err
}
}
// StringToFloat64HookFunc returns a DecodeHookFunc that converts
// strings to float64.
func StringToFloat64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Float64 {
return data, nil
}
// Convert it by parsing
return strconv.ParseFloat(data.(string), 64)
}
}
// StringToBoolHookFunc returns a DecodeHookFunc that converts
// strings to bool.
func StringToBoolHookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Bool {
return data, nil
}
// Convert it by parsing
return strconv.ParseBool(data.(string))
}
}
// StringToByteHookFunc returns a DecodeHookFunc that converts
// strings to byte.
func StringToByteHookFunc() DecodeHookFunc {
return StringToUint8HookFunc()
}
// StringToRuneHookFunc returns a DecodeHookFunc that converts
// strings to rune.
func StringToRuneHookFunc() DecodeHookFunc {
return StringToInt32HookFunc()
}
// StringToComplex64HookFunc returns a DecodeHookFunc that converts
// strings to complex64.
func StringToComplex64HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Complex64 {
return data, nil
}
// Convert it by parsing
c128, err := strconv.ParseComplex(data.(string), 64)
return complex64(c128), err
}
}
// StringToComplex128HookFunc returns a DecodeHookFunc that converts
// strings to complex128.
func StringToComplex128HookFunc() DecodeHookFunc {
return func(f reflect.Type, t reflect.Type, data interface{}) (interface{}, error) {
if f.Kind() != reflect.String || t.Kind() != reflect.Complex128 {
return data, nil
}
// Convert it by parsing
return strconv.ParseComplex(data.(string), 128)
}
}

472
vendor/github.com/go-viper/mapstructure/v2/flake.lock generated vendored Normal file
View File

@@ -0,0 +1,472 @@
{
"nodes": {
"cachix": {
"inputs": {
"devenv": "devenv_2",
"flake-compat": [
"devenv",
"flake-compat"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"pre-commit-hooks": [
"devenv",
"pre-commit-hooks"
]
},
"locked": {
"lastModified": 1712055811,
"narHash": "sha256-7FcfMm5A/f02yyzuavJe06zLa9hcMHsagE28ADcmQvk=",
"owner": "cachix",
"repo": "cachix",
"rev": "02e38da89851ec7fec3356a5c04bc8349cae0e30",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "cachix",
"type": "github"
}
},
"devenv": {
"inputs": {
"cachix": "cachix",
"flake-compat": "flake-compat_2",
"nix": "nix_2",
"nixpkgs": "nixpkgs_2",
"pre-commit-hooks": "pre-commit-hooks"
},
"locked": {
"lastModified": 1717245169,
"narHash": "sha256-+mW3rTBjGU8p1THJN0lX/Dd/8FbnF+3dB+mJuSaxewE=",
"owner": "cachix",
"repo": "devenv",
"rev": "c3f9f053c077c6f88a3de5276d9178c62baa3fc3",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "devenv",
"type": "github"
}
},
"devenv_2": {
"inputs": {
"flake-compat": [
"devenv",
"cachix",
"flake-compat"
],
"nix": "nix",
"nixpkgs": "nixpkgs",
"poetry2nix": "poetry2nix",
"pre-commit-hooks": [
"devenv",
"cachix",
"pre-commit-hooks"
]
},
"locked": {
"lastModified": 1708704632,
"narHash": "sha256-w+dOIW60FKMaHI1q5714CSibk99JfYxm0CzTinYWr+Q=",
"owner": "cachix",
"repo": "devenv",
"rev": "2ee4450b0f4b95a1b90f2eb5ffea98b90e48c196",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "python-rewrite",
"repo": "devenv",
"type": "github"
}
},
"flake-compat": {
"flake": false,
"locked": {
"lastModified": 1673956053,
"narHash": "sha256-4gtG9iQuiKITOjNQQeQIpoIB6b16fm+504Ch3sNKLd8=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "35bb57c0c8d8b62bbfd284272c928ceb64ddbde9",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-compat_2": {
"flake": false,
"locked": {
"lastModified": 1696426674,
"narHash": "sha256-kvjfFW7WAETZlt09AgDn1MrtKzP7t90Vf7vypd3OL1U=",
"owner": "edolstra",
"repo": "flake-compat",
"rev": "0f9255e01c2351cc7d116c072cb317785dd33b33",
"type": "github"
},
"original": {
"owner": "edolstra",
"repo": "flake-compat",
"type": "github"
}
},
"flake-parts": {
"inputs": {
"nixpkgs-lib": "nixpkgs-lib"
},
"locked": {
"lastModified": 1717285511,
"narHash": "sha256-iKzJcpdXih14qYVcZ9QC9XuZYnPc6T8YImb6dX166kw=",
"owner": "hercules-ci",
"repo": "flake-parts",
"rev": "2a55567fcf15b1b1c7ed712a2c6fadaec7412ea8",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "flake-parts",
"type": "github"
}
},
"flake-utils": {
"inputs": {
"systems": "systems"
},
"locked": {
"lastModified": 1689068808,
"narHash": "sha256-6ixXo3wt24N/melDWjq70UuHQLxGV8jZvooRanIHXw0=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "919d646de7be200f3bf08cb76ae1f09402b6f9b4",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"flake-utils_2": {
"inputs": {
"systems": "systems_2"
},
"locked": {
"lastModified": 1710146030,
"narHash": "sha256-SZ5L6eA7HJ/nmkzGG7/ISclqe6oZdOZTNoesiInkXPQ=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "b1d9ab70662946ef0850d488da1c9019f3a9752a",
"type": "github"
},
"original": {
"owner": "numtide",
"repo": "flake-utils",
"type": "github"
}
},
"gitignore": {
"inputs": {
"nixpkgs": [
"devenv",
"pre-commit-hooks",
"nixpkgs"
]
},
"locked": {
"lastModified": 1709087332,
"narHash": "sha256-HG2cCnktfHsKV0s4XW83gU3F57gaTljL9KNSuG6bnQs=",
"owner": "hercules-ci",
"repo": "gitignore.nix",
"rev": "637db329424fd7e46cf4185293b9cc8c88c95394",
"type": "github"
},
"original": {
"owner": "hercules-ci",
"repo": "gitignore.nix",
"type": "github"
}
},
"nix": {
"inputs": {
"flake-compat": "flake-compat",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression"
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"repo": "nix",
"type": "github"
}
},
"nix-github-actions": {
"inputs": {
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"poetry2nix",
"nixpkgs"
]
},
"locked": {
"lastModified": 1688870561,
"narHash": "sha256-4UYkifnPEw1nAzqqPOTL2MvWtm3sNGw1UTYTalkTcGY=",
"owner": "nix-community",
"repo": "nix-github-actions",
"rev": "165b1650b753316aa7f1787f3005a8d2da0f5301",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "nix-github-actions",
"type": "github"
}
},
"nix_2": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-regression": "nixpkgs-regression_2"
},
"locked": {
"lastModified": 1712911606,
"narHash": "sha256-BGvBhepCufsjcUkXnEEXhEVjwdJAwPglCC2+bInc794=",
"owner": "domenkozar",
"repo": "nix",
"rev": "b24a9318ea3f3600c1e24b4a00691ee912d4de12",
"type": "github"
},
"original": {
"owner": "domenkozar",
"ref": "devenv-2.21",
"repo": "nix",
"type": "github"
}
},
"nixpkgs": {
"locked": {
"lastModified": 1692808169,
"narHash": "sha256-x9Opq06rIiwdwGeK2Ykj69dNc2IvUH1fY55Wm7atwrE=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "9201b5ff357e781bf014d0330d18555695df7ba8",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs-lib": {
"locked": {
"lastModified": 1717284937,
"narHash": "sha256-lIbdfCsf8LMFloheeE6N31+BMIeixqyQWbSr2vk79EQ=",
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
},
"original": {
"type": "tarball",
"url": "https://github.com/NixOS/nixpkgs/archive/eb9ceca17df2ea50a250b6b27f7bf6ab0186f198.tar.gz"
}
},
"nixpkgs-regression": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-regression_2": {
"locked": {
"lastModified": 1643052045,
"narHash": "sha256-uGJ0VXIhWKGXxkeNnq4TvV3CIOkUJ3PAoLZ3HMzNVMw=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
},
"original": {
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "215d4d0fd80ca5163643b03a33fde804a29cc1e2",
"type": "github"
}
},
"nixpkgs-stable": {
"locked": {
"lastModified": 1710695816,
"narHash": "sha256-3Eh7fhEID17pv9ZxrPwCLfqXnYP006RKzSs0JptsN84=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "614b4613980a522ba49f0d194531beddbb7220d3",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixos-23.11",
"repo": "nixpkgs",
"type": "github"
}
},
"nixpkgs_2": {
"locked": {
"lastModified": 1713361204,
"narHash": "sha256-TA6EDunWTkc5FvDCqU3W2T3SFn0gRZqh6D/hJnM02MM=",
"owner": "cachix",
"repo": "devenv-nixpkgs",
"rev": "285676e87ad9f0ca23d8714a6ab61e7e027020c6",
"type": "github"
},
"original": {
"owner": "cachix",
"ref": "rolling",
"repo": "devenv-nixpkgs",
"type": "github"
}
},
"nixpkgs_3": {
"locked": {
"lastModified": 1717112898,
"narHash": "sha256-7R2ZvOnvd9h8fDd65p0JnB7wXfUvreox3xFdYWd1BnY=",
"owner": "NixOS",
"repo": "nixpkgs",
"rev": "6132b0f6e344ce2fe34fc051b72fb46e34f668e0",
"type": "github"
},
"original": {
"owner": "NixOS",
"ref": "nixpkgs-unstable",
"repo": "nixpkgs",
"type": "github"
}
},
"poetry2nix": {
"inputs": {
"flake-utils": "flake-utils",
"nix-github-actions": "nix-github-actions",
"nixpkgs": [
"devenv",
"cachix",
"devenv",
"nixpkgs"
]
},
"locked": {
"lastModified": 1692876271,
"narHash": "sha256-IXfZEkI0Mal5y1jr6IRWMqK8GW2/f28xJenZIPQqkY0=",
"owner": "nix-community",
"repo": "poetry2nix",
"rev": "d5006be9c2c2417dafb2e2e5034d83fabd207ee3",
"type": "github"
},
"original": {
"owner": "nix-community",
"repo": "poetry2nix",
"type": "github"
}
},
"pre-commit-hooks": {
"inputs": {
"flake-compat": [
"devenv",
"flake-compat"
],
"flake-utils": "flake-utils_2",
"gitignore": "gitignore",
"nixpkgs": [
"devenv",
"nixpkgs"
],
"nixpkgs-stable": "nixpkgs-stable"
},
"locked": {
"lastModified": 1713775815,
"narHash": "sha256-Wu9cdYTnGQQwtT20QQMg7jzkANKQjwBD9iccfGKkfls=",
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"rev": "2ac4dcbf55ed43f3be0bae15e181f08a57af24a4",
"type": "github"
},
"original": {
"owner": "cachix",
"repo": "pre-commit-hooks.nix",
"type": "github"
}
},
"root": {
"inputs": {
"devenv": "devenv",
"flake-parts": "flake-parts",
"nixpkgs": "nixpkgs_3"
}
},
"systems": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
},
"systems_2": {
"locked": {
"lastModified": 1681028828,
"narHash": "sha256-Vy1rq5AaRuLzOxct8nz4T6wlgyUR7zLU309k9mBC768=",
"owner": "nix-systems",
"repo": "default",
"rev": "da67096a3b9bf56a91d16901293e51ba5b49a27e",
"type": "github"
},
"original": {
"owner": "nix-systems",
"repo": "default",
"type": "github"
}
}
},
"root": "root",
"version": 7
}

39 vendor/github.com/go-viper/mapstructure/v2/flake.nix generated vendored Normal file
View File

@@ -0,0 +1,39 @@
{
inputs = {
nixpkgs.url = "github:NixOS/nixpkgs/nixpkgs-unstable";
flake-parts.url = "github:hercules-ci/flake-parts";
devenv.url = "github:cachix/devenv";
};
outputs = inputs@{ flake-parts, ... }:
flake-parts.lib.mkFlake { inherit inputs; } {
imports = [
inputs.devenv.flakeModule
];
systems = [ "x86_64-linux" "x86_64-darwin" "aarch64-darwin" ];
perSystem = { config, self', inputs', pkgs, system, ... }: rec {
devenv.shells = {
default = {
languages = {
go.enable = true;
};
pre-commit.hooks = {
nixpkgs-fmt.enable = true;
};
packages = with pkgs; [
golangci-lint
];
# https://github.com/cachix/devenv/issues/528#issuecomment-1556108767
containers = pkgs.lib.mkForce { };
};
ci = devenv.shells.default;
};
};
};
}

View File

@@ -0,0 +1,11 @@
package errors
import "errors"
func New(text string) error {
return errors.New(text)
}
func As(err error, target interface{}) bool {
return errors.As(err, target)
}

View File

@@ -0,0 +1,9 @@
//go:build go1.20
package errors
import "errors"
func Join(errs ...error) error {
return errors.Join(errs...)
}

View File

@@ -0,0 +1,61 @@
//go:build !go1.20
// Copyright 2022 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package errors
// Join returns an error that wraps the given errors.
// Any nil error values are discarded.
// Join returns nil if every value in errs is nil.
// The error formats as the concatenation of the strings obtained
// by calling the Error method of each element of errs, with a newline
// between each string.
//
// A non-nil error returned by Join implements the Unwrap() []error method.
func Join(errs ...error) error {
n := 0
for _, err := range errs {
if err != nil {
n++
}
}
if n == 0 {
return nil
}
e := &joinError{
errs: make([]error, 0, n),
}
for _, err := range errs {
if err != nil {
e.errs = append(e.errs, err)
}
}
return e
}
type joinError struct {
errs []error
}
func (e *joinError) Error() string {
// Since Join returns nil if every value in errs is nil,
// e.errs cannot be empty.
if len(e.errs) == 1 {
return e.errs[0].Error()
}
b := []byte(e.errs[0].Error())
for _, err := range e.errs[1:] {
b = append(b, '\n')
b = append(b, err.Error()...)
}
// At this point, b has at least one byte '\n'.
// return unsafe.String(&b[0], len(b))
return string(b)
}
func (e *joinError) Unwrap() []error {
return e.errs
}
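
Not part of the diff: a minimal sketch of the aggregation behavior this polyfill reproduces, using the standard library's `errors.Join` from Go 1.20+ (the vendored copy above backfills it for older toolchains).

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	// Join discards nil values and returns nil if every argument is nil.
	err := errors.Join(
		errors.New("cannot parse 'Age' as int"),
		nil,
		errors.New("'Name' expected type 'string'"),
	)

	// The joined error formats as one message per line.
	fmt.Println(err)

	// It also exposes Unwrap() []error, so callers can walk the
	// individual errors.
	var joined interface{ Unwrap() []error }
	if errors.As(err, &joined) {
		fmt.Println("wrapped errors:", len(joined.Unwrap())) // 2
	}
}
```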

View File

@@ -9,84 +9,84 @@
// //
// The simplest function to start with is Decode. // The simplest function to start with is Decode.
// //
// Field Tags // # Field Tags
// //
// When decoding to a struct, mapstructure will use the field name by // When decoding to a struct, mapstructure will use the field name by
// default to perform the mapping. For example, if a struct has a field // default to perform the mapping. For example, if a struct has a field
// "Username" then mapstructure will look for a key in the source value // "Username" then mapstructure will look for a key in the source value
// of "username" (case insensitive). // of "username" (case insensitive).
// //
// type User struct { // type User struct {
// Username string // Username string
// } // }
// //
// You can change the behavior of mapstructure by using struct tags. // You can change the behavior of mapstructure by using struct tags.
// The default struct tag that mapstructure looks for is "mapstructure" // The default struct tag that mapstructure looks for is "mapstructure"
// but you can customize it using DecoderConfig. // but you can customize it using DecoderConfig.
// //
// Renaming Fields // # Renaming Fields
// //
// To rename the key that mapstructure looks for, use the "mapstructure" // To rename the key that mapstructure looks for, use the "mapstructure"
// tag and set a value directly. For example, to change the "username" example // tag and set a value directly. For example, to change the "username" example
// above to "user": // above to "user":
// //
// type User struct { // type User struct {
// Username string `mapstructure:"user"` // Username string `mapstructure:"user"`
// } // }
// //
// Embedded Structs and Squashing // # Embedded Structs and Squashing
// //
// Embedded structs are treated as if they're another field with that name. // Embedded structs are treated as if they're another field with that name.
// By default, the two structs below are equivalent when decoding with // By default, the two structs below are equivalent when decoding with
// mapstructure: // mapstructure:
// //
// type Person struct { // type Person struct {
// Name string // Name string
// } // }
// //
// type Friend struct { // type Friend struct {
// Person // Person
// } // }
// //
// type Friend struct { // type Friend struct {
// Person Person // Person Person
// } // }
// //
// This would require an input that looks like below: // This would require an input that looks like below:
// //
// map[string]interface{}{ // map[string]interface{}{
// "person": map[string]interface{}{"name": "alice"}, // "person": map[string]interface{}{"name": "alice"},
// } // }
// //
// If your "person" value is NOT nested, then you can append ",squash" to // If your "person" value is NOT nested, then you can append ",squash" to
// your tag value and mapstructure will treat it as if the embedded struct // your tag value and mapstructure will treat it as if the embedded struct
// were part of the struct directly. Example: // were part of the struct directly. Example:
// //
// type Friend struct { // type Friend struct {
// Person `mapstructure:",squash"` // Person `mapstructure:",squash"`
// } // }
// //
// Now the following input would be accepted: // Now the following input would be accepted:
// //
// map[string]interface{}{ // map[string]interface{}{
// "name": "alice", // "name": "alice",
// } // }
// //
// When decoding from a struct to a map, the squash tag squashes the struct // When decoding from a struct to a map, the squash tag squashes the struct
// fields into a single map. Using the example structs from above: // fields into a single map. Using the example structs from above:
// //
// Friend{Person: Person{Name: "alice"}} // Friend{Person: Person{Name: "alice"}}
// //
// Will be decoded into a map: // Will be decoded into a map:
// //
// map[string]interface{}{ // map[string]interface{}{
// "name": "alice", // "name": "alice",
// } // }
// //
// DecoderConfig has a field that changes the behavior of mapstructure // DecoderConfig has a field that changes the behavior of mapstructure
// to always squash embedded structs. // to always squash embedded structs.
// //
// Remainder Values // # Remainder Values
// //
// If there are any unmapped keys in the source value, mapstructure by // If there are any unmapped keys in the source value, mapstructure by
// default will silently ignore them. You can error by setting ErrorUnused // default will silently ignore them. You can error by setting ErrorUnused
@@ -98,20 +98,20 @@
// probably be a "map[string]interface{}" or "map[interface{}]interface{}". // probably be a "map[string]interface{}" or "map[interface{}]interface{}".
// See example below: // See example below:
// //
// type Friend struct { // type Friend struct {
// Name string // Name string
// Other map[string]interface{} `mapstructure:",remain"` // Other map[string]interface{} `mapstructure:",remain"`
// } // }
// //
// Given the input below, Other would be populated with the other // Given the input below, Other would be populated with the other
// values that weren't used (everything but "name"): // values that weren't used (everything but "name"):
// //
// map[string]interface{}{ // map[string]interface{}{
// "name": "bob", // "name": "bob",
// "address": "123 Maple St.", // "address": "123 Maple St.",
// } // }
// //
// Omit Empty Values // # Omit Empty Values
// //
// When decoding from a struct to any other value, you may use the // When decoding from a struct to any other value, you may use the
// ",omitempty" suffix on your tag to omit that value if it equates to // ",omitempty" suffix on your tag to omit that value if it equates to
@@ -122,37 +122,37 @@
// field value is zero and a numeric type, the field is empty, and it won't // field value is zero and a numeric type, the field is empty, and it won't
// be encoded into the destination type. // be encoded into the destination type.
// //
// type Source struct { // type Source struct {
// Age int `mapstructure:",omitempty"` // Age int `mapstructure:",omitempty"`
// } // }
// //
// Unexported fields // # Unexported fields
// //
// Since unexported (private) struct fields cannot be set outside the package // Since unexported (private) struct fields cannot be set outside the package
// where they are defined, the decoder will simply skip them. // where they are defined, the decoder will simply skip them.
// //
// For this output type definition: // For this output type definition:
// //
// type Exported struct { // type Exported struct {
// private string // this unexported field will be skipped // private string // this unexported field will be skipped
// Public string // Public string
// } // }
// //
// Using this map as input: // Using this map as input:
// //
// map[string]interface{}{ // map[string]interface{}{
// "private": "I will be ignored", // "private": "I will be ignored",
// "Public": "I made it through!", // "Public": "I made it through!",
// } // }
// //
// The following struct will be decoded: // The following struct will be decoded:
// //
// type Exported struct { // type Exported struct {
// private: "" // field is left with an empty string (zero value) // private: "" // field is left with an empty string (zero value)
// Public: "I made it through!" // Public: "I made it through!"
// } // }
// //
// Other Configuration // # Other Configuration
// //
// mapstructure is highly configurable. See the DecoderConfig struct // mapstructure is highly configurable. See the DecoderConfig struct
// for other features and options that are supported. // for other features and options that are supported.
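
Not part of the diff: a short, self-contained sketch of the tag behaviors described in the comments above (renaming, `,squash`, `,remain`), assuming the v2 import path that this vendoring introduces.

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Person struct {
	Name string `mapstructure:"name"`
}

type Friend struct {
	Person `mapstructure:",squash"` // fields of Person map as if declared inline
	// Keys that match no field are collected here because of ",remain".
	Other map[string]interface{} `mapstructure:",remain"`
}

func main() {
	input := map[string]interface{}{
		"name":    "alice",
		"address": "123 Maple St.",
	}

	var f Friend
	if err := mapstructure.Decode(input, &f); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(f.Name)  // alice
	fmt.Println(f.Other) // map[address:123 Maple St.]
}
```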
@@ -160,12 +160,13 @@ package mapstructure
import ( import (
"encoding/json" "encoding/json"
"errors"
"fmt" "fmt"
"reflect" "reflect"
"sort" "sort"
"strconv" "strconv"
"strings" "strings"
"github.com/go-viper/mapstructure/v2/internal/errors"
) )
// DecodeHookFunc is the callback function that can be used for // DecodeHookFunc is the callback function that can be used for
@@ -414,7 +415,15 @@ func NewDecoder(config *DecoderConfig) (*Decoder, error) {
// Decode decodes the given raw interface to the target pointer specified // Decode decodes the given raw interface to the target pointer specified
// by the configuration. // by the configuration.
func (d *Decoder) Decode(input interface{}) error { func (d *Decoder) Decode(input interface{}) error {
return d.decode("", input, reflect.ValueOf(d.config.Result).Elem()) err := d.decode("", input, reflect.ValueOf(d.config.Result).Elem())
// Retain some of the original behavior when multiple errors ocurr
var joinedErr interface{ Unwrap() []error }
if errors.As(err, &joinedErr) {
return fmt.Errorf("decoding failed due to the following error(s):\n\n%w", err)
}
return err
} }
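
Not part of the diff: a sketch of how a caller can take advantage of the new wrapping, using a hypothetical `Config` type. When several fields fail, `Decode` now returns a single error whose tree exposes `Unwrap() []error`.

```go
package main

import (
	"errors"
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

// Config is a hypothetical target type used only for illustration.
type Config struct {
	Port    int  `mapstructure:"port"`
	Verbose bool `mapstructure:"verbose"`
}

func main() {
	// Both fields fail to decode, producing two accumulated errors.
	input := map[string]interface{}{
		"port":    "not-a-number",
		"verbose": 42,
	}

	var c Config
	if err := mapstructure.Decode(input, &c); err != nil {
		var multi interface{ Unwrap() []error }
		if errors.As(err, &multi) {
			for _, e := range multi.Unwrap() {
				fmt.Println("field error:", e)
			}
		} else {
			fmt.Println(err)
		}
	}
}
```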
// Decodes an unknown data type into a specific reflection value. // Decodes an unknown data type into a specific reflection value.
@@ -458,7 +467,7 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
var err error var err error
input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal) input, err = DecodeHookExec(d.config.DecodeHook, inputVal, outVal)
if err != nil { if err != nil {
return fmt.Errorf("error decoding '%s': %s", name, err) return fmt.Errorf("error decoding '%s': %w", name, err)
} }
} }
@@ -478,6 +487,8 @@ func (d *Decoder) decode(name string, input interface{}, outVal reflect.Value) e
err = d.decodeUint(name, input, outVal) err = d.decodeUint(name, input, outVal)
case reflect.Float32: case reflect.Float32:
err = d.decodeFloat(name, input, outVal) err = d.decodeFloat(name, input, outVal)
case reflect.Complex64:
err = d.decodeComplex(name, input, outVal)
case reflect.Struct: case reflect.Struct:
err = d.decodeStruct(name, input, outVal) err = d.decodeStruct(name, input, outVal)
case reflect.Map: case reflect.Map:
@@ -796,6 +807,22 @@ func (d *Decoder) decodeFloat(name string, data interface{}, val reflect.Value)
return nil return nil
} }
func (d *Decoder) decodeComplex(name string, data interface{}, val reflect.Value) error {
dataVal := reflect.Indirect(reflect.ValueOf(data))
dataKind := getKind(dataVal)
switch {
case dataKind == reflect.Complex64:
val.SetComplex(dataVal.Complex())
default:
return fmt.Errorf(
"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
name, val.Type(), dataVal.Type(), data)
}
return nil
}
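
Not part of the diff: a minimal sketch of what the new `decodeComplex` branch enables, again assuming the v2 import path.

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

func main() {
	input := map[string]interface{}{
		"impedance": complex(3, 4), // complex128 source value
	}

	var out struct {
		Impedance complex128 `mapstructure:"impedance"`
	}

	// Complex64/Complex128 kinds are now routed through decodeComplex.
	if err := mapstructure.Decode(input, &out); err != nil {
		fmt.Println("decode failed:", err)
		return
	}
	fmt.Println(out.Impedance) // (3+4i)
}
```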
func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error { func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) error {
valType := val.Type() valType := val.Type()
valKeyType := valType.Key() valKeyType := valType.Key()
@@ -811,8 +838,14 @@ func (d *Decoder) decodeMap(name string, data interface{}, val reflect.Value) er
valMap = reflect.MakeMap(mapType) valMap = reflect.MakeMap(mapType)
} }
dataVal := reflect.ValueOf(data)
// Resolve any levels of indirection
for dataVal.Kind() == reflect.Pointer {
dataVal = reflect.Indirect(dataVal)
}
// Check input type and based on the input type jump to the proper func // Check input type and based on the input type jump to the proper func
dataVal := reflect.Indirect(reflect.ValueOf(data))
switch dataVal.Kind() { switch dataVal.Kind() {
case reflect.Map: case reflect.Map:
return d.decodeMapFromMap(name, dataVal, val, valMap) return d.decodeMapFromMap(name, dataVal, val, valMap)
@@ -857,7 +890,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
valElemType := valType.Elem() valElemType := valType.Elem()
// Accumulate errors // Accumulate errors
errors := make([]string, 0) var errs []error
// If the input data is empty, then we just match what the input data is. // If the input data is empty, then we just match what the input data is.
if dataVal.Len() == 0 { if dataVal.Len() == 0 {
@@ -879,7 +912,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
// First decode the key into the proper type // First decode the key into the proper type
currentKey := reflect.Indirect(reflect.New(valKeyType)) currentKey := reflect.Indirect(reflect.New(valKeyType))
if err := d.decode(fieldName, k.Interface(), currentKey); err != nil { if err := d.decode(fieldName, k.Interface(), currentKey); err != nil {
errors = appendErrors(errors, err) errs = append(errs, err)
continue continue
} }
@@ -887,7 +920,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
v := dataVal.MapIndex(k).Interface() v := dataVal.MapIndex(k).Interface()
currentVal := reflect.Indirect(reflect.New(valElemType)) currentVal := reflect.Indirect(reflect.New(valElemType))
if err := d.decode(fieldName, v, currentVal); err != nil { if err := d.decode(fieldName, v, currentVal); err != nil {
errors = appendErrors(errors, err) errs = append(errs, err)
continue continue
} }
@@ -897,12 +930,7 @@ func (d *Decoder) decodeMapFromMap(name string, dataVal reflect.Value, val refle
// Set the built up map to the value // Set the built up map to the value
val.Set(valMap) val.Set(valMap)
// If we had errors, return those return errors.Join(errs...)
if len(errors) > 0 {
return &Error{errors}
}
return nil
} }
func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error { func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val reflect.Value, valMap reflect.Value) error {
@@ -956,6 +984,18 @@ func (d *Decoder) decodeMapFromStruct(name string, dataVal reflect.Value, val re
if v.Kind() != reflect.Struct { if v.Kind() != reflect.Struct {
return fmt.Errorf("cannot squash non-struct type '%s'", v.Type()) return fmt.Errorf("cannot squash non-struct type '%s'", v.Type())
} }
} else {
if strings.Index(tagValue[index+1:], "remain") != -1 {
if v.Kind() != reflect.Map {
return fmt.Errorf("error remain-tag field with invalid type: '%s'", v.Type())
}
ptr := v.MapRange()
for ptr.Next() {
valMap.SetMapIndex(ptr.Key(), ptr.Value())
}
continue
}
} }
if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" { if keyNameTagValue := tagValue[:index]; keyNameTagValue != "" {
keyName = keyNameTagValue keyName = keyNameTagValue
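
Not part of the diff: a sketch of the struct-to-map direction that this `,remain` handling covers; the remain field's entries are spread into the output map instead of being nested under the field name.

```go
package main

import (
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Friend struct {
	Name  string                 `mapstructure:"name"`
	Other map[string]interface{} `mapstructure:",remain"`
}

func main() {
	src := Friend{
		Name:  "bob",
		Other: map[string]interface{}{"address": "123 Maple St."},
	}

	var out map[string]interface{}
	if err := mapstructure.Decode(src, &out); err != nil {
		panic(err)
	}
	// The ",remain" entries are merged into the top-level map.
	fmt.Println(out) // map[address:123 Maple St. name:bob]
}
```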
@@ -1123,10 +1163,12 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
if valSlice.IsNil() || d.config.ZeroFields { if valSlice.IsNil() || d.config.ZeroFields {
// Make a new slice to hold our result, same size as the original data. // Make a new slice to hold our result, same size as the original data.
valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len()) valSlice = reflect.MakeSlice(sliceType, dataVal.Len(), dataVal.Len())
} else if valSlice.Len() > dataVal.Len() {
valSlice = valSlice.Slice(0, dataVal.Len())
} }
// Accumulate any errors // Accumulate any errors
errors := make([]string, 0) var errs []error
for i := 0; i < dataVal.Len(); i++ { for i := 0; i < dataVal.Len(); i++ {
currentData := dataVal.Index(i).Interface() currentData := dataVal.Index(i).Interface()
@@ -1137,19 +1179,14 @@ func (d *Decoder) decodeSlice(name string, data interface{}, val reflect.Value)
fieldName := name + "[" + strconv.Itoa(i) + "]" fieldName := name + "[" + strconv.Itoa(i) + "]"
if err := d.decode(fieldName, currentData, currentField); err != nil { if err := d.decode(fieldName, currentData, currentField); err != nil {
errors = appendErrors(errors, err) errs = append(errs, err)
} }
} }
// Finally, set the value to the slice we built up // Finally, set the value to the slice we built up
val.Set(valSlice) val.Set(valSlice)
// If there were errors, we return those return errors.Join(errs...)
if len(errors) > 0 {
return &Error{errors}
}
return nil
} }
func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error { func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value) error {
@@ -1161,7 +1198,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
valArray := val valArray := val
if valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields { if isComparable(valArray) && valArray.Interface() == reflect.Zero(valArray.Type()).Interface() || d.config.ZeroFields {
// Check input type // Check input type
if dataValKind != reflect.Array && dataValKind != reflect.Slice { if dataValKind != reflect.Array && dataValKind != reflect.Slice {
if d.config.WeaklyTypedInput { if d.config.WeaklyTypedInput {
@@ -1188,7 +1225,6 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
if dataVal.Len() > arrayType.Len() { if dataVal.Len() > arrayType.Len() {
return fmt.Errorf( return fmt.Errorf(
"'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len()) "'%s': expected source data to have length less or equal to %d, got %d", name, arrayType.Len(), dataVal.Len())
} }
// Make a new array to hold our result, same size as the original data. // Make a new array to hold our result, same size as the original data.
@@ -1196,7 +1232,7 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
} }
// Accumulate any errors // Accumulate any errors
errors := make([]string, 0) var errs []error
for i := 0; i < dataVal.Len(); i++ { for i := 0; i < dataVal.Len(); i++ {
currentData := dataVal.Index(i).Interface() currentData := dataVal.Index(i).Interface()
@@ -1204,19 +1240,14 @@ func (d *Decoder) decodeArray(name string, data interface{}, val reflect.Value)
fieldName := name + "[" + strconv.Itoa(i) + "]" fieldName := name + "[" + strconv.Itoa(i) + "]"
if err := d.decode(fieldName, currentData, currentField); err != nil { if err := d.decode(fieldName, currentData, currentField); err != nil {
errors = appendErrors(errors, err) errs = append(errs, err)
} }
} }
// Finally, set the value to the array we built up // Finally, set the value to the array we built up
val.Set(valArray) val.Set(valArray)
// If there were errors, we return those return errors.Join(errs...)
if len(errors) > 0 {
return &Error{errors}
}
return nil
} }
func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error { func (d *Decoder) decodeStruct(name string, data interface{}, val reflect.Value) error {
@@ -1278,7 +1309,8 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
} }
targetValKeysUnused := make(map[interface{}]struct{}) targetValKeysUnused := make(map[interface{}]struct{})
errors := make([]string, 0)
var errs []error
// This slice will keep track of all the structs we'll be decoding. // This slice will keep track of all the structs we'll be decoding.
// There can be more than one struct if there are embedded structs // There can be more than one struct if there are embedded structs
@@ -1332,8 +1364,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
if squash { if squash {
if fieldVal.Kind() != reflect.Struct { if fieldVal.Kind() != reflect.Struct {
errors = appendErrors(errors, errs = append(errs, fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
fmt.Errorf("%s: unsupported type for squash: %s", fieldType.Name, fieldVal.Kind()))
} else { } else {
structs = append(structs, fieldVal) structs = append(structs, fieldVal)
} }
@@ -1356,6 +1387,9 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
fieldName := field.Name fieldName := field.Name
tagValue := field.Tag.Get(d.config.TagName) tagValue := field.Tag.Get(d.config.TagName)
if tagValue == "" && d.config.IgnoreUntaggedFields {
continue
}
tagValue = strings.SplitN(tagValue, ",", 2)[0] tagValue = strings.SplitN(tagValue, ",", 2)[0]
if tagValue != "" { if tagValue != "" {
fieldName = tagValue fieldName = tagValue
@@ -1409,7 +1443,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
} }
if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil { if err := d.decode(fieldName, rawMapVal.Interface(), fieldValue); err != nil {
errors = appendErrors(errors, err) errs = append(errs, err)
} }
} }
@@ -1424,7 +1458,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
// Decode it as-if we were just decoding this map onto our map. // Decode it as-if we were just decoding this map onto our map.
if err := d.decodeMap(name, remain, remainField.val); err != nil { if err := d.decodeMap(name, remain, remainField.val); err != nil {
errors = appendErrors(errors, err) errs = append(errs, err)
} }
// Set the map to nil so we have none so that the next check will // Set the map to nil so we have none so that the next check will
@@ -1440,7 +1474,7 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
sort.Strings(keys) sort.Strings(keys)
err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", ")) err := fmt.Errorf("'%s' has invalid keys: %s", name, strings.Join(keys, ", "))
errors = appendErrors(errors, err) errs = append(errs, err)
} }
if d.config.ErrorUnset && len(targetValKeysUnused) > 0 { if d.config.ErrorUnset && len(targetValKeysUnused) > 0 {
@@ -1451,11 +1485,11 @@ func (d *Decoder) decodeStructFromMap(name string, dataVal, val reflect.Value) e
sort.Strings(keys) sort.Strings(keys)
err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", ")) err := fmt.Errorf("'%s' has unset fields: %s", name, strings.Join(keys, ", "))
errors = appendErrors(errors, err) errs = append(errs, err)
} }
if len(errors) > 0 { if err := errors.Join(errs...); err != nil {
return &Error{errors} return err
} }
// Add the unused keys to the list of unused keys if we're tracking metadata // Add the unused keys to the list of unused keys if we're tracking metadata
@@ -1509,6 +1543,8 @@ func getKind(val reflect.Value) reflect.Kind {
return reflect.Uint return reflect.Uint
case kind >= reflect.Float32 && kind <= reflect.Float64: case kind >= reflect.Float32 && kind <= reflect.Float64:
return reflect.Float32 return reflect.Float32
case kind >= reflect.Complex64 && kind <= reflect.Complex128:
return reflect.Complex64
default: default:
return kind return kind
} }

View File

@@ -0,0 +1,44 @@
//go:build !go1.20
package mapstructure
import "reflect"
func isComparable(v reflect.Value) bool {
k := v.Kind()
switch k {
case reflect.Invalid:
return false
case reflect.Array:
switch v.Type().Elem().Kind() {
case reflect.Interface, reflect.Array, reflect.Struct:
for i := 0; i < v.Type().Len(); i++ {
// if !v.Index(i).Comparable() {
if !isComparable(v.Index(i)) {
return false
}
}
return true
}
return v.Type().Comparable()
case reflect.Interface:
// return v.Elem().Comparable()
return isComparable(v.Elem())
case reflect.Struct:
for i := 0; i < v.NumField(); i++ {
return false
// if !v.Field(i).Comparable() {
if !isComparable(v.Field(i)) {
return false
}
}
return true
default:
return v.Type().Comparable()
}
}

View File

@@ -0,0 +1,10 @@
//go:build go1.20
package mapstructure
import "reflect"
// TODO: remove once we drop support for Go <1.20
func isComparable(v reflect.Value) bool {
return v.Comparable()
}

View File

@@ -1,46 +0,0 @@
# mapstructure [![Godoc](https://godoc.org/github.com/mitchellh/mapstructure?status.svg)](https://godoc.org/github.com/mitchellh/mapstructure)
mapstructure is a Go library for decoding generic map values to structures
and vice versa, while providing helpful error handling.
This library is most useful when decoding values from some data stream (JSON,
Gob, etc.) where you don't _quite_ know the structure of the underlying data
until you read a part of it. You can therefore read a `map[string]interface{}`
and use this library to decode it into the proper underlying native Go
structure.
## Installation
Standard `go get`:
```
$ go get github.com/mitchellh/mapstructure
```
## Usage & Example
For usage and examples see the [Godoc](http://godoc.org/github.com/mitchellh/mapstructure).
The `Decode` function has examples associated with it there.
## But Why?!
Go offers fantastic standard libraries for decoding formats such as JSON.
The standard method is to have a struct pre-created, and populate that struct
from the bytes of the encoded format. This is great, but the problem is if
you have configuration or an encoding that changes slightly depending on
specific fields. For example, consider this JSON:
```json
{
"type": "person",
"name": "Mitchell"
}
```
Perhaps we can't populate a specific structure without first reading
the "type" field from the JSON. We could always do two passes over the
decoding of the JSON (reading the "type" first, and the rest later).
However, it is much simpler to just decode this into a `map[string]interface{}`
structure, read the "type" key, then use something like this library
to decode it into the proper structure.
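
Not part of the diff: the two-pass pattern the removed README describes, sketched against the replacement v2 module.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/go-viper/mapstructure/v2"
)

type Person struct {
	Name string `mapstructure:"name"`
}

func main() {
	raw := []byte(`{"type": "person", "name": "Mitchell"}`)

	// First pass: decode into a generic map to read the "type" key.
	var generic map[string]interface{}
	if err := json.Unmarshal(raw, &generic); err != nil {
		panic(err)
	}

	// Second pass: pick the concrete struct and let mapstructure decode it.
	switch generic["type"] {
	case "person":
		var p Person
		if err := mapstructure.Decode(generic, &p); err != nil {
			panic(err)
		}
		fmt.Println(p.Name) // Mitchell
	default:
		fmt.Println("unknown type")
	}
}
```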

View File

@@ -1,279 +0,0 @@
package mapstructure
import (
"encoding"
"errors"
"fmt"
"net"
"reflect"
"strconv"
"strings"
"time"
)
// typedDecodeHook takes a raw DecodeHookFunc (an interface{}) and turns
// it into the proper DecodeHookFunc type, such as DecodeHookFuncType.
func typedDecodeHook(h DecodeHookFunc) DecodeHookFunc {
// Create variables here so we can reference them with the reflect pkg
var f1 DecodeHookFuncType
var f2 DecodeHookFuncKind
var f3 DecodeHookFuncValue
// Fill in the variables into this interface and the rest is done
// automatically using the reflect package.
potential := []interface{}{f1, f2, f3}
v := reflect.ValueOf(h)
vt := v.Type()
for _, raw := range potential {
pt := reflect.ValueOf(raw).Type()
if vt.ConvertibleTo(pt) {
return v.Convert(pt).Interface()
}
}
return nil
}
// DecodeHookExec executes the given decode hook. This should be used
// since it'll naturally degrade to the older backwards compatible DecodeHookFunc
// that took reflect.Kind instead of reflect.Type.
func DecodeHookExec(
raw DecodeHookFunc,
from reflect.Value, to reflect.Value) (interface{}, error) {
switch f := typedDecodeHook(raw).(type) {
case DecodeHookFuncType:
return f(from.Type(), to.Type(), from.Interface())
case DecodeHookFuncKind:
return f(from.Kind(), to.Kind(), from.Interface())
case DecodeHookFuncValue:
return f(from, to)
default:
return nil, errors.New("invalid decode hook signature")
}
}
// ComposeDecodeHookFunc creates a single DecodeHookFunc that
// automatically composes multiple DecodeHookFuncs.
//
// The composed funcs are called in order, with the result of the
// previous transformation.
func ComposeDecodeHookFunc(fs ...DecodeHookFunc) DecodeHookFunc {
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
var err error
data := f.Interface()
newFrom := f
for _, f1 := range fs {
data, err = DecodeHookExec(f1, newFrom, t)
if err != nil {
return nil, err
}
newFrom = reflect.ValueOf(data)
}
return data, nil
}
}
// OrComposeDecodeHookFunc executes all input hook functions until one of them returns no error. In that case its value is returned.
// If all hooks return an error, OrComposeDecodeHookFunc returns an error concatenating all error messages.
func OrComposeDecodeHookFunc(ff ...DecodeHookFunc) DecodeHookFunc {
return func(a, b reflect.Value) (interface{}, error) {
var allErrs string
var out interface{}
var err error
for _, f := range ff {
out, err = DecodeHookExec(f, a, b)
if err != nil {
allErrs += err.Error() + "\n"
continue
}
return out, nil
}
return nil, errors.New(allErrs)
}
}
// StringToSliceHookFunc returns a DecodeHookFunc that converts
// string to []string by splitting on the given sep.
func StringToSliceHookFunc(sep string) DecodeHookFunc {
return func(
f reflect.Kind,
t reflect.Kind,
data interface{}) (interface{}, error) {
if f != reflect.String || t != reflect.Slice {
return data, nil
}
raw := data.(string)
if raw == "" {
return []string{}, nil
}
return strings.Split(raw, sep), nil
}
}
// StringToTimeDurationHookFunc returns a DecodeHookFunc that converts
// strings to time.Duration.
func StringToTimeDurationHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Duration(5)) {
return data, nil
}
// Convert it by parsing
return time.ParseDuration(data.(string))
}
}
// StringToIPHookFunc returns a DecodeHookFunc that converts
// strings to net.IP
func StringToIPHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IP{}) {
return data, nil
}
// Convert it by parsing
ip := net.ParseIP(data.(string))
if ip == nil {
return net.IP{}, fmt.Errorf("failed parsing ip %v", data)
}
return ip, nil
}
}
// StringToIPNetHookFunc returns a DecodeHookFunc that converts
// strings to net.IPNet
func StringToIPNetHookFunc() DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(net.IPNet{}) {
return data, nil
}
// Convert it by parsing
_, net, err := net.ParseCIDR(data.(string))
return net, err
}
}
// StringToTimeHookFunc returns a DecodeHookFunc that converts
// strings to time.Time.
func StringToTimeHookFunc(layout string) DecodeHookFunc {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
if t != reflect.TypeOf(time.Time{}) {
return data, nil
}
// Convert it by parsing
return time.Parse(layout, data.(string))
}
}
// WeaklyTypedHook is a DecodeHookFunc which adds support for weak typing to
// the decoder.
//
// Note that this is significantly different from the WeaklyTypedInput option
// of the DecoderConfig.
func WeaklyTypedHook(
f reflect.Kind,
t reflect.Kind,
data interface{}) (interface{}, error) {
dataVal := reflect.ValueOf(data)
switch t {
case reflect.String:
switch f {
case reflect.Bool:
if dataVal.Bool() {
return "1", nil
}
return "0", nil
case reflect.Float32:
return strconv.FormatFloat(dataVal.Float(), 'f', -1, 64), nil
case reflect.Int:
return strconv.FormatInt(dataVal.Int(), 10), nil
case reflect.Slice:
dataType := dataVal.Type()
elemKind := dataType.Elem().Kind()
if elemKind == reflect.Uint8 {
return string(dataVal.Interface().([]uint8)), nil
}
case reflect.Uint:
return strconv.FormatUint(dataVal.Uint(), 10), nil
}
}
return data, nil
}
func RecursiveStructToMapHookFunc() DecodeHookFunc {
return func(f reflect.Value, t reflect.Value) (interface{}, error) {
if f.Kind() != reflect.Struct {
return f.Interface(), nil
}
var i interface{} = struct{}{}
if t.Type() != reflect.TypeOf(&i).Elem() {
return f.Interface(), nil
}
m := make(map[string]interface{})
t.Set(reflect.ValueOf(m))
return f.Interface(), nil
}
}
// TextUnmarshallerHookFunc returns a DecodeHookFunc that applies
// strings to the UnmarshalText function, when the target type
// implements the encoding.TextUnmarshaler interface
func TextUnmarshallerHookFunc() DecodeHookFuncType {
return func(
f reflect.Type,
t reflect.Type,
data interface{}) (interface{}, error) {
if f.Kind() != reflect.String {
return data, nil
}
result := reflect.New(t).Interface()
unmarshaller, ok := result.(encoding.TextUnmarshaler)
if !ok {
return data, nil
}
if err := unmarshaller.UnmarshalText([]byte(data.(string))); err != nil {
return nil, err
}
return result, nil
}
}
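
Not part of the diff: how decode hooks like the ones above are typically wired into a `DecoderConfig`. The same hook names exist in the v2 module that replaces this file; the config struct here is a hypothetical example.

```go
package main

import (
	"fmt"
	"time"

	"github.com/go-viper/mapstructure/v2"
)

// ServerConfig is a hypothetical target type used only for illustration.
type ServerConfig struct {
	Timeout time.Duration `mapstructure:"timeout"`
	Tags    []string      `mapstructure:"tags"`
}

func main() {
	input := map[string]interface{}{
		"timeout": "1m30s",
		"tags":    "web,db",
	}

	var out ServerConfig
	dec, err := mapstructure.NewDecoder(&mapstructure.DecoderConfig{
		// Hooks run in order; each one sees the output of the previous.
		DecodeHook: mapstructure.ComposeDecodeHookFunc(
			mapstructure.StringToTimeDurationHookFunc(),
			mapstructure.StringToSliceHookFunc(","),
		),
		Result: &out,
	})
	if err != nil {
		panic(err)
	}
	if err := dec.Decode(input); err != nil {
		panic(err)
	}
	fmt.Println(out.Timeout, out.Tags) // 1m30s [web db]
}
```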

View File

@@ -1,50 +0,0 @@
package mapstructure
import (
"errors"
"fmt"
"sort"
"strings"
)
// Error implements the error interface and can represents multiple
// errors that occur in the course of a single decode.
type Error struct {
Errors []string
}
func (e *Error) Error() string {
points := make([]string, len(e.Errors))
for i, err := range e.Errors {
points[i] = fmt.Sprintf("* %s", err)
}
sort.Strings(points)
return fmt.Sprintf(
"%d error(s) decoding:\n\n%s",
len(e.Errors), strings.Join(points, "\n"))
}
// WrappedErrors implements the errwrap.Wrapper interface to make this
// return value more useful with the errwrap and go-multierror libraries.
func (e *Error) WrappedErrors() []error {
if e == nil {
return nil
}
result := make([]error, len(e.Errors))
for i, e := range e.Errors {
result[i] = errors.New(e)
}
return result
}
func appendErrors(errors []string, err error) []string {
switch e := err.(type) {
case *Error:
return append(errors, e.Errors...)
default:
return append(errors, e.Error())
}
}

View File

@@ -338,7 +338,7 @@ func Parse(rwc io.Reader) (*Result, error) {
warnings = append(warnings, Warning{ warnings = append(warnings, Warning{
Short: "Empty continuation line found in: " + line, Short: "Empty continuation line found in: " + line,
Detail: [][]byte{[]byte("Empty continuation lines will become errors in a future release")}, Detail: [][]byte{[]byte("Empty continuation lines will become errors in a future release")},
URL: "https://github.com/moby/moby/pull/33719", URL: "https://docs.docker.com/go/dockerfile/rule/no-empty-continuations/",
Location: &Range{Start: Position{Line: currentLine}, End: Position{Line: currentLine}}, Location: &Range{Start: Position{Line: currentLine}, End: Position{Line: currentLine}},
}) })
} }

View File

@@ -169,10 +169,12 @@ func PrintLintViolations(dt []byte, w io.Writer) error {
}) })
for _, warning := range results.Warnings { for _, warning := range results.Warnings {
fmt.Fprintf(w, "\n- %s\n%s\n", warning.Detail, warning.Description) fmt.Fprintf(w, "%s", warning.RuleName)
if warning.URL != "" { if warning.URL != "" {
fmt.Fprintf(w, "URL: %s\n", warning.URL) fmt.Fprintf(w, " - %s", warning.URL)
} }
fmt.Fprintf(w, "\n%s\n", warning.Description)
if warning.Location.SourceIndex < 0 { if warning.Location.SourceIndex < 0 {
continue continue
} }
@@ -185,6 +187,7 @@ func PrintLintViolations(dt []byte, w io.Writer) error {
if err != nil { if err != nil {
return err return err
} }
fmt.Fprintln(w)
} }
return nil return nil
} }

View File

@@ -5,8 +5,8 @@ import (
api "github.com/containerd/containerd/api/services/content/v1" api "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/services/content/contentserver" "github.com/containerd/containerd/services/content/contentserver"
cerrdefs "github.com/containerd/errdefs"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -25,17 +25,17 @@ type attachableContentStore struct {
func (cs *attachableContentStore) choose(ctx context.Context) (content.Store, error) { func (cs *attachableContentStore) choose(ctx context.Context) (content.Store, error) {
md, ok := metadata.FromIncomingContext(ctx) md, ok := metadata.FromIncomingContext(ctx)
if !ok { if !ok {
return nil, errors.Wrap(errdefs.ErrInvalidArgument, "request lacks metadata") return nil, errors.Wrap(cerrdefs.ErrInvalidArgument, "request lacks metadata")
} }
values := md[GRPCHeaderID] values := md[GRPCHeaderID]
if len(values) == 0 { if len(values) == 0 {
return nil, errors.Wrapf(errdefs.ErrInvalidArgument, "request lacks metadata %q", GRPCHeaderID) return nil, errors.Wrapf(cerrdefs.ErrInvalidArgument, "request lacks metadata %q", GRPCHeaderID)
} }
id := values[0] id := values[0]
store, ok := cs.stores[id] store, ok := cs.stores[id]
if !ok { if !ok {
return nil, errors.Wrapf(errdefs.ErrNotFound, "unknown store %s", id) return nil, errors.Wrapf(cerrdefs.ErrNotFound, "unknown store %s", id)
} }
return store, nil return store, nil
} }

View File

@@ -9,7 +9,7 @@ import (
"time" "time"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs" cerrdefs "github.com/containerd/errdefs"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors" "github.com/pkg/errors"
@@ -43,7 +43,7 @@ func (b *buffer) Info(ctx context.Context, dgst digest.Digest) (content.Info, er
v, ok := b.infos[dgst] v, ok := b.infos[dgst]
b.mu.Unlock() b.mu.Unlock()
if !ok { if !ok {
return content.Info{}, errdefs.ErrNotFound return content.Info{}, cerrdefs.ErrNotFound
} }
return v, nil return v, nil
} }
@@ -54,7 +54,7 @@ func (b *buffer) Update(ctx context.Context, new content.Info, fieldpaths ...str
updated, ok := b.infos[new.Digest] updated, ok := b.infos[new.Digest]
if !ok { if !ok {
return content.Info{}, errdefs.ErrNotFound return content.Info{}, cerrdefs.ErrNotFound
} }
if len(fieldpaths) == 0 { if len(fieldpaths) == 0 {
@@ -96,7 +96,7 @@ func (b *buffer) Writer(ctx context.Context, opts ...content.WriterOpt) (content
} }
b.mu.Lock() b.mu.Lock()
if _, ok := b.refs[wOpts.Ref]; ok { if _, ok := b.refs[wOpts.Ref]; ok {
return nil, errors.Wrapf(errdefs.ErrUnavailable, "ref %s locked", wOpts.Ref) return nil, errors.Wrapf(cerrdefs.ErrUnavailable, "ref %s locked", wOpts.Ref)
} }
b.mu.Unlock() b.mu.Unlock()
return &bufferedWriter{ return &bufferedWriter{
@@ -128,7 +128,7 @@ func (b *buffer) getBytesReader(dgst digest.Digest) (*bytes.Reader, error) {
return bytes.NewReader(dt), nil return bytes.NewReader(dt), nil
} }
return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) return nil, errors.Wrapf(cerrdefs.ErrNotFound, "content %v", dgst)
} }
func (b *buffer) addValue(k digest.Digest, dt []byte) { func (b *buffer) addValue(k digest.Digest, dt []byte) {

View File

@@ -5,7 +5,7 @@ import (
"sync" "sync"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs" cerrdefs "github.com/containerd/errdefs"
"github.com/moby/buildkit/session" "github.com/moby/buildkit/session"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@@ -60,7 +60,7 @@ func (mp *MultiProvider) ReaderAt(ctx context.Context, desc ocispecs.Descriptor)
} }
mp.mu.RUnlock() mp.mu.RUnlock()
if mp.base == nil { if mp.base == nil {
return nil, errors.Wrapf(errdefs.ErrNotFound, "content %v", desc.Digest) return nil, errors.Wrapf(cerrdefs.ErrNotFound, "content %v", desc.Digest)
} }
return mp.base.ReaderAt(ctx, desc) return mp.base.ReaderAt(ctx, desc)
} }
@@ -74,7 +74,7 @@ func (mp *MultiProvider) Info(ctx context.Context, dgst digest.Digest) (content.
} }
mp.mu.RUnlock() mp.mu.RUnlock()
if mp.base == nil { if mp.base == nil {
return content.Info{}, errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst) return content.Info{}, errors.Wrapf(cerrdefs.ErrNotFound, "content %v", dgst)
} }
return mp.base.Info(ctx, dgst) return mp.base.Info(ctx, dgst)
} }

View File

@@ -7,8 +7,8 @@ import (
"time" "time"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes"
cerrdefs "github.com/containerd/errdefs"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@@ -41,7 +41,7 @@ func (i *pushingIngester) Writer(ctx context.Context, opts ...content.WriterOpt)
} }
} }
if wOpts.Ref == "" { if wOpts.Ref == "" {
return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") return nil, errors.Wrap(cerrdefs.ErrInvalidArgument, "ref must not be empty")
} }
st := time.Now() st := time.Now()
@@ -50,7 +50,7 @@ func (i *pushingIngester) Writer(ctx context.Context, opts ...content.WriterOpt)
for { for {
if time.Since(st) > time.Hour { if time.Since(st) > time.Hour {
i.mu.Unlock() i.mu.Unlock()
return nil, errors.Wrapf(errdefs.ErrUnavailable, "ref %v locked", wOpts.Desc.Digest) return nil, errors.Wrapf(cerrdefs.ErrUnavailable, "ref %v locked", wOpts.Desc.Digest)
} }
if _, ok := i.active[wOpts.Desc.Digest]; ok { if _, ok := i.active[wOpts.Desc.Digest]; ok {
i.c.Wait() i.c.Wait()

View File

@@ -6,9 +6,9 @@ import (
"sync" "sync"
"github.com/containerd/containerd/content" "github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/remotes" "github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker" "github.com/containerd/containerd/remotes/docker"
cerrdefs "github.com/containerd/errdefs"
"github.com/moby/buildkit/version" "github.com/moby/buildkit/version"
"github.com/moby/locker" "github.com/moby/locker"
digest "github.com/opencontainers/go-digest" digest "github.com/opencontainers/go-digest"
@@ -70,7 +70,7 @@ func (w *ingester) Writer(ctx context.Context, opts ...content.WriterOpt) (conte
} }
} }
if wo.Ref == "" { if wo.Ref == "" {
return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty") return nil, errors.Wrap(cerrdefs.ErrInvalidArgument, "ref must not be empty")
} }
w.locker.Lock(wo.Ref) w.locker.Lock(wo.Ref)
var once sync.Once var once sync.Once

View File

@@ -4,7 +4,7 @@ import (
"net/url" "net/url"
"strings" "strings"
"github.com/containerd/containerd/errdefs" cerrdefs "github.com/containerd/errdefs"
"github.com/pkg/errors" "github.com/pkg/errors"
) )
@@ -58,7 +58,7 @@ func ParseGitRef(ref string) (*GitRef, error) {
) )
if strings.HasPrefix(ref, "./") || strings.HasPrefix(ref, "../") { if strings.HasPrefix(ref, "./") || strings.HasPrefix(ref, "../") {
return nil, errdefs.ErrInvalidArgument return nil, cerrdefs.ErrInvalidArgument
} else if strings.HasPrefix(ref, "github.com/") { } else if strings.HasPrefix(ref, "github.com/") {
res.IndistinguishableFromLocal = true // Deprecated res.IndistinguishableFromLocal = true // Deprecated
remote = fromURL(&url.URL{ remote = fromURL(&url.URL{
@@ -84,7 +84,7 @@ func ParseGitRef(ref string) (*GitRef, error) {
// An HTTP(S) URL is considered to be a valid git ref only when it has the ".git[...]" suffix. // An HTTP(S) URL is considered to be a valid git ref only when it has the ".git[...]" suffix.
case HTTPProtocol, HTTPSProtocol: case HTTPProtocol, HTTPSProtocol:
if !strings.HasSuffix(remote.Path, ".git") { if !strings.HasSuffix(remote.Path, ".git") {
return nil, errdefs.ErrInvalidArgument return nil, cerrdefs.ErrInvalidArgument
} }
} }
} }
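
Not part of the diff: callers are unaffected by the errdefs move as long as they match sentinels with `errors.Is`; a sketch assuming buildkit's `util/gitutil` import path.

```go
package main

import (
	"errors"
	"fmt"

	cerrdefs "github.com/containerd/errdefs"
	"github.com/moby/buildkit/util/gitutil"
)

func main() {
	// Relative paths are rejected as git refs; the sentinel error now comes
	// from the standalone containerd/errdefs module.
	_, err := gitutil.ParseGitRef("./local/path")
	fmt.Println(errors.Is(err, cerrdefs.ErrInvalidArgument)) // true
}
```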

View File

@@ -46,22 +46,23 @@ func Helper() {
func Traces(err error) []*Stack { func Traces(err error) []*Stack {
var st []*Stack var st []*Stack
wrapped, ok := err.(interface { switch e := err.(type) {
Unwrap() error case interface{ Unwrap() error }:
}) st = Traces(e.Unwrap())
if ok { case interface{ Unwrap() []error }:
st = Traces(wrapped.Unwrap()) for _, ue := range e.Unwrap() {
st = Traces(ue)
// Only take first stack
if len(st) > 0 {
break
}
}
} }
if ste, ok := err.(interface { switch ste := err.(type) {
StackTrace() errors.StackTrace case interface{ StackTrace() errors.StackTrace }:
}); ok {
st = append(st, convertStack(ste.StackTrace())) st = append(st, convertStack(ste.StackTrace()))
} case interface{ StackTrace() *Stack }:
if ste, ok := err.(interface {
StackTrace() *Stack
}); ok {
st = append(st, ste.StackTrace()) st = append(st, ste.StackTrace())
} }

View File

@@ -6,7 +6,6 @@ import (
"net/http" "net/http"
"net/http/httptrace" "net/http/httptrace"
"github.com/moby/buildkit/util/bklog"
"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
@@ -15,6 +14,11 @@ import (
semconv "go.opentelemetry.io/otel/semconv/v1.17.0" semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop" "go.opentelemetry.io/otel/trace/noop"
"github.com/pkg/errors"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/stack"
) )
// StartSpan starts a new span as a child of the span in context. // StartSpan starts a new span as a child of the span in context.
@@ -30,14 +34,30 @@ func StartSpan(ctx context.Context, operationName string, opts ...trace.SpanStar
return span, ctx return span, ctx
} }
func hasStacktrace(err error) bool {
switch e := err.(type) {
case interface{ StackTrace() *stack.Stack }:
return true
case interface{ StackTrace() errors.StackTrace }:
return true
case interface{ Unwrap() error }:
return hasStacktrace(e.Unwrap())
case interface{ Unwrap() []error }:
for _, ue := range e.Unwrap() {
if hasStacktrace(ue) {
return true
}
}
}
return false
}
// FinishWithError finalizes the span and sets the error if one is passed // FinishWithError finalizes the span and sets the error if one is passed
func FinishWithError(span trace.Span, err error) { func FinishWithError(span trace.Span, err error) {
if err != nil { if err != nil {
span.RecordError(err) span.RecordError(err)
if _, ok := err.(interface { if hasStacktrace(err) {
Cause() error span.SetAttributes(attribute.String(string(semconv.ExceptionStacktraceKey), fmt.Sprintf("%+v", stack.Formatter(err))))
}); ok {
span.SetAttributes(attribute.String(string(semconv.ExceptionStacktraceKey), fmt.Sprintf("%+v", err)))
} }
span.SetStatus(codes.Error, err.Error()) span.SetStatus(codes.Error, err.Error())
} }

21 vendor/modules.txt vendored
View File

@@ -131,7 +131,7 @@ github.com/cenkalti/backoff/v4
# github.com/cespare/xxhash/v2 v2.2.0 # github.com/cespare/xxhash/v2 v2.2.0
## explicit; go 1.11 ## explicit; go 1.11
github.com/cespare/xxhash/v2 github.com/cespare/xxhash/v2
# github.com/compose-spec/compose-go/v2 v2.1.1 # github.com/compose-spec/compose-go/v2 v2.1.2
## explicit; go 1.21 ## explicit; go 1.21
github.com/compose-spec/compose-go/v2/cli github.com/compose-spec/compose-go/v2/cli
github.com/compose-spec/compose-go/v2/consts github.com/compose-spec/compose-go/v2/consts
@@ -153,7 +153,7 @@ github.com/compose-spec/compose-go/v2/validation
# github.com/containerd/console v1.0.4 # github.com/containerd/console v1.0.4
## explicit; go 1.13 ## explicit; go 1.13
github.com/containerd/console github.com/containerd/console
# github.com/containerd/containerd v1.7.17 # github.com/containerd/containerd v1.7.18
## explicit; go 1.21 ## explicit; go 1.21
github.com/containerd/containerd/api/services/content/v1 github.com/containerd/containerd/api/services/content/v1
github.com/containerd/containerd/archive/compression github.com/containerd/containerd/archive/compression
@@ -168,7 +168,6 @@ github.com/containerd/containerd/images
github.com/containerd/containerd/images/archive github.com/containerd/containerd/images/archive
github.com/containerd/containerd/labels github.com/containerd/containerd/labels
github.com/containerd/containerd/leases github.com/containerd/containerd/leases
github.com/containerd/containerd/log
github.com/containerd/containerd/namespaces github.com/containerd/containerd/namespaces
github.com/containerd/containerd/pkg/dialer github.com/containerd/containerd/pkg/dialer
github.com/containerd/containerd/pkg/randutil github.com/containerd/containerd/pkg/randutil
@@ -197,6 +196,9 @@ github.com/containerd/continuity/fs/fstest
github.com/containerd/continuity/pathdriver github.com/containerd/continuity/pathdriver
github.com/containerd/continuity/proto github.com/containerd/continuity/proto
github.com/containerd/continuity/sysx github.com/containerd/continuity/sysx
# github.com/containerd/errdefs v0.1.0
## explicit; go 1.20
github.com/containerd/errdefs
# github.com/containerd/log v0.1.0 # github.com/containerd/log v0.1.0
## explicit; go 1.20 ## explicit; go 1.20
github.com/containerd/log github.com/containerd/log
@@ -215,7 +217,7 @@ github.com/davecgh/go-spew/spew
# github.com/distribution/reference v0.5.0 # github.com/distribution/reference v0.5.0
## explicit; go 1.20 ## explicit; go 1.20
github.com/distribution/reference github.com/distribution/reference
# github.com/docker/cli v26.1.3+incompatible # github.com/docker/cli v26.1.4+incompatible
## explicit ## explicit
github.com/docker/cli/cli github.com/docker/cli/cli
github.com/docker/cli/cli-plugins/hooks github.com/docker/cli/cli-plugins/hooks
@@ -269,7 +271,7 @@ github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory github.com/docker/distribution/registry/storage/cache/memory
github.com/docker/distribution/uuid github.com/docker/distribution/uuid
# github.com/docker/docker v26.1.3+incompatible # github.com/docker/docker v26.1.4+incompatible
## explicit ## explicit
github.com/docker/docker/api github.com/docker/docker/api
github.com/docker/docker/api/types github.com/docker/docker/api/types
@@ -350,6 +352,10 @@ github.com/go-openapi/jsonreference/internal
# github.com/go-openapi/swag v0.22.3 # github.com/go-openapi/swag v0.22.3
## explicit; go 1.18 ## explicit; go 1.18
github.com/go-openapi/swag github.com/go-openapi/swag
# github.com/go-viper/mapstructure/v2 v2.0.0
## explicit; go 1.18
github.com/go-viper/mapstructure/v2
github.com/go-viper/mapstructure/v2/internal/errors
# github.com/gofrs/flock v0.8.1 # github.com/gofrs/flock v0.8.1
## explicit ## explicit
github.com/gofrs/flock github.com/gofrs/flock
@@ -511,11 +517,10 @@ github.com/mitchellh/copystructure
github.com/mitchellh/go-wordwrap github.com/mitchellh/go-wordwrap
# github.com/mitchellh/mapstructure v1.5.0 # github.com/mitchellh/mapstructure v1.5.0
## explicit; go 1.14 ## explicit; go 1.14
github.com/mitchellh/mapstructure
# github.com/mitchellh/reflectwalk v1.0.2 # github.com/mitchellh/reflectwalk v1.0.2
## explicit ## explicit
github.com/mitchellh/reflectwalk github.com/mitchellh/reflectwalk
# github.com/moby/buildkit v0.14.0-rc1 # github.com/moby/buildkit v0.14.0-rc2
## explicit; go 1.21 ## explicit; go 1.21
github.com/moby/buildkit/api/services/control github.com/moby/buildkit/api/services/control
github.com/moby/buildkit/api/types github.com/moby/buildkit/api/types
@@ -860,7 +865,7 @@ golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
golang.org/x/exp/constraints golang.org/x/exp/constraints
golang.org/x/exp/maps golang.org/x/exp/maps
golang.org/x/exp/slices golang.org/x/exp/slices
# golang.org/x/mod v0.14.0 # golang.org/x/mod v0.17.0
## explicit; go 1.18 ## explicit; go 1.18
golang.org/x/mod/internal/lazyregexp golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module golang.org/x/mod/module