Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-08-15 16:25:54 +08:00)

Compare commits: v0.8.0-rc1...v0.8 (32 commits)
SHA1:

6224def4dd
5abee699a6
54e497dfba
dfbd226285
a78c2957a2
e80890ec89
10fe8f380c
5fac64c2c4
24ad37a5d2
106651877d
35bcd88f08
c8f7c1e93f
b78c680207
d7412c9420
a7fba7bf3a
19ff7cdadc
c255c04eed
9fcea76dea
1416bc1d83
215a128fc1
4e4eea7814
8079bd2841
2d5368cccc
a1256c6bb2
e7863eb664
171c4375a1
45844805ec
b77d7864fa
6efcee28d5
3ad24524c4
971b5d2b73
94c5dde85a
@@ -95,7 +95,7 @@ Rename the relevant binary and copy it to the destination matching your OS:

 | OS      | Binary name         | Destination folder                  |
 | ------- | ------------------- | ----------------------------------- |
 | Linux   | `docker-buildx`     | `$HOME/.docker/cli-plugins`         |
 | macOS   | `docker-buildx`     | `$HOME/.docker/cli-plugins`         |
-| Windows | `docker-buildx.exe` | `%USERPROFILE%\.docker\cli-plugin`  |
+| Windows | `docker-buildx.exe` | `%USERPROFILE%\.docker\cli-plugins` |

 Or copy it into one of these folders to install it system-wide.
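For a concrete illustration, a per-user install on Linux could look like the following; the release version and download URL are examples, not part of this change:

```shell
# Hypothetical example: fetch a release binary, rename it to the plugin
# name, make it executable, and drop it into the per-user plugins folder.
mkdir -p "$HOME/.docker/cli-plugins"
curl -fsSL -o "$HOME/.docker/cli-plugins/docker-buildx" \
  "https://github.com/docker/buildx/releases/download/v0.8.0/buildx-v0.8.0.linux-amd64"
chmod +x "$HOME/.docker/cli-plugins/docker-buildx"
docker buildx version   # verify the CLI picks up the plugin
```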
bake/bake.go (18 changed lines)
@@ -29,7 +29,9 @@ var (
 	gitURLPathWithFragmentSuffix = regexp.MustCompile(`\.git(?:#.+)?$`)

 	validTargetNameChars = `[a-zA-Z0-9_-]+`
+	validTargetNameCharsCompose = `[a-zA-Z0-9._-]+`
 	targetNamePattern = regexp.MustCompile(`^` + validTargetNameChars + `$`)
+	targetNamePatternCompose = regexp.MustCompile(`^` + validTargetNameCharsCompose + `$`)
 )

 type File struct {
@@ -412,12 +414,12 @@ func (c Config) newOverrides(v []string) (map[string]map[string]Override, error)
 }

 func (c Config) ResolveGroup(name string) []string {
-	return c.group(name, map[string]struct{}{})
+	return dedupString(c.group(name, map[string][]string{}))
 }

-func (c Config) group(name string, visited map[string]struct{}) []string {
+func (c Config) group(name string, visited map[string][]string) []string {
 	if _, ok := visited[name]; ok {
-		return nil
+		return visited[name]
 	}
 	var g *Group
 	for _, group := range c.Groups {
@@ -429,7 +431,7 @@ func (c Config) group(name string, visited map[string]struct{}) []string {
 	if g == nil {
 		return []string{name}
 	}
-	visited[name] = struct{}{}
+	visited[name] = []string{}
 	targets := make([]string, 0, len(g.Targets))
 	for _, t := range g.Targets {
 		tgroup := c.group(t, visited)
@@ -439,6 +441,7 @@ func (c Config) group(name string, visited map[string]struct{}) []string {
 			targets = append(targets, t)
 		}
 	}
+	visited[name] = targets
 	return targets
 }

@@ -968,6 +971,13 @@ func validateTargetName(name string) error {
 	return nil
 }

+func validateTargetNameCompose(name string) error {
+	if !targetNamePatternCompose.MatchString(name) {
+		return errors.Errorf("only %q are allowed", validTargetNameCharsCompose)
+	}
+	return nil
+}
+
 func sliceEqual(s1, s2 []string) bool {
 	if len(s1) != len(s2) {
 		return false
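The reworked resolver memoizes each group's expansion in `visited`, so a group reachable through several parents (as exercised by the nested-groups test below) is walked only once, and `dedupString` removes repeats at the top level. A minimal self-contained sketch of the same idea; the group data and helper names are illustrative, not the buildx API:

```go
package main

import "fmt"

// groups maps a group name to its member names (targets or other groups).
var groups = map[string][]string{
	"a": {"b", "c"},
	"b": {"d"},
	"c": {"b"},
}

// resolve expands a group into leaf targets, caching results in visited so
// shared subgroups are walked only once and cycles terminate.
func resolve(name string, visited map[string][]string) []string {
	if ts, ok := visited[name]; ok {
		return ts
	}
	members, ok := groups[name]
	if !ok {
		return []string{name} // a leaf target
	}
	visited[name] = []string{} // mark in-progress to break cycles
	var targets []string
	for _, m := range members {
		targets = append(targets, resolve(m, visited)...)
	}
	visited[name] = targets
	return targets
}

// dedup keeps the first occurrence of each target name.
func dedup(in []string) []string {
	seen := map[string]struct{}{}
	var out []string
	for _, s := range in {
		if _, ok := seen[s]; ok {
			continue
		}
		seen[s] = struct{}{}
		out = append(out, s)
	}
	return out
}

func main() {
	fmt.Println(dedup(resolve("a", map[string][]string{}))) // [d]
}
```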
@@ -1045,3 +1045,79 @@ func TestTargetName(t *testing.T) {
 		})
 	}
 }
+
+func TestNestedGroupsWithSameTarget(t *testing.T) {
+	ctx := context.TODO()
+
+	f := File{
+		Name: "docker-bake.hcl",
+		Data: []byte(`
+group "a" {
+  targets = ["b", "c"]
+}
+
+group "b" {
+  targets = ["d"]
+}
+
+group "c" {
+  targets = ["b"]
+}
+
+target "d" {
+  context = "."
+  dockerfile = "./testdockerfile"
+}
+
+group "e" {
+  targets = ["a", "f"]
+}
+
+target "f" {
+  context = "./foo"
+}`)}
+
+	cases := []struct {
+		name     string
+		targets  []string
+		ntargets int
+	}{
+		{
+			name:     "a",
+			targets:  []string{"b", "c"},
+			ntargets: 1,
+		},
+		{
+			name:     "b",
+			targets:  []string{"d"},
+			ntargets: 1,
+		},
+		{
+			name:     "c",
+			targets:  []string{"b"},
+			ntargets: 1,
+		},
+		{
+			name:     "d",
+			targets:  []string{"d"},
+			ntargets: 1,
+		},
+		{
+			name:     "e",
+			targets:  []string{"a", "f"},
+			ntargets: 2,
+		},
+	}
+	for _, tt := range cases {
+		tt := tt
+		t.Run(tt.name, func(t *testing.T) {
+			m, g, err := ReadTargets(ctx, []File{f}, []string{tt.name}, nil, nil)
+			require.NoError(t, err)
+			require.Equal(t, 1, len(g))
+			require.Equal(t, tt.targets, g[0].Targets)
+			require.Equal(t, tt.ntargets, len(m))
+			require.Equal(t, ".", *m["d"].Context)
+			require.Equal(t, "./testdockerfile", *m["d"].Dockerfile)
+		})
+	}
+}
@@ -60,7 +60,7 @@ func ParseCompose(dt []byte) (*Config, error) {
 			continue
 		}

-		if err = validateTargetName(s.Name); err != nil {
+		if err = validateTargetNameCompose(s.Name); err != nil {
 			return nil, errors.Wrapf(err, "invalid service name %q", s.Name)
 		}
@@ -310,6 +310,27 @@ services:
 	require.Equal(t, c.Targets[0].Args, map[string]string{"CT_ECR": "foo", "FOO": "bsdf -csdf", "NODE_ENV": "test"})
 }

+func TestPorts(t *testing.T) {
+	var dt = []byte(`
+services:
+  foo:
+    build:
+      context: .
+    ports:
+      - 3306:3306
+  bar:
+    build:
+      context: .
+    ports:
+      - mode: ingress
+        target: 3306
+        published: "3306"
+        protocol: tcp
+`)
+	_, err := ParseCompose(dt)
+	require.NoError(t, err)
+}
+
 func newBool(val bool) *bool {
 	b := val
 	return &b
@@ -330,6 +351,10 @@ func TestServiceName(t *testing.T) {
 		},
 		{
 			svc:     "a.b",
-			wantErr: true,
+			wantErr: false,
 		},
+		{
+			svc:     "a?b",
+			wantErr: true,
+		},
 		{
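Compose service names may legitimately contain dots, so the loader now validates them against the relaxed `[a-zA-Z0-9._-]+` pattern while bake target names keep the stricter rule. A hypothetical test in the same style as the ones above (it would live alongside them in the compose tests) passes after this change:

```go
func TestDottedServiceName(t *testing.T) {
	// "app.web" is a made-up service name; the dot is now accepted by
	// validateTargetNameCompose but still rejected for bake target names.
	var dt = []byte(`
services:
  app.web:
    build:
      context: .
`)
	_, err := ParseCompose(dt)
	require.NoError(t, err)
}
```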
@@ -189,6 +189,10 @@ func splitToDriverPairs(availablePlatforms map[string]int, opt map[string]Option
 		pp = append(pp, p)
 		mm[idx] = pp
 	}
+	// if no platform is specified, use first driver
+	if len(mm) == 0 {
+		mm[0] = nil
+	}
 	dps := make([]driverPair, 0, 2)
 	for idx, pp := range mm {
 		dps = append(dps, driverPair{driverIndex: idx, platforms: pp})
@@ -762,9 +766,12 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
 			resp[k] = res[0]
 			respMu.Unlock()
 			if len(res) == 1 {
-				digest := res[0].ExporterResponse["containerimage.digest"]
+				dgst := res[0].ExporterResponse[exptypes.ExporterImageDigestKey]
+				if v, ok := res[0].ExporterResponse[exptypes.ExporterImageConfigDigestKey]; ok {
+					dgst = v
+				}
 				if opt.ImageIDFile != "" {
-					return ioutil.WriteFile(opt.ImageIDFile, []byte(digest), 0644)
+					return ioutil.WriteFile(opt.ImageIDFile, []byte(dgst), 0644)
 				}
 				return nil
 			}
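With this change, the image ID written via `--iidfile` prefers the image config digest reported by the exporter, falling back to the manifest digest. An illustrative invocation (the tag and paths are made up):

```shell
# Hypothetical usage: capture the image ID of a single-platform build and
# run the exact image that was just built.
docker buildx build --load --iidfile /tmp/iid -t example/app:dev .
docker run --rm "$(cat /tmp/iid)"
```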
@@ -774,7 +781,7 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
 				descs := make([]specs.Descriptor, 0, len(res))

 				for _, r := range res {
-					s, ok := r.ExporterResponse["containerimage.digest"]
+					s, ok := r.ExporterResponse[exptypes.ExporterImageDigestKey]
 					if ok {
 						descs = append(descs, specs.Descriptor{
 							Digest: digest.Digest(s),
@@ -930,13 +937,27 @@ func Build(ctx context.Context, drivers []DriverInfo, opt map[string]Options, do
 							return errors.Errorf("tag is needed when pushing to registry")
 						}
 						pw := progress.ResetTime(pw)
-						for _, name := range strings.Split(pushNames, ",") {
+						pushList := strings.Split(pushNames, ",")
+						for _, name := range pushList {
 							if err := progress.Wrap(fmt.Sprintf("pushing %s with docker", name), pw.Write, func(l progress.SubLogger) error {
 								return pushWithMoby(ctx, d, name, l)
 							}); err != nil {
 								return err
 							}
 						}
+						remoteDigest, err := remoteDigestWithMoby(ctx, d, pushList[0])
+						if err == nil && remoteDigest != "" {
+							// old daemons might not have containerimage.config.digest set
+							// in response so use containerimage.digest value for it if available
+							if _, ok := rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey]; !ok {
+								if v, ok := rr.ExporterResponse[exptypes.ExporterImageDigestKey]; ok {
+									rr.ExporterResponse[exptypes.ExporterImageConfigDigestKey] = v
+								}
+							}
+							rr.ExporterResponse[exptypes.ExporterImageDigestKey] = remoteDigest
+						} else if err != nil {
+							return err
+						}
 					}
 				}
 			}
@@ -1041,6 +1062,29 @@ func pushWithMoby(ctx context.Context, d driver.Driver, name string, l progress.
 	return nil
 }

+func remoteDigestWithMoby(ctx context.Context, d driver.Driver, name string) (string, error) {
+	api := d.Config().DockerAPI
+	if api == nil {
+		return "", errors.Errorf("invalid empty Docker API reference") // should never happen
+	}
+	creds, err := imagetools.RegistryAuthForRef(name, d.Config().Auth)
+	if err != nil {
+		return "", err
+	}
+	image, _, err := api.ImageInspectWithRaw(ctx, name)
+	if err != nil {
+		return "", err
+	}
+	if len(image.RepoDigests) == 0 {
+		return "", nil
+	}
+	remoteImage, err := api.DistributionInspect(ctx, name, creds)
+	if err != nil {
+		return "", err
+	}
+	return remoteImage.Descriptor.Digest.String(), nil
+}
+
 func createTempDockerfile(r io.Reader) (string, error) {
 	dir, err := ioutil.TempDir("", "dockerfile")
 	if err != nil {
@@ -1263,6 +1307,7 @@ func waitContextDeps(ctx context.Context, index int, results *waitmap.Map, so *c
 					so.FrontendAttrs["input-metadata:"+k+"::"+platform] = string(dt)
 				}
 			}
+			delete(so.FrontendAttrs, v)
 		}
 		if rr.Ref != nil {
 			st, err := rr.Ref.ToState()
@@ -43,8 +43,7 @@ func init() {
 }

 func main() {
-	if os.Getenv("DOCKER_CLI_PLUGIN_ORIGINAL_CLI_COMMAND") == "" {
-		if len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName {
+	if plugin.RunningStandalone() {
 		dockerCli, err := command.NewDockerCli()
 		if err != nil {
 			fmt.Fprintln(os.Stderr, err)
@@ -58,7 +57,6 @@ func main() {
 		}
 		os.Exit(0)
 	}
-	}

 	dockerCli, err := command.NewDockerCli()
 	if err != nil {
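The standalone-versus-plugin check now goes through the docker CLI's `plugin.RunningStandalone()` helper instead of probing an environment variable and `os.Args`. Both invocation styles below reach the same binary; the symlink path is an example:

```shell
# As a docker CLI plugin (the binary lives in ~/.docker/cli-plugins):
docker buildx build .

# Standalone, e.g. after:
#   ln -s ~/.docker/cli-plugins/docker-buildx /usr/local/bin/buildx
buildx build .
```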
@@ -150,14 +150,7 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
 		return wrapBuildError(err, true)
 	}

-	if len(in.metadataFile) > 0 && resp != nil {
-		if len(resp) == 1 {
-			for _, r := range resp {
-				if err := writeMetadataFile(in.metadataFile, decodeExporterResponse(r.ExporterResponse)); err != nil {
-					return err
-				}
-			}
-		} else {
+	if len(in.metadataFile) > 0 {
 		dt := make(map[string]interface{})
 		for t, r := range resp {
 			dt[t] = decodeExporterResponse(r.ExporterResponse)
@@ -166,7 +159,6 @@ func runBake(dockerCli command.Cli, targets []string, in bakeOptions) (err error
 			return err
 		}
 	}
-	}

 	return err
 }
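After this simplification, `bake` always writes a single JSON document keyed by target name, even when only one target was built. An illustrative invocation:

```shell
# Hypothetical: bake a target and inspect the per-target metadata map.
docker buildx bake --metadata-file /tmp/meta.json app
jq 'keys' /tmp/meta.json   # e.g. ["app"]
```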
@@ -444,7 +444,7 @@ func buildCmd(dockerCli command.Cli, rootOpts *rootOptions) *cobra.Command {
 func commonBuildFlags(options *commonOptions, flags *pflag.FlagSet) {
 	options.noCache = flags.Bool("no-cache", false, "Do not use cache when building the image")
 	flags.StringVar(&options.progress, "progress", "auto", `Set type of progress output ("auto", "plain", "tty"). Use plain to show container output`)
-	options.pull = flags.Bool("pull", false, "Always attempt to pull a newer version of the image")
+	options.pull = flags.Bool("pull", false, "Always attempt to pull all referenced images")
 	flags.StringVar(&options.metadataFile, "metadata-file", "", "Write build result metadata to the file")
 }
@@ -8,13 +8,13 @@ import (
 	"github.com/docker/buildx/build"
 	"github.com/docker/buildx/driver"
+	ctxkube "github.com/docker/buildx/driver/kubernetes/context"
 	"github.com/docker/buildx/store"
 	"github.com/docker/buildx/store/storeutil"
 	"github.com/docker/buildx/util/platformutil"
 	"github.com/docker/buildx/util/progress"
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/context/docker"
-	"github.com/docker/cli/cli/context/kubernetes"
 	ctxstore "github.com/docker/cli/cli/context/store"
 	dopts "github.com/docker/cli/opts"
 	dockerclient "github.com/docker/docker/client"
@@ -150,7 +150,7 @@ func configFromContext(endpointName string, s ctxstore.Reader) (clientcmd.Client
 		}
 		return clientcmd.NewDefaultClientConfig(*apiConfig, &clientcmd.ConfigOverrides{}), nil
 	}
-	return kubernetes.ConfigFromContext(endpointName, s)
+	return ctxkube.ConfigFromContext(endpointName, s)
 }

 // clientForEndpoint returns a docker client for an endpoint
@@ -22,7 +22,7 @@ Build from a file
 | [`--no-cache`](#no-cache) | | | Do not use cache when building the image |
 | [`--print`](#print) | | | Print the options without building |
 | [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
-| [`--pull`](#pull) | | | Always attempt to pull a newer version of the image |
+| [`--pull`](#pull) | | | Always attempt to pull all referenced images |
 | `--push` | | | Shorthand for `--set=*.output=type=registry` |
 | [`--set`](#set) | `stringArray` | | Override target value (e.g., `targetpattern.key=value`) |
@@ -34,7 +34,7 @@ Start a build
 | [`-o`](#output), [`--output`](#output) | `stringArray` | | Output destination (format: `type=local,dest=path`) |
 | [`--platform`](#platform) | `stringArray` | | Set target platform for build |
 | [`--progress`](#progress) | `string` | `auto` | Set type of progress output (`auto`, `plain`, `tty`). Use plain to show container output |
-| `--pull` | | | Always attempt to pull a newer version of the image |
+| `--pull` | | | Always attempt to pull all referenced images |
 | [`--push`](#push) | | | Shorthand for `--output=type=registry` |
 | `-q`, `--quiet` | | | Suppress the build output and print image ID on success |
 | [`--secret`](#secret) | `stringArray` | | Secret to expose to the build (format: `id=mysecret[,src=/local/secret]`) |
@@ -54,19 +54,7 @@ to the UI of `docker build` command and takes the same flags and arguments.

 For documentation on most of these flags, refer to the [`docker build`
 documentation](https://docs.docker.com/engine/reference/commandline/build/). In
-here we’ll document a subset of the new flags.
-
-### Built-in build args
-
-* `BUILDKIT_INLINE_BUILDINFO_ATTRS=<bool>` inline build info attributes in image config or not
-* `BUILDKIT_INLINE_CACHE=<bool>` inline cache metadata to image config or not
-* `BUILDKIT_MULTI_PLATFORM=<bool>` opt into deterministic output regardless of multi-platform output or not
-
-```shell
-docker buildx build --build-arg BUILDKIT_MULTI_PLATFORM=1 .
-```
-
-Other useful built-in args can be found in [dockerfile frontend docs](https://github.com/moby/buildkit/blob/master/frontend/dockerfile/docs/syntax.md#built-in-build-args).
+here we'll document a subset of the new flags.

 ## Examples

@@ -99,6 +87,7 @@ Same as [`docker build` command](https://docs.docker.com/engine/reference/comman
 There are also useful built-in build args like:

+* `BUILDKIT_CONTEXT_KEEP_GIT_DIR=<bool>` trigger git context to keep the `.git` directory
 * `BUILDKIT_INLINE_BUILDINFO_ATTRS=<bool>` inline build info attributes in image config or not
 * `BUILDKIT_INLINE_CACHE=<bool>` inline cache metadata to image config or not
 * `BUILDKIT_MULTI_PLATFORM=<bool>` opt into deterministic output regardless of multi-platform output or not
@@ -178,6 +178,8 @@ $ docker buildx imagetools inspect moby/buildkit:master --format "{{json .Manife
 {
   "schemaVersion": 2,
   "mediaType": "application/vnd.docker.distribution.manifest.list.v2+json",
+  "digest": "sha256:79d97f205e2799d99a3a8ae2a1ef17acb331e11784262c3faada847dc6972c52",
+  "size": 2010,
   "manifests": [
     {
       "mediaType": "application/vnd.docker.distribution.manifest.v2+json",
@@ -1,4 +1,4 @@
-package kubernetes
+package context

 const (
 	// KubernetesEndpoint is the kubernetes endpoint name in a stored context
driver/kubernetes/context/endpoint_test.go (new file, 225 lines)

@@ -0,0 +1,225 @@
package context

import (
	"io/ioutil"
	"os"
	"testing"

	"github.com/docker/cli/cli/context"
	"github.com/docker/cli/cli/context/store"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
	"k8s.io/client-go/tools/clientcmd"
	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
)

func testEndpoint(server, defaultNamespace string, ca, cert, key []byte, skipTLSVerify bool) Endpoint {
	var tlsData *context.TLSData
	if ca != nil || cert != nil || key != nil {
		tlsData = &context.TLSData{
			CA:   ca,
			Cert: cert,
			Key:  key,
		}
	}
	return Endpoint{
		EndpointMeta: EndpointMeta{
			EndpointMetaBase: context.EndpointMetaBase{
				Host:          server,
				SkipTLSVerify: skipTLSVerify,
			},
			DefaultNamespace: defaultNamespace,
		},
		TLSData: tlsData,
	}
}

var testStoreCfg = store.NewConfig(
	func() interface{} {
		return &map[string]interface{}{}
	},
	store.EndpointTypeGetter(KubernetesEndpoint, func() interface{} { return &EndpointMeta{} }),
)

func TestSaveLoadContexts(t *testing.T) {
	storeDir, err := ioutil.TempDir("", "test-load-save-k8-context")
	require.NoError(t, err)
	defer os.RemoveAll(storeDir)
	store := store.New(storeDir, testStoreCfg)
	require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, false), "raw-notls"))
	require.NoError(t, save(store, testEndpoint("https://test", "test", nil, nil, nil, true), "raw-notls-skip"))
	require.NoError(t, save(store, testEndpoint("https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true), "raw-tls"))

	kcFile, err := ioutil.TempFile(os.TempDir(), "test-load-save-k8-context")
	require.NoError(t, err)
	defer os.Remove(kcFile.Name())
	defer kcFile.Close()
	cfg := clientcmdapi.NewConfig()
	cfg.AuthInfos["user"] = clientcmdapi.NewAuthInfo()
	cfg.Contexts["context1"] = clientcmdapi.NewContext()
	cfg.Clusters["cluster1"] = clientcmdapi.NewCluster()
	cfg.Contexts["context2"] = clientcmdapi.NewContext()
	cfg.Clusters["cluster2"] = clientcmdapi.NewCluster()
	cfg.AuthInfos["user"].ClientCertificateData = []byte("cert")
	cfg.AuthInfos["user"].ClientKeyData = []byte("key")
	cfg.Clusters["cluster1"].Server = "https://server1"
	cfg.Clusters["cluster1"].InsecureSkipTLSVerify = true
	cfg.Clusters["cluster2"].Server = "https://server2"
	cfg.Clusters["cluster2"].CertificateAuthorityData = []byte("ca")
	cfg.Contexts["context1"].AuthInfo = "user"
	cfg.Contexts["context1"].Cluster = "cluster1"
	cfg.Contexts["context1"].Namespace = "namespace1"
	cfg.Contexts["context2"].AuthInfo = "user"
	cfg.Contexts["context2"].Cluster = "cluster2"
	cfg.Contexts["context2"].Namespace = "namespace2"
	cfg.CurrentContext = "context1"
	cfgData, err := clientcmd.Write(*cfg)
	require.NoError(t, err)
	_, err = kcFile.Write(cfgData)
	require.NoError(t, err)
	kcFile.Close()

	epDefault, err := FromKubeConfig(kcFile.Name(), "", "")
	require.NoError(t, err)
	epContext2, err := FromKubeConfig(kcFile.Name(), "context2", "namespace-override")
	require.NoError(t, err)
	require.NoError(t, save(store, epDefault, "embed-default-context"))
	require.NoError(t, save(store, epContext2, "embed-context2"))

	rawNoTLSMeta, err := store.GetMetadata("raw-notls")
	require.NoError(t, err)
	rawNoTLSSkipMeta, err := store.GetMetadata("raw-notls-skip")
	require.NoError(t, err)
	rawTLSMeta, err := store.GetMetadata("raw-tls")
	require.NoError(t, err)
	embededDefaultMeta, err := store.GetMetadata("embed-default-context")
	require.NoError(t, err)
	embededContext2Meta, err := store.GetMetadata("embed-context2")
	require.NoError(t, err)

	rawNoTLS := EndpointFromContext(rawNoTLSMeta)
	rawNoTLSSkip := EndpointFromContext(rawNoTLSSkipMeta)
	rawTLS := EndpointFromContext(rawTLSMeta)
	embededDefault := EndpointFromContext(embededDefaultMeta)
	embededContext2 := EndpointFromContext(embededContext2Meta)

	rawNoTLSEP, err := rawNoTLS.WithTLSData(store, "raw-notls")
	require.NoError(t, err)
	checkClientConfig(t, rawNoTLSEP, "https://test", "test", nil, nil, nil, false)
	rawNoTLSSkipEP, err := rawNoTLSSkip.WithTLSData(store, "raw-notls-skip")
	require.NoError(t, err)
	checkClientConfig(t, rawNoTLSSkipEP, "https://test", "test", nil, nil, nil, true)
	rawTLSEP, err := rawTLS.WithTLSData(store, "raw-tls")
	require.NoError(t, err)
	checkClientConfig(t, rawTLSEP, "https://test", "test", []byte("ca"), []byte("cert"), []byte("key"), true)
	embededDefaultEP, err := embededDefault.WithTLSData(store, "embed-default-context")
	require.NoError(t, err)
	checkClientConfig(t, embededDefaultEP, "https://server1", "namespace1", nil, []byte("cert"), []byte("key"), true)
	embededContext2EP, err := embededContext2.WithTLSData(store, "embed-context2")
	require.NoError(t, err)
	checkClientConfig(t, embededContext2EP, "https://server2", "namespace-override", []byte("ca"), []byte("cert"), []byte("key"), false)
}

func checkClientConfig(t *testing.T, ep Endpoint, server, namespace string, ca, cert, key []byte, skipTLSVerify bool) {
	config := ep.KubernetesConfig()
	cfg, err := config.ClientConfig()
	require.NoError(t, err)
	ns, _, _ := config.Namespace()
	assert.Equal(t, server, cfg.Host)
	assert.Equal(t, namespace, ns)
	assert.Equal(t, ca, cfg.CAData)
	assert.Equal(t, cert, cfg.CertData)
	assert.Equal(t, key, cfg.KeyData)
	assert.Equal(t, skipTLSVerify, cfg.Insecure)
}

func save(s store.Writer, ep Endpoint, name string) error {
	meta := store.Metadata{
		Endpoints: map[string]interface{}{
			KubernetesEndpoint: ep.EndpointMeta,
		},
		Name: name,
	}
	if err := s.CreateOrUpdate(meta); err != nil {
		return err
	}
	return s.ResetEndpointTLSMaterial(name, KubernetesEndpoint, ep.TLSData.ToStoreTLSData())
}

func TestSaveLoadGKEConfig(t *testing.T) {
	storeDir, err := ioutil.TempDir("", t.Name())
	require.NoError(t, err)
	defer os.RemoveAll(storeDir)
	store := store.New(storeDir, testStoreCfg)
	cfg, err := clientcmd.LoadFromFile("fixtures/gke-kubeconfig")
	require.NoError(t, err)
	clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
	expectedCfg, err := clientCfg.ClientConfig()
	require.NoError(t, err)
	ep, err := FromKubeConfig("fixtures/gke-kubeconfig", "", "")
	require.NoError(t, err)
	require.NoError(t, save(store, ep, "gke-context"))
	persistedMetadata, err := store.GetMetadata("gke-context")
	require.NoError(t, err)
	persistedEPMeta := EndpointFromContext(persistedMetadata)
	assert.True(t, persistedEPMeta != nil)
	persistedEP, err := persistedEPMeta.WithTLSData(store, "gke-context")
	require.NoError(t, err)
	persistedCfg := persistedEP.KubernetesConfig()
	actualCfg, err := persistedCfg.ClientConfig()
	require.NoError(t, err)
	assert.Equal(t, expectedCfg.AuthProvider, actualCfg.AuthProvider)
}

func TestSaveLoadEKSConfig(t *testing.T) {
	storeDir, err := ioutil.TempDir("", t.Name())
	require.NoError(t, err)
	defer os.RemoveAll(storeDir)
	store := store.New(storeDir, testStoreCfg)
	cfg, err := clientcmd.LoadFromFile("fixtures/eks-kubeconfig")
	require.NoError(t, err)
	clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
	expectedCfg, err := clientCfg.ClientConfig()
	require.NoError(t, err)
	ep, err := FromKubeConfig("fixtures/eks-kubeconfig", "", "")
	require.NoError(t, err)
	require.NoError(t, save(store, ep, "eks-context"))
	persistedMetadata, err := store.GetMetadata("eks-context")
	require.NoError(t, err)
	persistedEPMeta := EndpointFromContext(persistedMetadata)
	assert.True(t, persistedEPMeta != nil)
	persistedEP, err := persistedEPMeta.WithTLSData(store, "eks-context")
	require.NoError(t, err)
	persistedCfg := persistedEP.KubernetesConfig()
	actualCfg, err := persistedCfg.ClientConfig()
	require.NoError(t, err)
	assert.Equal(t, expectedCfg.ExecProvider, actualCfg.ExecProvider)
}

func TestSaveLoadK3SConfig(t *testing.T) {
	storeDir, err := ioutil.TempDir("", t.Name())
	require.NoError(t, err)
	defer os.RemoveAll(storeDir)
	store := store.New(storeDir, testStoreCfg)
	cfg, err := clientcmd.LoadFromFile("fixtures/k3s-kubeconfig")
	require.NoError(t, err)
	clientCfg := clientcmd.NewDefaultClientConfig(*cfg, &clientcmd.ConfigOverrides{})
	expectedCfg, err := clientCfg.ClientConfig()
	require.NoError(t, err)
	ep, err := FromKubeConfig("fixtures/k3s-kubeconfig", "", "")
	require.NoError(t, err)
	require.NoError(t, save(store, ep, "k3s-context"))
	persistedMetadata, err := store.GetMetadata("k3s-context")
	require.NoError(t, err)
	persistedEPMeta := EndpointFromContext(persistedMetadata)
	assert.True(t, persistedEPMeta != nil)
	persistedEP, err := persistedEPMeta.WithTLSData(store, "k3s-context")
	require.NoError(t, err)
	persistedCfg := persistedEP.KubernetesConfig()
	actualCfg, err := persistedCfg.ClientConfig()
	require.NoError(t, err)
	assert.True(t, len(actualCfg.Username) > 0)
	assert.True(t, len(actualCfg.Password) > 0)
	assert.Equal(t, expectedCfg.Username, actualCfg.Username)
	assert.Equal(t, expectedCfg.Password, actualCfg.Password)
}
driver/kubernetes/context/fixtures/eks-kubeconfig (new file, 23 lines)

@@ -0,0 +1,23 @@
apiVersion: v1
clusters:
- cluster:
    server: https://some-server
  name: kubernetes
contexts:
- context:
    cluster: kubernetes
    user: aws
  name: aws
current-context: aws
kind: Config
preferences: {}
users:
- name: aws
  user:
    exec:
      apiVersion: client.authentication.k8s.io/v1alpha1
      command: heptio-authenticator-aws
      args:
        - "token"
        - "-i"
        - "eks-cf"
driver/kubernetes/context/fixtures/gke-kubeconfig (new file, 23 lines)

@@ -0,0 +1,23 @@
apiVersion: v1
clusters:
- cluster:
    server: https://some-server
  name: gke_sample
contexts:
- context:
    cluster: gke_sample
    user: gke_sample
  name: gke_sample
current-context: gke_sample
kind: Config
preferences: {}
users:
- name: gke_sample
  user:
    auth-provider:
      config:
        cmd-args: config config-helper --format=json
        cmd-path: /google/google-cloud-sdk/bin/gcloud
        expiry-key: '{.credential.token_expiry}'
        token-key: '{.credential.access_token}'
      name: gcp
driver/kubernetes/context/fixtures/k3s-kubeconfig (new file, 20 lines)

@@ -0,0 +1,20 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: dGhlLWNh
    server: https://someserver
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
    namespace: zoinx
  name: test
current-context: test
kind: Config
preferences: {}
users:
- name: test-user
  user:
    username: admin
    password: testpwd
driver/kubernetes/context/fixtures/test-kubeconfig (new file, 20 lines)

@@ -0,0 +1,20 @@
apiVersion: v1
clusters:
- cluster:
    certificate-authority-data: dGhlLWNh
    server: https://someserver
  name: test-cluster
contexts:
- context:
    cluster: test-cluster
    user: test-user
    namespace: zoinx
  name: test
current-context: test
kind: Config
preferences: {}
users:
- name: test-user
  user:
    client-certificate-data: dGhlLWNlcnQ=
    client-key-data: dGhlLWtleQ==
@@ -1,4 +1,4 @@
-package kubernetes
+package context

 import (
 	"os"
@@ -7,9 +7,7 @@ import (
 	"github.com/docker/cli/cli/command"
 	"github.com/docker/cli/cli/context"
 	"github.com/docker/cli/cli/context/store"
-	api "github.com/docker/compose-on-kubernetes/api"
 	"github.com/docker/docker/pkg/homedir"
-	"github.com/pkg/errors"
 	"k8s.io/client-go/tools/clientcmd"
 	clientcmdapi "k8s.io/client-go/tools/clientcmd/api"
 )
@@ -88,23 +86,18 @@ func (c *Endpoint) KubernetesConfig() clientcmd.ClientConfig {

 // ResolveDefault returns endpoint metadata for the default Kubernetes
 // endpoint, which is derived from the env-based kubeconfig.
-func (c *EndpointMeta) ResolveDefault(stackOrchestrator command.Orchestrator) (interface{}, *store.EndpointTLSData, error) {
+func (c *EndpointMeta) ResolveDefault() (interface{}, *store.EndpointTLSData, error) {
 	kubeconfig := os.Getenv("KUBECONFIG")
 	if kubeconfig == "" {
 		kubeconfig = filepath.Join(homedir.Get(), ".kube/config")
 	}
 	kubeEP, err := FromKubeConfig(kubeconfig, "", "")
 	if err != nil {
-		if stackOrchestrator == command.OrchestratorKubernetes || stackOrchestrator == command.OrchestratorAll {
-			return nil, nil, errors.Wrapf(err, "default orchestrator is %s but unable to resolve kubernetes endpoint", stackOrchestrator)
-		}
-
 		// We deliberately quash the error here, returning nil
 		// for the first argument is sufficient to indicate we weren't able to
 		// provide a default
 		return nil, nil, nil
 	}

 	var tls *store.EndpointTLSData
 	if kubeEP.TLSData != nil {
 		tls = kubeEP.TLSData.ToStoreTLSData()
@@ -142,5 +135,21 @@ func ConfigFromContext(name string, s store.Reader) (clientcmd.ClientConfig, err
 		return ep.KubernetesConfig(), nil
 	}
 	// context has no kubernetes endpoint
-	return api.NewKubernetesConfig(""), nil
+	return NewKubernetesConfig(""), nil
 }

+// NewKubernetesConfig resolves the path to the desired Kubernetes configuration
+// file based on the KUBECONFIG environment variable and command line flags.
+func NewKubernetesConfig(configPath string) clientcmd.ClientConfig {
+	kubeConfig := configPath
+	if kubeConfig == "" {
+		if config := os.Getenv("KUBECONFIG"); config != "" {
+			kubeConfig = config
+		} else {
+			kubeConfig = filepath.Join(homedir.Get(), ".kube/config")
+		}
+	}
+	return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
+		&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfig},
+		&clientcmd.ConfigOverrides{})
+}
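A usage sketch for the new `NewKubernetesConfig` helper; the import path follows the package move in this changeset, and error handling is kept minimal. An empty path resolves with explicit-path, then `$KUBECONFIG`, then `~/.kube/config` precedence:

```go
package main

import (
	"fmt"

	ctxkube "github.com/docker/buildx/driver/kubernetes/context"
)

func main() {
	// An empty path falls back to $KUBECONFIG, then ~/.kube/config.
	clientConfig := ctxkube.NewKubernetesConfig("")
	ns, _, err := clientConfig.Namespace()
	if err != nil {
		fmt.Println("no usable kubeconfig:", err)
		return
	}
	fmt.Println("default namespace:", ns)
}
```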
driver/kubernetes/context/load_test.go (new file, 23 lines)

@@ -0,0 +1,23 @@
package context

import (
	"os"
	"testing"

	"github.com/docker/cli/cli/command"
	"github.com/docker/cli/cli/config/configfile"
	cliflags "github.com/docker/cli/cli/flags"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestDefaultContextInitializer(t *testing.T) {
	cli, err := command.NewDockerCli()
	require.NoError(t, err)
	os.Setenv("KUBECONFIG", "./fixtures/test-kubeconfig")
	defer os.Unsetenv("KUBECONFIG")
	ctx, err := command.ResolveDefaultContext(&cliflags.CommonOptions{}, &configfile.ConfigFile{}, command.DefaultContextStoreConfig(), cli.Err())
	require.NoError(t, err)
	assert.Equal(t, "default", ctx.Meta.Name)
	assert.Equal(t, "zoinx", ctx.Meta.Endpoints[KubernetesEndpoint].(EndpointMeta).DefaultNamespace)
}
@@ -1,4 +1,4 @@
-package kubernetes
+package context

 import (
 	"io/ioutil"
go.mod (10 changed lines)
@@ -8,12 +8,11 @@ require (
 	github.com/bugsnag/panicwrap v1.2.0 // indirect
 	github.com/cenkalti/backoff v2.1.1+incompatible // indirect
 	github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
-	github.com/compose-spec/compose-go v1.0.8
+	github.com/compose-spec/compose-go v1.2.1
 	github.com/containerd/console v1.0.3
-	github.com/containerd/containerd v1.6.0
+	github.com/containerd/containerd v1.6.1
 	github.com/docker/cli v20.10.12+incompatible
 	github.com/docker/cli-docs-tool v0.4.0
-	github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 // indirect
 	github.com/docker/distribution v2.8.0+incompatible
 	github.com/docker/docker v20.10.7+incompatible
 	github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
@@ -31,7 +30,7 @@ require (
 	github.com/jinzhu/gorm v1.9.2 // indirect
 	github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
-	github.com/moby/buildkit v0.10.0-rc1.0.20220225190804-0692ad797425
+	github.com/moby/buildkit v0.10.1-0.20220403220257-10e6f94bf90d
 	github.com/morikuni/aec v1.0.0
 	github.com/opencontainers/go-digest v1.0.0
 	github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
@@ -43,6 +42,7 @@ require (
 	github.com/spf13/pflag v1.0.5
 	github.com/stretchr/testify v1.7.0
 	github.com/theupdateframework/notary v0.6.1 // indirect
+	github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3 // indirect
 	github.com/zclconf/go-cty v1.10.0
 	go.opentelemetry.io/otel v1.4.1
 	go.opentelemetry.io/otel/trace v1.4.1
@@ -57,7 +57,7 @@ require (
 )

 replace (
-	github.com/docker/cli => github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible
+	github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible
 	github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220121014307-40bb9831756f+incompatible
 	k8s.io/api => k8s.io/api v0.22.4
 	k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
go.sum (39 changed lines)
@@ -280,10 +280,8 @@ github.com/cockroachdb/datadriven v0.0.0-20200714090401-bf6692d28da5/go.mod h1:h
 github.com/cockroachdb/errors v1.2.4/go.mod h1:rQD95gz6FARkaKkQXUksEje/d9a6wBJoCr5oaCLELYA=
 github.com/cockroachdb/logtags v0.0.0-20190617123548-eb05cc24525f/go.mod h1:i/u985jwjWRlyHXQbwatDASoW0RMlZ/3i9yJHE2xLkI=
 github.com/codahale/hdrhistogram v0.0.0-20160425231609-f8ad88b59a58/go.mod h1:sE/e/2PUdi/liOCUjSTXgM1o87ZssimdTWN964YiIeI=
-github.com/compose-spec/compose-go v1.0.8 h1:fgT7mYYu5Sp37i2lUIAAvwJpkAHk6dP5ITHy/LlutUk=
-github.com/compose-spec/compose-go v1.0.8/go.mod h1:REnCbBugoIdHB7S1sfkN/aJ7AJpNApGNjNiVjA9L8x4=
-github.com/compose-spec/godotenv v1.1.1 h1:lp+WpAInnw06YN9sV/XLUOV/9z4C+6wjJdWlrdVac7o=
-github.com/compose-spec/godotenv v1.1.1/go.mod h1:zF/3BOa18Z24tts5qnO/E9YURQanJTBUf7nlcCTNsyc=
+github.com/compose-spec/compose-go v1.2.1 h1:8+DAP7Mt/Ohl5y6YbZdilLMvIhMxvuSZcNZyywjQmJE=
+github.com/compose-spec/compose-go v1.2.1/go.mod h1:pAy7Mikpeft4pxkFU565/DRHEbDfR84G6AQuiL+Hdg8=
 github.com/containerd/aufs v0.0.0-20200908144142-dab0cbea06f4/go.mod h1:nukgQABAEopAHvB6j7cnP5zJ+/3aVcE7hCYqvIwAHyE=
 github.com/containerd/aufs v0.0.0-20201003224125-76a6863f2989/go.mod h1:AkGGQs9NM2vtYHaUen+NljV0/baGCAPELGm2q9ZXpWU=
 github.com/containerd/aufs v0.0.0-20210316121734-20793ff83c97/go.mod h1:kL5kd6KM5TzQjR79jljyi4olc1Vrx6XBlcyj3gNv2PU=
@@ -326,8 +324,8 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT
 github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
 github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
 github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
-github.com/containerd/containerd v1.6.0 h1:CLa12ZcV0d2ZTRKq1ssioeJpTnPJBMyndpEKA+UtzJg=
-github.com/containerd/containerd v1.6.0/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
+github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o=
+github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
 github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
 github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -363,10 +361,9 @@ github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFY
 github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
 github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
-github.com/containerd/stargz-snapshotter v0.11.2-0.20220223051521-b1ce4c8d8294/go.mod h1:3PJpOcsh0wqu+p/U/HBbUnG6wzIuw5hP6oRn1hXzQhc=
+github.com/containerd/stargz-snapshotter v0.11.2/go.mod h1:HfhsbZ98KIoqA2GLmibTpRwMF/lq3utZ0ElV9ARqU7M=
 github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
 github.com/containerd/stargz-snapshotter/estargz v0.11.1/go.mod h1:6VoPcf4M1wvnogWxqc4TqBWWErCS+R+ucnPZId2VbpQ=
-github.com/containerd/stargz-snapshotter/estargz v0.11.2-0.20220223051521-b1ce4c8d8294/go.mod h1:6VoPcf4M1wvnogWxqc4TqBWWErCS+R+ucnPZId2VbpQ=
+github.com/containerd/stargz-snapshotter/estargz v0.11.2/go.mod h1:rjbdAXaytDSIrAy2WAy2kUrJ4ehzDS0eUQLlIb5UCY0=
 github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
 github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -443,12 +440,10 @@ github.com/dimchansky/utfbom v1.1.1/go.mod h1:SxdoEBH5qIqFocHMyGOXVAybYJdr71b1Q/
 github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e h1:n81KvOMrLZa+VWHwST7dun9f0G98X3zREHS1ztYzZKU=
 github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e/go.mod h1:xpWTC2KnJMiDLkoawhsPQcXjvwATEBcbq0xevG2YR9M=
 github.com/dnaeon/go-vcr v1.0.1/go.mod h1:aBB1+wY4s93YsC3HHjMBMrwTj2R9FHDzUr9KyGc8n1E=
-github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible h1:CaaxCD/l9Dxogu6lxf7AQautlv3sHULrasPadayp0fM=
-github.com/docker/cli v20.10.3-0.20210702143511-f782d1355eff+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible h1:0Kr33P67t7g42iFMU3uzizk+bek+a5MThEqmt4bqVGM=
+github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli-docs-tool v0.4.0 h1:MdfKoErGEbFqIxQ8an9BsZ+YzKUGd58RBVkV+Q82GPo=
 github.com/docker/cli-docs-tool v0.4.0/go.mod h1:rgW5KKdNpLMBIuH4WQ/1RNh38nH+/Ay5jgL4P0ZMPpY=
-github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496 h1:90ytrX1dbzL7Uf/hHiuWwvywC+gikHv4hkAy4CwRTbs=
-github.com/docker/compose-on-kubernetes v0.4.19-0.20190128150448-356b2919c496/go.mod h1:iT2pYfi580XlpaV4KmK0T6+4/9+XoKmk/fhoDod1emE=
 github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TTO4EOBfRPhZXAeF1Vu+W3hHZ8eLp8PgKVZlcvtFY=
 github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
 github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -883,8 +878,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
 github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
 github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
 github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
-github.com/klauspost/compress v1.14.3 h1:DQv1WP+iS4srNjibdnHtqu8JNWCDMluj5NzPnFJsnvk=
-github.com/klauspost/compress v1.14.3/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
+github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
+github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
 github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -969,12 +964,12 @@ github.com/mitchellh/mapstructure v0.0.0-20160808181253-ca63d7c062ee/go.mod h1:F
 github.com/mitchellh/mapstructure v1.1.2/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
 github.com/mitchellh/mapstructure v1.3.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/mapstructure v1.4.1/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/mitchellh/mapstructure v1.4.2 h1:6h7AQ0yhTcIsmFmnAwQls75jp2Gzs4iB8W7pjMO+rqo=
-github.com/mitchellh/mapstructure v1.4.2/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
+github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGgCcyj8cs=
+github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
 github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
-github.com/moby/buildkit v0.10.0-rc1.0.20220225190804-0692ad797425 h1:4muA0f6pWdKTHPJvdwWGTYYu+whyg39gqwTDVcV4i/U=
-github.com/moby/buildkit v0.10.0-rc1.0.20220225190804-0692ad797425/go.mod h1:vG6thoKi/B564lEn03YQQc1rbOlAohvgD3/X7Mf9Wz0=
+github.com/moby/buildkit v0.10.1-0.20220403220257-10e6f94bf90d h1:6pLVBJO3V/lMegbVD5kh2QrpZwqS4ZrxEm/MyifCPaY=
+github.com/moby/buildkit v0.10.1-0.20220403220257-10e6f94bf90d/go.mod h1:WvwAZv8aRScHkqc/+X46cRC2CKMKpqcaX+pRvUTtPes=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
@@ -1289,8 +1284,9 @@ github.com/tmc/grpc-websocket-proxy v0.0.0-20201229170055-e5319fda7802/go.mod h1
 github.com/tommy-muehle/go-mnd v1.1.1/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
 github.com/tommy-muehle/go-mnd v1.3.1-0.20200224220436-e6f9a994e8fa/go.mod h1:dSUh0FtTP8VhvkL1S+gUR1OKd9ZnSaozuI6r3m6wOig=
 github.com/tonistiigi/fsutil v0.0.0-20201103201449-0834f99b7b85/go.mod h1:a7cilN64dG941IOXfhJhlH0qB92hxJ9A1ewrdUmJ6xo=
-github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274 h1:wbyZxD6IPFp0sl5uscMOJRsz5UKGFiNiD16e+MVfKZY=
-github.com/tonistiigi/fsutil v0.0.0-20220115021204-b19f7f9cb274/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA=
+github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3 h1:T1pEe+WB3SCPVAfVquvfPfagKZU2Z8c1OP3SuGB+id0=
+github.com/tonistiigi/fsutil v0.0.0-20220315205639-9ed612626da3/go.mod h1:oPAfvw32vlUJSjyDcQ3Bu0nb2ON2B+G0dtVN/SZNJiA=
 github.com/tonistiigi/go-actions-cache v0.0.0-20211202175116-9642704158ff/go.mod h1:qqvyZqkfwkoJuPU/bw61bItaoO0SJ8YSW0vSVRRvsRg=
 github.com/tonistiigi/go-archvariant v1.0.0/go.mod h1:TxFmO5VS6vMq2kvs3ht04iPXtu2rUT/erOnGFYfk5Ho=
 github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea h1:SXhTLE6pb6eld/v/cCndK0AMpt1wiVFb/YYmqB3/QG0=
@@ -2044,8 +2040,9 @@ gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C
 gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=
 gotest.tools v2.2.0+incompatible/go.mod h1:DsYFclhRJ6vuDpmuTbkuFWG+y2sxOXAzmJt81HFBacw=
 gotest.tools/v3 v3.0.2/go.mod h1:3SzNCllyD9/Y+b5r9JIKQ474KzkZyqLqEfYqMsX94Bk=
-gotest.tools/v3 v3.0.3 h1:4AuOwCGf4lLR9u3YOe2awrHygurzhO/HeQ6laiA6Sx0=
 gotest.tools/v3 v3.0.3/go.mod h1:Z7Lb0S5l+klDB31fvDQX8ss/FlKDxtlFlw3Oa8Ymbl8=
+gotest.tools/v3 v3.1.0 h1:rVV8Tcg/8jHUkPUorwjaMTtemIMVXfIPKiOqnhEhakk=
+gotest.tools/v3 v3.1.0/go.mod h1:fHy7eyTmJFO5bQbUsEGQ1v4m2J3Jz9eWL54TP2/ZuYQ=
 grpc.go4.org v0.0.0-20170609214715-11d0a25b4919/go.mod h1:77eQGdRu53HpSqPFJFmuJdjuHRquDANNeA4x7B8WQ9o=
 honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
 honnef.co/go/tools v0.0.0-20180920025451-e3ad64cb4ed3/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
@@ -7,6 +7,7 @@ import (
 	"fmt"
 	"io"
 	"os"
+	"sort"
 	"strings"
 	"text/tabwriter"
 	"text/template"
@@ -15,8 +16,10 @@ import (
 	"github.com/containerd/containerd/platforms"
 	"github.com/docker/distribution/reference"
 	binfotypes "github.com/moby/buildkit/util/buildinfo/types"
+	"github.com/opencontainers/go-digest"
 	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
 	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
 )

 const defaultPfx = " "
@@ -109,18 +112,28 @@ func (p *Printer) Print(raw bool, out io.Writer) error {

 	imageconfigs := make(map[string]*ocispecs.Image)
 	buildinfos := make(map[string]*binfotypes.BuildInfo)
-	for _, pform := range p.platforms {
-		img, dtic, err := p.getImageConfig(&pform)
-		if err != nil {
-			return err
-		} else if img != nil {
-			imageconfigs[platforms.Format(pform)] = img
-		}
-		if bi, err := p.getBuildInfo(dtic); err != nil {
-			return err
-		} else if bi != nil {
-			buildinfos[platforms.Format(pform)] = bi
-		}
-	}
+
+	eg, _ := errgroup.WithContext(p.ctx)
+	for _, platform := range p.platforms {
+		func(platform ocispecs.Platform) {
+			eg.Go(func() error {
+				img, dtic, err := p.getImageConfig(&platform)
+				if err != nil {
+					return err
+				} else if img != nil {
+					imageconfigs[platforms.Format(platform)] = img
+				}
+				if bi, err := p.getBuildInfo(dtic); err != nil {
+					return err
+				} else if bi != nil {
+					buildinfos[platforms.Format(platform)] = bi
+				}
+				return nil
+			})
+		}(platform)
+	}
+	if err := eg.Wait(); err != nil {
+		return err
+	}

 	format := tpl.Root.String()
@@ -130,7 +143,21 @@ func (p *Printer) Print(raw bool, out io.Writer) error {
 	case images.MediaTypeDockerSchema2Manifest, ocispecs.MediaTypeImageManifest:
 		manifest = p.manifest
 	case images.MediaTypeDockerSchema2ManifestList, ocispecs.MediaTypeImageIndex:
-		manifest = p.index
+		manifest = struct {
+			SchemaVersion int                   `json:"schemaVersion"`
+			MediaType     string                `json:"mediaType,omitempty"`
+			Digest        digest.Digest         `json:"digest"`
+			Size          int64                 `json:"size"`
+			Manifests     []ocispecs.Descriptor `json:"manifests"`
+			Annotations   map[string]string     `json:"annotations,omitempty"`
+		}{
+			SchemaVersion: p.index.Versioned.SchemaVersion,
+			MediaType:     p.index.MediaType,
+			Digest:        p.manifest.Digest,
+			Size:          p.manifest.Size,
+			Manifests:     p.index.Manifests,
+			Annotations:   p.index.Annotations,
+		}
 	}

 	switch {
@@ -164,13 +191,13 @@ func (p *Printer) Print(raw bool, out io.Writer) error {
 			BuildInfo: buildinfos,
 		})
 	}
-	var imageconfig *ocispecs.Image
-	for _, ic := range imageconfigs {
-		imageconfig = ic
+	var ic *ocispecs.Image
+	for _, v := range imageconfigs {
+		ic = v
 	}
-	var buildinfo *binfotypes.BuildInfo
-	for _, bi := range buildinfos {
-		buildinfo = bi
+	var bi *binfotypes.BuildInfo
+	for _, v := range buildinfos {
+		bi = v
 	}
 	return tpl.Execute(out, struct {
 		Name string `json:"name,omitempty"`
@@ -180,8 +207,8 @@ func (p *Printer) Print(raw bool, out io.Writer) error {
 	}{
 		Name:      p.name,
 		Manifest:  manifest,
-		Image:     imageconfig,
-		BuildInfo: buildinfo,
+		Image:     ic,
+		BuildInfo: bi,
 	})
 }

@@ -230,14 +257,22 @@ func (p *Printer) printManifestList(out io.Writer) error {
 }

 func (p *Printer) printBuildInfos(bis map[string]*binfotypes.BuildInfo, out io.Writer) error {
-	if len(bis) == 1 {
+	if len(bis) == 0 {
+		return nil
+	} else if len(bis) == 1 {
 		for _, bi := range bis {
 			return p.printBuildInfo(bi, "", out)
 		}
 	}
-	for pform, bi := range bis {
+	var pkeys []string
+	for _, pform := range p.platforms {
+		pkeys = append(pkeys, platforms.Format(pform))
+	}
+	sort.Strings(pkeys)
+	for _, platform := range pkeys {
+		bi := bis[platform]
 		w := tabwriter.NewWriter(out, 0, 0, 1, ' ', 0)
-		_, _ = fmt.Fprintf(w, "\t\nPlatform:\t%s\t\n", pform)
+		_, _ = fmt.Fprintf(w, "\t\nPlatform:\t%s\t\n", platform)
 		_ = w.Flush()
 		if err := p.printBuildInfo(bi, "", out); err != nil {
 			return err
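The per-platform image-config and build-info lookups above now fan out through an errgroup. Below is a self-contained sketch of that pattern. Note the loop variable is captured by value before the goroutine launches, as the diff does; the data here is made up:

```go
package main

import (
	"context"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	eg, _ := errgroup.WithContext(context.Background())
	results := make([]string, 3) // each goroutine writes a distinct index
	for i, name := range []string{"linux/amd64", "linux/arm64", "linux/s390x"} {
		i, name := i, name // capture by value (pre-Go 1.22 loop semantics)
		eg.Go(func() error {
			results[i] = "fetched " + name
			return nil
		})
	}
	if err := eg.Wait(); err != nil {
		panic(err)
	}
	fmt.Println(results)
}
```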
@@ -8,14 +8,14 @@ import (

 func WithPrefix(w Writer, pfx string, force bool) Writer {
 	return &prefixed{
-		main:  w,
+		Writer: w,
 		pfx:   pfx,
 		force: force,
 	}
 }

 type prefixed struct {
-	main Writer
+	Writer
 	pfx   string
 	force bool
 }
@@ -26,7 +26,7 @@ func (p *prefixed) Write(v *client.SolveStatus) {
 			v.Name = addPrefix(p.pfx, v.Name)
 		}
 	}
-	p.main.Write(v)
+	p.Writer.Write(v)
 }

 func addPrefix(pfx, name string) string {
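Embedding the `Writer` interface rather than holding it in a named `main` field lets the wrapper inherit every other `Writer` method automatically (including the log-source hooks added to the interface later in this changeset) while still overriding `Write`. A generic illustration of the embedding pattern, with invented types:

```go
package main

import "fmt"

type Logger interface {
	Log(msg string)
	Flush()
}

type stdout struct{}

func (stdout) Log(msg string) { fmt.Println(msg) }
func (stdout) Flush()         { /* nothing buffered */ }

// prefixed embeds Logger: Flush is promoted unchanged, Log is overridden.
type prefixed struct {
	Logger
	pfx string
}

func (p *prefixed) Log(msg string) { p.Logger.Log(p.pfx + msg) }

func main() {
	var l Logger = &prefixed{Logger: stdout{}, pfx: "[build] "}
	l.Log("hello") // prints "[build] hello"
	l.Flush()      // promoted from the embedded Logger
}
```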
@@ -5,11 +5,13 @@ import (
 	"io"
 	"io/ioutil"
 	"os"
+	"sync"

 	"github.com/containerd/console"
 	"github.com/docker/buildx/util/logutil"
 	"github.com/moby/buildkit/client"
 	"github.com/moby/buildkit/util/progress/progressui"
+	"github.com/opencontainers/go-digest"
 	"github.com/sirupsen/logrus"
 )

@@ -25,6 +27,8 @@ type Printer struct {
 	done     <-chan struct{}
 	err      error
 	warnings []client.VertexWarning
+	logMu        sync.Mutex
+	logSourceMap map[digest.Digest]interface{}
 }

 func (p *Printer) Wait() error {
@@ -41,6 +45,31 @@ func (p *Printer) Warnings() []client.VertexWarning {
 	return p.warnings
 }

+func (p *Printer) ValidateLogSource(dgst digest.Digest, v interface{}) bool {
+	p.logMu.Lock()
+	defer p.logMu.Unlock()
+	src, ok := p.logSourceMap[dgst]
+	if ok {
+		if src == v {
+			return true
+		}
+	} else {
+		p.logSourceMap[dgst] = v
+		return true
+	}
+	return false
+}
+
+func (p *Printer) ClearLogSource(v interface{}) {
+	p.logMu.Lock()
+	defer p.logMu.Unlock()
+	for d := range p.logSourceMap {
+		if p.logSourceMap[d] == v {
+			delete(p.logSourceMap, d)
+		}
+	}
+}
+
 func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string) *Printer {
 	statusCh := make(chan *client.SolveStatus)
 	doneCh := make(chan struct{})
@@ -48,6 +77,7 @@ func NewPrinter(ctx context.Context, w io.Writer, out console.File, mode string)
 	pw := &Printer{
 		status: statusCh,
 		done:   doneCh,
+		logSourceMap: map[digest.Digest]interface{}{},
 	}

 	if v := os.Getenv("BUILDKIT_PROGRESS"); v != "" && mode == PrinterModeAuto {
@@ -10,6 +10,8 @@ import (

 type Writer interface {
 	Write(*client.SolveStatus)
+	ValidateLogSource(digest.Digest, interface{}) bool
+	ClearLogSource(interface{})
 }

 func Write(w Writer, name string, f func() error) {
@@ -47,8 +49,20 @@ func NewChannel(w Writer) (chan *client.SolveStatus, chan struct{}) {
 			v, ok := <-ch
 			if !ok {
 				close(done)
+				w.ClearLogSource(done)
 				return
 			}
+
+			if len(v.Logs) > 0 {
+				logs := make([]*client.VertexLog, 0, len(v.Logs))
+				for _, l := range v.Logs {
+					if w.ValidateLogSource(l.Vertex, done) {
+						logs = append(logs, l)
+					}
+				}
+				v.Logs = logs
+			}
+
 			w.Write(v)
 		}
 	}()
vendor/github.com/compose-spec/compose-go/consts/consts.go (generated, vendored, new file, 23 lines)

@@ -0,0 +1,23 @@
/*
   Copyright 2020 The Compose Specification Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package consts

const (
	ComposeProjectName   = "COMPOSE_PROJECT_NAME"
	ComposePathSeparator = "COMPOSE_PATH_SEPARATOR"
	ComposeFilePath      = "COMPOSE_FILE"
)
@@ -20,4 +20,3 @@ NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
 LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
 OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
 WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
-
@@ -1,4 +1,4 @@
-// Package godotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
+// Package dotenv is a go port of the ruby dotenv library (https://github.com/bkeepers/dotenv)
 //
 // Examples/readme can be found on the github page at https://github.com/joho/godotenv
 //
@@ -11,7 +11,7 @@
 // godotenv.Load()
 //
 // and all the env vars declared in .env will be available through os.Getenv("SOME_ENV_VAR")
-package godotenv
+package dotenv

 import (
     "errors"
@@ -1,4 +1,4 @@
-package godotenv
+package dotenv

 import (
     "bytes"
@@ -41,10 +41,10 @@ func parseBytes(src []byte, out map[string]string, lookupFn LookupFn) error {
             value, ok := lookupFn(key)
             if ok {
                 out[key] = value
             }
             cutset = left
             continue
         }
     }

     value, left, err := extractVarValue(left, out, lookupFn)
     if err != nil {
@@ -132,14 +132,14 @@ func extractVarValue(src []byte, envMap map[string]string, lookupFn LookupFn) (v
     // unquoted value - read until new line
     end := bytes.IndexFunc(src, isNewLine)
     var rest []byte
-    var value string
     if end < 0 {
-        value := strings.TrimRightFunc(string(src), unicode.IsSpace)
-        rest = nil
-        return expandVariables(value, envMap, lookupFn), rest, nil
+        value := strings.Split(string(src), "#")[0] // Remove inline comments on unquoted lines
+        value = strings.TrimRightFunc(value, unicode.IsSpace)
+        return expandVariables(value, envMap, lookupFn), nil, nil
     }

-    value = strings.TrimRightFunc(string(src[0:end]), unicode.IsSpace)
+    value := strings.Split(string(src[0:end]), "#")[0]
+    value = strings.TrimRightFunc(value, unicode.IsSpace)
     rest = src[end:]
     return expandVariables(value, envMap, lookupFn), rest, nil
 }
@@ -228,7 +228,6 @@ func isSpace(r rune) bool {
     return false
 }

-
 // isNewLine reports whether the rune is a new line character
 func isNewLine(r rune) bool {
     return r == '\n'
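
The two added Split/TrimRightFunc lines change how unquoted values are read: everything after the first '#' is now treated as an inline comment and discarded. A self-contained sketch of just that rule (in the real parser quoted values are unaffected):

package main

import (
    "fmt"
    "strings"
    "unicode"
)

// unquotedValue mirrors the two lines added above: drop everything after the
// first '#', then trim trailing whitespace. Nothing else is changed.
func unquotedValue(src string) string {
    value := strings.Split(src, "#")[0]
    return strings.TrimRightFunc(value, unicode.IsSpace)
}

func main() {
    fmt.Printf("%q\n", unquotedValue("hello world # greeting")) // "hello world"
    fmt.Printf("%q\n", unquotedValue("plain-value   "))         // "plain-value"
}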
2 vendor/github.com/compose-spec/compose-go/interpolation/interpolation.go generated vendored
@@ -115,7 +115,7 @@ func newPathError(path Path, err error) error {
         return nil
     case *template.InvalidTemplateError:
         return errors.Errorf(
-            "invalid interpolation format for %s: %#v. You may need to escape any $ with another $.",
+            "invalid interpolation format for %s: %#v. You may need to escape any $ with another $",
             path, err.Template)
     default:
         return errors.Wrapf(err, "error while interpolating %s", path)
7 vendor/github.com/compose-spec/compose-go/loader/full-example.yml generated vendored
@@ -1,3 +1,4 @@
+name: Full_Example_project_name
 services:
   foo:

@@ -6,6 +7,8 @@ services:
     dockerfile: Dockerfile
     args:
       foo: bar
+    ssh:
+      - default
     target: foo
     network: foo
     cache_from:
@@ -85,6 +88,10 @@ services:
         - spread: node.labels.az
       endpoint_mode: dnsrr

+    device_cgroup_rules:
+      - "c 1:3 mr"
+      - "a 7:* rmw"
+
     devices:
       - "/dev/ttyUSB0:/dev/ttyUSB0"
14 vendor/github.com/compose-spec/compose-go/loader/interpolate.go generated vendored
@@ -21,7 +21,6 @@ import (
     "strings"

     interp "github.com/compose-spec/compose-go/interpolation"
-    "github.com/compose-spec/compose-go/types"
     "github.com/pkg/errors"
 )

@@ -53,7 +52,6 @@ var interpolateTypeCastMapping = map[interp.Path]interp.Cast{
     servicePath("oom_score_adj"):                         toInt64,
     servicePath("pids_limit"):                            toInt64,
     servicePath("ports", interp.PathMatchList, "target"): toInt,
-    servicePath("ports", interp.PathMatchList, "published"): toInt,
     servicePath("privileged"):                            toBoolean,
     servicePath("read_only"):                             toBoolean,
     servicePath("scale"):                                 toInt,
@@ -94,19 +92,11 @@ func toInt64(value string) (interface{}, error) {
 }

 func toUnitBytes(value string) (interface{}, error) {
-    i, err := strconv.ParseInt(value, 10, 64)
-    if err != nil {
-        return nil, err
-    }
-    return types.UnitBytes(i), nil
+    return transformSize(value)
 }

 func toDuration(value string) (interface{}, error) {
-    i, err := strconv.ParseInt(value, 10, 64)
-    if err != nil {
-        return nil, err
-    }
-    return types.Duration(i), nil
+    return transformStringToDuration(value)
 }

 func toFloat(value string) (interface{}, error) {
128 vendor/github.com/compose-spec/compose-go/loader/loader.go generated vendored
@@ -23,15 +23,18 @@ import (
     "path"
     "path/filepath"
     "reflect"
     "regexp"
     "sort"
     "strconv"
     "strings"
     "time"

+    "github.com/compose-spec/compose-go/consts"
+    "github.com/compose-spec/compose-go/dotenv"
     interp "github.com/compose-spec/compose-go/interpolation"
     "github.com/compose-spec/compose-go/schema"
     "github.com/compose-spec/compose-go/template"
     "github.com/compose-spec/compose-go/types"
-    "github.com/compose-spec/godotenv"
     "github.com/docker/go-units"
     "github.com/mattn/go-shellwords"
     "github.com/mitchellh/mapstructure"
@@ -58,8 +61,19 @@ type Options struct {
     Interpolate *interp.Options
     // Discard 'env_file' entries after resolving to 'environment' section
     discardEnvFiles bool
-    // Set project name
-    Name string
+    // Set project projectName
+    projectName string
+    // Indicates when the projectName was imperatively set or guessed from path
+    projectNameImperativelySet bool
 }

+func (o *Options) SetProjectName(name string, imperativelySet bool) {
+    o.projectName = normalizeProjectName(name)
+    o.projectNameImperativelySet = imperativelySet
+}
+
+func (o Options) GetProjectName() (string, bool) {
+    return o.projectName, o.projectNameImperativelySet
+}
+
 // serviceRef identifies a reference to a service. It's used to detect cyclic
@@ -192,8 +206,17 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
         s.EnvFile = newEnvFiles
     }

+    projectName, projectNameImperativelySet := opts.GetProjectName()
+    model.Name = normalizeProjectName(model.Name)
+    if !projectNameImperativelySet && model.Name != "" {
+        projectName = model.Name
+    }
+
+    if projectName != "" {
+        configDetails.Environment[consts.ComposeProjectName] = projectName
+    }
     project := &types.Project{
-        Name:       opts.Name,
+        Name:       projectName,
         WorkingDir: configDetails.WorkingDir,
         Services:   model.Services,
         Networks:   model.Networks,
@@ -221,6 +244,13 @@ func Load(configDetails types.ConfigDetails, options ...func(*Options)) (*types.
     return project, nil
 }

+func normalizeProjectName(s string) string {
+    r := regexp.MustCompile("[a-z0-9_-]")
+    s = strings.ToLower(s)
+    s = strings.Join(r.FindAllString(s, -1), "")
+    return strings.TrimLeft(s, "_-")
+}
+
 func parseConfig(b []byte, opts *Options) (map[string]interface{}, error) {
     yml, err := ParseYAML(b)
     if err != nil {
@@ -254,7 +284,14 @@ func loadSections(filename string, config map[string]interface{}, configDetails
     cfg := types.Config{
         Filename: filename,
     }
+    name := ""
+    if n, ok := config["name"]; ok {
+        name, ok = n.(string)
+        if !ok {
+            return nil, errors.New("project name must be a string")
+        }
+    }
+    cfg.Name = name
     cfg.Services, err = LoadServices(filename, getSection(config, "services"), configDetails.WorkingDir, configDetails.LookupEnv, opts)
     if err != nil {
         return nil, err
@@ -280,10 +317,6 @@ func loadSections(filename string, config map[string]interface{}, configDetails
     if len(extensions) > 0 {
         cfg.Extensions = extensions
     }
-    if err != nil {
-        return nil, err
-    }
-
     return &cfg, nil
 }
@@ -357,6 +390,7 @@ func createTransformHook(additionalTransformers ...Transformer) mapstructure.Dec
     reflect.TypeOf(types.DependsOnConfig{}): transformDependsOnConfig,
     reflect.TypeOf(types.ExtendsConfig{}):   transformExtendsConfig,
     reflect.TypeOf(types.DeviceRequest{}):   transformServiceDeviceRequest,
+    reflect.TypeOf(types.SSHConfig{}):       transformSSHConfig,
 }

 for _, transformer := range additionalTransformers {
@@ -559,7 +593,7 @@ func resolveEnvironment(serviceConfig *types.ServiceConfig, workingDir string, l
             return err
         }
         defer file.Close()
-        fileVars, err := godotenv.ParseWithLookup(file, godotenv.LookupFn(lookupEnv))
+        fileVars, err := dotenv.ParseWithLookup(file, dotenv.LookupFn(lookupEnv))
         if err != nil {
             return err
         }
@@ -817,6 +851,10 @@ var transformServicePort TransformerFunc = func(data interface{}) (interface{},
             ports = append(ports, v)
         }
     case map[string]interface{}:
+        published := value["published"]
+        if v, ok := published.(int); ok {
+            value["published"] = strconv.Itoa(v)
+        }
         ports = append(ports, groupXFieldsIntoExtensions(value))
     default:
         return data, errors.Errorf("invalid type %T for port", value)
@@ -891,7 +929,7 @@ var transformDependsOnConfig TransformerFunc = func(data interface
         for _, serviceIntf := range value {
             service, ok := serviceIntf.(string)
             if !ok {
-                return data, errors.Errorf("invalid type %T for service depends_on element. Expected string.", value)
+                return data, errors.Errorf("invalid type %T for service depends_on elementn, expected string", value)
             }
             transformed[service] = map[string]interface{}{"condition": types.ServiceConditionStarted}
         }
@@ -941,6 +979,35 @@ var transformServiceNetworkMap TransformerFunc = func(value interf
     return value, nil
 }

+var transformSSHConfig TransformerFunc = func(data interface{}) (interface{}, error) {
+    switch value := data.(type) {
+    case map[string]interface{}:
+        var result []types.SSHKey
+        for key, val := range value {
+            if val == nil {
+                val = ""
+            }
+            result = append(result, types.SSHKey{ID: key, Path: val.(string)})
+        }
+        return result, nil
+    case []interface{}:
+        var result []types.SSHKey
+        for _, v := range value {
+            key, val := transformValueToMapEntry(v.(string), "=", false)
+            result = append(result, types.SSHKey{ID: key, Path: val.(string)})
+        }
+        return result, nil
+    case string:
+        if value == "" {
+            value = "default"
+        }
+        key, val := transformValueToMapEntry(value, "=", false)
+        result := []types.SSHKey{{ID: key, Path: val.(string)}}
+        return result, nil
+    }
+    return nil, errors.Errorf("expected a sting, map or a list, got %T: %#v", data, data)
+}
+
 var transformStringOrNumberList TransformerFunc = func(value interface{}) (interface{}, error) {
     list := value.([]interface{})
     result := make([]string, len(list))
@@ -963,47 +1030,52 @@ var transformStringList TransformerFunc = func(data interface{}, e

 func transformMappingOrListFunc(sep string, allowNil bool) TransformerFunc {
     return func(data interface{}) (interface{}, error) {
-        return transformMappingOrList(data, sep, allowNil), nil
+        return transformMappingOrList(data, sep, allowNil)
     }
 }

 func transformListOrMappingFunc(sep string, allowNil bool) TransformerFunc {
     return func(data interface{}) (interface{}, error) {
-        return transformListOrMapping(data, sep, allowNil), nil
+        return transformListOrMapping(data, sep, allowNil)
     }
 }

-func transformListOrMapping(listOrMapping interface{}, sep string, allowNil bool) interface{} {
+func transformListOrMapping(listOrMapping interface{}, sep string, allowNil bool) (interface{}, error) {
     switch value := listOrMapping.(type) {
     case map[string]interface{}:
-        return toStringList(value, sep, allowNil)
+        return toStringList(value, sep, allowNil), nil
     case []interface{}:
-        return listOrMapping
+        return listOrMapping, nil
     }
-    panic(errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping))
+    return nil, errors.Errorf("expected a map or a list, got %T: %#v", listOrMapping, listOrMapping)
 }

-func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) interface{} {
+func transformMappingOrList(mappingOrList interface{}, sep string, allowNil bool) (interface{}, error) {
     switch value := mappingOrList.(type) {
     case map[string]interface{}:
-        return toMapStringString(value, allowNil)
+        return toMapStringString(value, allowNil), nil
     case []interface{}:
         result := make(map[string]interface{})
         for _, value := range value {
-            parts := strings.SplitN(value.(string), sep, 2)
-            key := parts[0]
-            switch {
-            case len(parts) == 1 && allowNil:
-                result[key] = nil
-            case len(parts) == 1 && !allowNil:
-                result[key] = ""
-            default:
-                result[key] = parts[1]
-            }
+            key, val := transformValueToMapEntry(value.(string), sep, allowNil)
+            result[key] = val
         }
-        return result
+        return result, nil
     }
-    panic(errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList))
+    return nil, errors.Errorf("expected a map or a list, got %T: %#v", mappingOrList, mappingOrList)
 }

+func transformValueToMapEntry(value string, separator string, allowNil bool) (string, interface{}) {
+    parts := strings.SplitN(value, separator, 2)
+    key := parts[0]
+    switch {
+    case len(parts) == 1 && allowNil:
+        return key, nil
+    case len(parts) == 1 && !allowNil:
+        return key, ""
+    default:
+        return key, parts[1]
+    }
+}
+
 var transformShellCommand TransformerFunc = func(value interface{}) (interface{}, error) {
@@ -1045,6 +1117,8 @@ var transformStringToDuration TransformerFunc = func(value interfa
         return value, err
         }
         return types.Duration(d), nil
+    case types.Duration:
+        return value, nil
     default:
         return value, errors.Errorf("invalid type %T for duration", value)
     }
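
normalizeProjectName, added above, is what both SetProjectName and Load now funnel names through. The same logic in isolation, with a couple of worked inputs:

package main

import (
    "fmt"
    "regexp"
    "strings"
)

// Same rule as the loader's normalizeProjectName: lowercase, keep only
// [a-z0-9_-], then strip any leading '_' or '-'.
func normalizeProjectName(s string) string {
    r := regexp.MustCompile("[a-z0-9_-]")
    s = strings.ToLower(s)
    s = strings.Join(r.FindAllString(s, -1), "")
    return strings.TrimLeft(s, "_-")
}

func main() {
    fmt.Println(normalizeProjectName("Full_Example_project_name")) // full_example_project_name
    fmt.Println(normalizeProjectName("--My App!"))                 // myapp
}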
14 vendor/github.com/compose-spec/compose-go/loader/merge.go generated vendored
@@ -53,6 +53,7 @@ func merge(configs []*types.Config) (*types.Config, error) {
     base := configs[0]
     for _, override := range configs[1:] {
         var err error
+        base.Name = mergeNames(base.Name, override.Name)
         base.Services, err = mergeServices(base.Services, override.Services)
         if err != nil {
             return base, errors.Wrapf(err, "cannot merge services from %s", override.Filename)
@@ -81,6 +82,13 @@ func merge(configs []*types.Config) (*types.Config, error) {
     return base, nil
 }

+func mergeNames(base, override string) string {
+    if override != "" {
+        return override
+    }
+    return base
+}
+
 func mergeServices(base, override []types.ServiceConfig) ([]types.ServiceConfig, error) {
     baseServices := mapByName(base)
     overrideServices := mapByName(override)
@@ -154,7 +162,7 @@ func toServicePortConfigsMap(s interface{}) (map[interface{}]interface{}, error)
     m := map[interface{}]interface{}{}
     type port struct {
         target    uint32
-        published uint32
+        published string
         ip        string
         protocol  string
     }
@@ -291,7 +299,7 @@ func mergeLoggingConfig(dst, src reflect.Value) error {
     return nil
 }

-//nolint: unparam
+// nolint: unparam
 func mergeUlimitsConfig(dst, src reflect.Value) error {
     if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
         dst.Elem().Set(src.Elem())
@@ -299,7 +307,7 @@ func mergeUlimitsConfig(dst, src reflect.Value) error {
     return nil
 }

-//nolint: unparam
+// nolint: unparam
 func mergeServiceNetworkConfig(dst, src reflect.Value) error {
     if src.Interface() != reflect.Zero(reflect.TypeOf(src.Interface())).Interface() {
         dst.Elem().FieldByName("Aliases").Set(src.Elem().FieldByName("Aliases"))
2 vendor/github.com/compose-spec/compose-go/loader/normalize.go generated vendored
@@ -79,7 +79,7 @@ func normalize(project *types.Project, resolvePaths bool) error {
             if resolvePaths {
                 s.Build.Context = localContext
             }
-        } else {
+            // } else {
             // might be a remote http/git context. Unfortunately supported "remote" syntax is highly ambiguous
             // in moby/moby and not defined by compose-spec, so let's assume runtime will check
         }
38 vendor/github.com/compose-spec/compose-go/loader/volume.go generated vendored
@@ -92,7 +92,7 @@ func populateFieldFromBuffer(char rune, buffer []rune, volume *types.ServiceVolu
         volume.Volume = &types.ServiceVolumeVolume{NoCopy: true}
     default:
         if isBindOption(option) {
-            volume.Bind = &types.ServiceVolumeBind{Propagation: option}
+            setBindOption(volume, option)
         }
         // ignore unknown options
     }
@@ -109,13 +109,39 @@ var Propagations = []string{
     types.PropagationSlave,
 }

+type setBindOptionFunc func(bind *types.ServiceVolumeBind, option string)
+
+var bindOptions = map[string]setBindOptionFunc{
+    types.PropagationRPrivate: setBindPropagation,
+    types.PropagationPrivate:  setBindPropagation,
+    types.PropagationRShared:  setBindPropagation,
+    types.PropagationShared:   setBindPropagation,
+    types.PropagationRSlave:   setBindPropagation,
+    types.PropagationSlave:    setBindPropagation,
+    types.SELinuxShared:       setBindSELinux,
+    types.SELinuxPrivate:      setBindSELinux,
+}
+
+func setBindPropagation(bind *types.ServiceVolumeBind, option string) {
+    bind.Propagation = option
+}
+
+func setBindSELinux(bind *types.ServiceVolumeBind, option string) {
+    bind.SELinux = option
+}
+
 func isBindOption(option string) bool {
-    for _, propagation := range Propagations {
-        if option == propagation {
-            return true
-        }
-    }
-    return false
+    _, ok := bindOptions[option]
+
+    return ok
+}
+
+func setBindOption(volume *types.ServiceVolumeConfig, option string) {
+    if volume.Bind == nil {
+        volume.Bind = &types.ServiceVolumeBind{}
+    }
+
+    bindOptions[option](volume.Bind, option)
 }

 func populateType(volume *types.ServiceVolumeConfig) {
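
The isBindOption rewrite above swaps a linear scan over Propagations for a lookup table that also dispatches to the right setter, which is how the new selinux option rides along with propagation. A condensed sketch of the pattern (bind, setters and apply are illustrative stand-ins for the vendored types):

package main

import "fmt"

// bind stands in for types.ServiceVolumeBind; the map keys stand in for the
// propagation and SELinux constants used in the real table.
type bind struct{ Propagation, SELinux string }

var setters = map[string]func(*bind, string){
    "rshared": func(b *bind, o string) { b.Propagation = o },
    "shared":  func(b *bind, o string) { b.Propagation = o },
    "z":       func(b *bind, o string) { b.SELinux = o },
    "Z":       func(b *bind, o string) { b.SELinux = o },
}

func apply(b *bind, option string) bool {
    set, ok := setters[option]
    if !ok {
        return false // unknown options are ignored, as in the loader
    }
    set(b, option)
    return true
}

func main() {
    var b bind
    apply(&b, "rshared")
    apply(&b, "z")
    fmt.Printf("%+v\n", b) // {Propagation:rshared SELinux:z}
}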
19 vendor/github.com/compose-spec/compose-go/schema/compose-spec.json generated vendored
@@ -8,7 +8,12 @@
   "properties": {
     "version": {
       "type": "string",
-      "description": "Version of the Compose specification used. Tools not implementing required version MUST reject the configuration file."
+      "description": "declared for backward compatibility, ignored."
     },

+    "name": {
+      "type": "string",
+      "description": "define the Compose project name, until user defines one explicitly."
+    },
+
     "services": {
@@ -86,9 +91,13 @@
         "context": {"type": "string"},
         "dockerfile": {"type": "string"},
         "args": {"$ref": "#/definitions/list_or_dict"},
+        "ssh": {"$ref": "#/definitions/list_or_dict"},
         "labels": {"$ref": "#/definitions/list_or_dict"},
         "cache_from": {"type": "array", "items": {"type": "string"}},
+        "cache_to": {"type": "array", "items": {"type": "string"}},
+        "no_cache": {"type": "boolean"},
         "network": {"type": "string"},
+        "pull": {"type": "boolean"},
         "target": {"type": "string"},
         "shm_size": {"type": ["integer", "string"]},
         "extra_hosts": {"$ref": "#/definitions/list_or_dict"},
@@ -318,7 +327,7 @@
           "mode": {"type": "string"},
           "host_ip": {"type": "string"},
           "target": {"type": "integer"},
-          "published": {"type": "integer"},
+          "published": {"type": ["string", "integer"]},
           "protocol": {"type": "string"}
         },
         "additionalProperties": false,
@@ -410,7 +419,8 @@
         "type": "object",
         "properties": {
           "propagation": {"type": "string"},
-          "create_host_path": {"type": "boolean"}
+          "create_host_path": {"type": "boolean"},
+          "selinux": {"type": "string", "enum": ["z", "Z"]}
         },
         "additionalProperties": false,
         "patternProperties": {"^x-": {}}
@@ -519,7 +529,8 @@
         "type": "object",
         "properties": {
           "cpus": {"type": ["number", "string"]},
-          "memory": {"type": "string"}
+          "memory": {"type": "string"},
+          "pids": {"type": "integer"}
         },
         "additionalProperties": false,
         "patternProperties": {"^x-": {}}
5 vendor/github.com/compose-spec/compose-go/schema/schema.go generated vendored
@@ -27,11 +27,6 @@ import (
     _ "embed"
 )

-const (
-    defaultVersion = "1.0"
-    versionField   = "version"
-)
-
 type portsFormatChecker struct{}

 func (checker portsFormatChecker) IsFormat(input interface{}) bool {
57 vendor/github.com/compose-spec/compose-go/template/template.go generated vendored
@@ -26,6 +26,7 @@ import (

 var delimiter = "\\$"
 var substitutionNamed = "[_a-z][_a-z0-9]*"
+var substitutionBraced = "[_a-z][_a-z0-9]*(?::?[-?](.*}|[^}]*))?"

 var patternString = fmt.Sprintf(
@@ -60,15 +61,17 @@ type SubstituteFunc func(string, Mapping) (string, bool, error)
 // It accepts additional substitute function.
 func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, subsFuncs ...SubstituteFunc) (string, error) {
     if len(subsFuncs) == 0 {
-        subsFuncs = []SubstituteFunc{
-            softDefault,
-            hardDefault,
-            requiredNonEmpty,
-            required,
-        }
+        subsFuncs = getDefaultSortedSubstitutionFunctions(template)
     }
     var err error
     result := pattern.ReplaceAllStringFunc(template, func(substring string) string {
+        closingBraceIndex := getFirstBraceClosingIndex(substring)
+        rest := ""
+        if closingBraceIndex > -1 {
+            rest = substring[closingBraceIndex+1:]
+            substring = substring[0 : closingBraceIndex+1]
+        }
+
         matches := pattern.FindStringSubmatch(substring)
         groups := matchGroups(matches, pattern)
         if escaped := groups["escaped"]; escaped != "" {
@@ -100,7 +103,11 @@ func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, su
             if !applied {
                 continue
             }
-            return value
+            interpolatedNested, err := SubstituteWith(rest, mapping, pattern, subsFuncs...)
+            if err != nil {
+                return ""
+            }
+            return value + interpolatedNested
         }
     }
@@ -114,6 +121,42 @@ func SubstituteWith(template string, mapping Mapping, pattern *regexp.Regexp, su
     return result, err
 }

+func getDefaultSortedSubstitutionFunctions(template string, fns ...SubstituteFunc) []SubstituteFunc {
+    hyphenIndex := strings.IndexByte(template, '-')
+    questionIndex := strings.IndexByte(template, '?')
+    if hyphenIndex < 0 || hyphenIndex > questionIndex {
+        return []SubstituteFunc{
+            requiredNonEmpty,
+            required,
+            softDefault,
+            hardDefault,
+        }
+    }
+    return []SubstituteFunc{
+        softDefault,
+        hardDefault,
+        requiredNonEmpty,
+        required,
+    }
+}
+
+func getFirstBraceClosingIndex(s string) int {
+    openVariableBraces := 0
+    for i := 0; i < len(s); i++ {
+        if s[i] == '}' {
+            openVariableBraces--
+            if openVariableBraces == 0 {
+                return i
+            }
+        }
+        if strings.HasPrefix(s[i:], "${") {
+            openVariableBraces++
+            i++
+        }
+    }
+    return -1
+}
+
 // Substitute variables in the string with their values
 func Substitute(template string, mapping Mapping) (string, error) {
     return SubstituteWith(template, mapping, defaultPattern)
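
getFirstBraceClosingIndex is the piece that makes nested defaults like ${FOO:-${BAR}} work: it finds the brace that closes the first ${...} by tracking nesting depth, so SubstituteWith can recurse on the remainder. The function reproduced standalone with a worked input:

package main

import (
    "fmt"
    "strings"
)

// Same scanning logic as the function added to template.go above: "${" opens
// a level, "}" closes one, and the index of the brace that brings the depth
// back to zero is returned (-1 if the braces never balance).
func getFirstBraceClosingIndex(s string) int {
    openVariableBraces := 0
    for i := 0; i < len(s); i++ {
        if s[i] == '}' {
            openVariableBraces--
            if openVariableBraces == 0 {
                return i
            }
        }
        if strings.HasPrefix(s[i:], "${") {
            openVariableBraces++
            i++
        }
    }
    return -1
}

func main() {
    s := "${FOO:-${BAR}} suffix"
    i := getFirstBraceClosingIndex(s)
    fmt.Println(i, s[:i+1], "|", s[i+1:]) // prints: 13 ${FOO:-${BAR}} |  suffix
}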
1 vendor/github.com/compose-spec/compose-go/types/config.go generated vendored
@@ -49,6 +49,7 @@ type ConfigFile struct {
 // Config is a full compose file configuration and model
 type Config struct {
     Filename string   `yaml:"-" json:"-"`
+    Name     string   `yaml:",omitempty" json:"name,omitempty"`
     Services Services `json:"services"`
     Networks Networks `yaml:",omitempty" json:"networks,omitempty"`
     Volumes  Volumes  `yaml:",omitempty" json:"volumes,omitempty"`
2 vendor/github.com/compose-spec/compose-go/types/project.go generated vendored
@@ -29,7 +29,7 @@ import (

 // Project is the result of loading a set of compose files
 type Project struct {
-    Name       string   `yaml:"-" json:"-"`
+    Name       string   `yaml:"name,omitempty" json:"name,omitempty"`
     WorkingDir string   `yaml:"-" json:"-"`
     Services   Services `json:"services"`
     Networks   Networks `yaml:",omitempty" json:"networks,omitempty"`
76 vendor/github.com/compose-spec/compose-go/types/types.go generated vendored
@@ -108,6 +108,7 @@ type ServiceConfig struct {
     CredentialSpec    *CredentialSpecConfig `mapstructure:"credential_spec" yaml:"credential_spec,omitempty" json:"credential_spec,omitempty"`
     DependsOn         DependsOnConfig       `mapstructure:"depends_on" yaml:"depends_on,omitempty" json:"depends_on,omitempty"`
     Deploy            *DeployConfig         `yaml:",omitempty" json:"deploy,omitempty"`
+    DeviceCgroupRules []string              `mapstructure:"device_cgroup_rules" yaml:"device_cgroup_rules,omitempty" json:"device_cgroup_rules,omitempty"`
     Devices           []string              `yaml:",omitempty" json:"devices,omitempty"`
     DNS               StringList            `yaml:",omitempty" json:"dns,omitempty"`
     DNSOpts           []string              `mapstructure:"dns_opt" yaml:"dns_opt,omitempty" json:"dns_opt,omitempty"`
@@ -129,6 +130,7 @@ type ServiceConfig struct {
     Ipc          string         `yaml:",omitempty" json:"ipc,omitempty"`
     Isolation    string         `mapstructure:"isolation" yaml:"isolation,omitempty" json:"isolation,omitempty"`
     Labels       Labels         `yaml:",omitempty" json:"labels,omitempty"`
+    CustomLabels Labels         `yaml:"-" json:"-"`
     Links        []string       `yaml:",omitempty" json:"links,omitempty"`
     Logging      *LoggingConfig `yaml:",omitempty" json:"logging,omitempty"`
     LogDriver    string         `mapstructure:"log_driver" yaml:"log_driver,omitempty" json:"log_driver,omitempty"`
@@ -292,8 +294,12 @@ type BuildConfig struct {
     Context    string            `yaml:",omitempty" json:"context,omitempty"`
     Dockerfile string            `yaml:",omitempty" json:"dockerfile,omitempty"`
     Args       MappingWithEquals `yaml:",omitempty" json:"args,omitempty"`
+    SSH        SSHConfig         `yaml:"ssh,omitempty" json:"ssh,omitempty"`
     Labels     Labels            `yaml:",omitempty" json:"labels,omitempty"`
     CacheFrom  StringList        `mapstructure:"cache_from" yaml:"cache_from,omitempty" json:"cache_from,omitempty"`
+    CacheTo    StringList        `mapstructure:"cache_to" yaml:"cache_to,omitempty" json:"cache_to,omitempty"`
+    NoCache    bool              `mapstructure:"no_cache" yaml:"no_cache,omitempty" json:"no_cache,omitempty"`
+    Pull       bool              `mapstructure:"pull" yaml:"pull,omitempty" json:"pull,omitempty"`
     ExtraHosts HostsList         `mapstructure:"extra_hosts" yaml:"extra_hosts,omitempty" json:"extra_hosts,omitempty"`
     Isolation  string            `yaml:",omitempty" json:"isolation,omitempty"`
     Network    string            `yaml:",omitempty" json:"network,omitempty"`
@@ -305,11 +311,11 @@ type BuildConfig struct {
 // BlkioConfig define blkio config
 type BlkioConfig struct {
     Weight          uint16           `yaml:",omitempty" json:"weight,omitempty"`
-    WeightDevice    []WeightDevice   `yaml:",omitempty" json:"weight_device,omitempty"`
-    DeviceReadBps   []ThrottleDevice `yaml:",omitempty" json:"device_read_bps,omitempty"`
-    DeviceReadIOps  []ThrottleDevice `yaml:",omitempty" json:"device_read_iops,omitempty"`
-    DeviceWriteBps  []ThrottleDevice `yaml:",omitempty" json:"device_write_bps,omitempty"`
-    DeviceWriteIOps []ThrottleDevice `yaml:",omitempty" json:"device_write_iops,omitempty"`
+    WeightDevice    []WeightDevice   `mapstructure:"weight_device" yaml:",omitempty" json:"weight_device,omitempty"`
+    DeviceReadBps   []ThrottleDevice `mapstructure:"device_read_bps" yaml:",omitempty" json:"device_read_bps,omitempty"`
+    DeviceReadIOps  []ThrottleDevice `mapstructure:"device_read_iops" yaml:",omitempty" json:"device_read_iops,omitempty"`
+    DeviceWriteBps  []ThrottleDevice `mapstructure:"device_write_bps" yaml:",omitempty" json:"device_write_bps,omitempty"`
+    DeviceWriteIOps []ThrottleDevice `mapstructure:"device_write_iops" yaml:",omitempty" json:"device_write_iops,omitempty"`

     Extensions map[string]interface{} `yaml:",inline" json:"-"`
 }
@@ -423,6 +429,39 @@ func (l Labels) Add(key, value string) Labels {
     return l
 }

+type SSHKey struct {
+    ID   string
+    Path string
+}
+
+// SSHConfig is a mapping type for SSH build config
+type SSHConfig []SSHKey
+
+func (s SSHConfig) Get(id string) (string, error) {
+    for _, sshKey := range s {
+        if sshKey.ID == id {
+            return sshKey.Path, nil
+        }
+    }
+    return "", fmt.Errorf("ID %s not found in SSH keys", id)
+}
+
+// MarshalYAML makes SSHKey implement yaml.Marshaller
+func (s SSHKey) MarshalYAML() (interface{}, error) {
+    if s.Path == "" {
+        return s.ID, nil
+    }
+    return fmt.Sprintf("%s: %s", s.ID, s.Path), nil
+}
+
+// MarshalJSON makes SSHKey implement json.Marshaller
+func (s SSHKey) MarshalJSON() ([]byte, error) {
+    if s.Path == "" {
+        return []byte(fmt.Sprintf(`"%s"`, s.ID)), nil
+    }
+    return []byte(fmt.Sprintf(`"%s": %s`, s.ID, s.Path)), nil
+}
+
 // MappingWithColon is a mapping type that can be converted from a list of
 // 'key: value' strings
 type MappingWithColon map[string]string
@@ -493,6 +532,7 @@ type Resource struct {
     // TODO: types to convert from units and ratios
     NanoCPUs         string            `mapstructure:"cpus" yaml:"cpus,omitempty" json:"cpus,omitempty"`
     MemoryBytes      UnitBytes         `mapstructure:"memory" yaml:"memory,omitempty" json:"memory,omitempty"`
+    PIds             int64             `mapstructure:"pids" yaml:"pids,omitempty" json:"pids,omitempty"`
     Devices          []DeviceRequest   `mapstructure:"devices" yaml:"devices,omitempty" json:"devices,omitempty"`
     GenericResources []GenericResource `mapstructure:"generic_resources" yaml:"generic_resources,omitempty" json:"generic_resources,omitempty"`
@@ -579,7 +619,7 @@ type ServicePortConfig struct {
     Mode      string `yaml:",omitempty" json:"mode,omitempty"`
     HostIP    string `mapstructure:"host_ip" yaml:"host_ip,omitempty" json:"host_ip,omitempty"`
     Target    uint32 `yaml:",omitempty" json:"target,omitempty"`
-    Published uint32 `yaml:",omitempty" json:"published,omitempty"`
+    Published string `yaml:",omitempty" json:"published,omitempty"`
     Protocol  string `yaml:",omitempty" json:"protocol,omitempty"`

     Extensions map[string]interface{} `yaml:",inline" json:"-"`
@@ -613,22 +653,14 @@ func ParsePortConfig(value string) ([]ServicePortConfig, error) {
 func convertPortToPortConfig(port nat.Port, portBindings map[nat.Port][]nat.PortBinding) ([]ServicePortConfig, error) {
     var portConfigs []ServicePortConfig
     for _, binding := range portBindings[port] {
-        startHostPort, endHostPort, err := nat.ParsePortRange(binding.HostPort)
-
-        if err != nil && binding.HostPort != "" {
-            return nil, fmt.Errorf("invalid hostport binding (%s) for port (%s)", binding.HostPort, port.Port())
-        }
-
-        for i := startHostPort; i <= endHostPort; i++ {
-            portConfigs = append(portConfigs, ServicePortConfig{
-                HostIP:    binding.HostIP,
-                Protocol:  strings.ToLower(port.Proto()),
-                Target:    uint32(port.Int()),
-                Published: uint32(i),
-                Mode:      "ingress",
-            })
-        }
+        portConfigs = append(portConfigs, ServicePortConfig{
+            HostIP:    binding.HostIP,
+            Protocol:  strings.ToLower(port.Proto()),
+            Target:    uint32(port.Int()),
+            Published: binding.HostPort,
+            Mode:      "ingress",
+        })
     }
     return portConfigs, nil
 }
@@ -655,16 +687,30 @@ const (
     VolumeTypeTmpfs = "tmpfs"
     // VolumeTypeNamedPipe is the type for mounting Windows named pipes
     VolumeTypeNamedPipe = "npipe"
-
-    // SElinuxShared share the volume content
-    SElinuxShared = "z"
-    // SElinuxUnshared label content as private unshared
-    SElinuxUnshared = "Z"
 )

 // ServiceVolumeBind are options for a service volume of type bind
 type ServiceVolumeBind struct {
+    SELinux        string `mapstructure:"selinux" yaml:",omitempty" json:"selinux,omitempty"`
     Propagation    string `yaml:",omitempty" json:"propagation,omitempty"`
     CreateHostPath bool   `mapstructure:"create_host_path" yaml:"create_host_path,omitempty" json:"create_host_path,omitempty"`

     Extensions map[string]interface{} `yaml:",inline" json:"-"`
 }

+// SELinux represents the SELinux re-labeling options.
+const (
+    // SELinuxShared option indicates that the bind mount content is shared among multiple containers
+    SELinuxShared string = "z"
+    // SELinuxPrivate option indicates that the bind mount content is private and unshared
+    SELinuxPrivate string = "Z"
+)
+
 // Propagation represents the propagation of a mount.
 const (
     // PropagationRPrivate RPRIVATE
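
The SSHKey/SSHConfig types above back the new `ssh` build field; an entry is either a bare ID (such as `default`) or an `id=path` pair. A sketch of the lookup behaviour, reproducing just the Get method:

package main

import "fmt"

// SSHKey/SSHConfig reproduced from the hunk above so the lookup can be run
// in isolation; only Get is carried over here.
type SSHKey struct {
    ID   string
    Path string
}

type SSHConfig []SSHKey

func (s SSHConfig) Get(id string) (string, error) {
    for _, sshKey := range s {
        if sshKey.ID == id {
            return sshKey.Path, nil
        }
    }
    return "", fmt.Errorf("ID %s not found in SSH keys", id)
}

func main() {
    cfg := SSHConfig{
        {ID: "default"},                      // like `ssh: [default]` in compose YAML
        {ID: "mykey", Path: "~/.ssh/id_rsa"}, // like `ssh: [mykey=~/.ssh/id_rsa]`
    }
    p, err := cfg.Get("mykey")
    fmt.Println(p, err) // ~/.ssh/id_rsa <nil>
    _, err = cfg.Get("missing")
    fmt.Println(err) // ID missing not found in SSH keys
}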
1 vendor/github.com/compose-spec/godotenv/.gitignore generated vendored
@@ -1 +0,0 @@
-.DS_Store

27 vendor/github.com/compose-spec/godotenv/Earthfile generated vendored
@@ -1,27 +0,0 @@
-ARG GOLANG_VERSION=1.17.1
-ARG ALPINE_VERSION=3.14
-
-FROM golang:${GOLANG_VERSION}-alpine${ALPINE_VERSION}
-WORKDIR /code
-
-code:
-    FROM +base
-    COPY . .
-
-golangci:
-    ARG GOLANGCI_VERSION=v1.40.1
-    FROM golangci/golangci-lint:${GOLANGCI_VERSION}-alpine
-    SAVE ARTIFACT /usr/bin/golangci-lint
-
-lint:
-    FROM +code
-    COPY +golangci/golangci-lint /usr/bin/golangci-lint
-    RUN golangci-lint run --timeout 5m ./...
-
-test:
-    FROM +code
-    RUN go test ./...
-
-all:
-    BUILD +lint
-    BUILD +test

18 vendor/github.com/compose-spec/godotenv/README.md generated vendored
@@ -1,18 +0,0 @@
-# GoDotEnv
-
-A Go (golang) port of the Ruby dotenv project (which loads env vars from a .env file)
-
-From the original Library:
-
-> Storing configuration in the environment is one of the tenets of a twelve-factor app. Anything that is likely to change between deployment environments–such as resource handles for databases or credentials for external services–should be extracted from the code into environment variables.
->
-> But it is not always practical to set environment variables on development machines or continuous integration servers where multiple projects are run. Dotenv load variables from a .env file into ENV when the environment is bootstrapped.
-
-This is a fork of [joho/godotenv](https://github.com/joho/godotenv) focussing on `.env` file support by the compose specification
-
-
-
-To run linter and tests, please install [Earthly](https://earthly.dev/get-earthly) and run:
-```sh
-earthly +all
-```

3 vendor/github.com/compose-spec/godotenv/go.mod generated vendored
@@ -1,3 +0,0 @@
-module github.com/compose-spec/godotenv
-
-go 1.16

0 vendor/github.com/compose-spec/godotenv/go.sum generated vendored
2 vendor/github.com/containerd/containerd/version/version.go generated vendored
@@ -23,7 +23,7 @@ var (
     Package = "github.com/containerd/containerd"

     // Version holds the complete version number. Filled in at linking time.
-    Version = "1.6.0+unknown"
+    Version = "1.6.1+unknown"

     // Revision is filled with the VCS (e.g. git) revision being used to build
     // the program at linking time.
35 vendor/github.com/docker/cli/cli-plugins/manager/manager.go generated vendored
@@ -1,7 +1,6 @@
 package manager

 import (
-    "io/ioutil"
     "os"
     "path/filepath"
     "sort"
@@ -57,12 +56,12 @@ func getPluginDirs(dockerCli command.Cli) ([]string, error) {
 }

 func addPluginCandidatesFromDir(res map[string][]string, d string) error {
-    dentries, err := ioutil.ReadDir(d)
+    dentries, err := os.ReadDir(d)
     if err != nil {
         return err
     }
     for _, dentry := range dentries {
-        switch dentry.Mode() & os.ModeType {
+        switch dentry.Type() & os.ModeType {
         case 0, os.ModeSymlink:
             // Regular file or symlink, keep going
         default:
@@ -104,6 +103,36 @@ func listPluginCandidates(dirs []string) (map[string][]string, error) {
     return result, nil
 }

+// GetPlugin returns a plugin on the system by its name
+func GetPlugin(name string, dockerCli command.Cli, rootcmd *cobra.Command) (*Plugin, error) {
+    pluginDirs, err := getPluginDirs(dockerCli)
+    if err != nil {
+        return nil, err
+    }
+
+    candidates, err := listPluginCandidates(pluginDirs)
+    if err != nil {
+        return nil, err
+    }
+
+    if paths, ok := candidates[name]; ok {
+        if len(paths) == 0 {
+            return nil, errPluginNotFound(name)
+        }
+        c := &candidate{paths[0]}
+        p, err := newPlugin(c, rootcmd)
+        if err != nil {
+            return nil, err
+        }
+        if !IsNotFound(p.Err) {
+            p.ShadowedPaths = paths[1:]
+        }
+        return &p, nil
+    }
+
+    return nil, errPluginNotFound(name)
+}
+
 // ListPlugins produces a list of the plugins available on the system
 func ListPlugins(dockerCli command.Cli, rootcmd *cobra.Command) ([]Plugin, error) {
     pluginDirs, err := getPluginDirs(dockerCli)
8 vendor/github.com/docker/cli/cli-plugins/plugin/plugin.go generated vendored
@@ -160,3 +160,11 @@ func newMetadataSubcommand(plugin *cobra.Command, meta manager.Metadata) *cobra.
     }
     return cmd
 }
+
+// RunningStandalone tells a CLI plugin it is run standalone by direct execution
+func RunningStandalone() bool {
+    if os.Getenv(manager.ReexecEnvvar) != "" {
+        return false
+    }
+    return len(os.Args) < 2 || os.Args[1] != manager.MetadataSubcommandName
+}
5 vendor/github.com/docker/cli/cli/cobra.go generated vendored
@@ -58,11 +58,8 @@ func setupCommonRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *p
 // SetupRootCommand sets default usage, help, and error handling for the
 // root command.
 func SetupRootCommand(rootCmd *cobra.Command) (*cliflags.ClientOptions, *pflag.FlagSet, *cobra.Command) {
-    opts, flags, helpCmd := setupCommonRootCommand(rootCmd)
-
     rootCmd.SetVersionTemplate("Docker version {{.Version}}\n")
-
-    return opts, flags, helpCmd
+    return setupCommonRootCommand(rootCmd)
 }

 // SetupPluginRootCommand sets default usage, help and error handling for a plugin root command.
70 vendor/github.com/docker/cli/cli/command/cli.go generated vendored
@@ -3,7 +3,6 @@ package command
 import (
     "context"
     "io"
-    "io/ioutil"
     "os"
     "path/filepath"
     "runtime"
@@ -33,9 +32,7 @@ import (
     "github.com/moby/term"
     "github.com/pkg/errors"
     "github.com/spf13/cobra"
-    "github.com/theupdateframework/notary"
     notaryclient "github.com/theupdateframework/notary/client"
-    "github.com/theupdateframework/notary/passphrase"
 )

 // Streams is an interface which exposes the standard input and output streams
@@ -61,9 +58,9 @@ type Cli interface {
     ManifestStore() manifeststore.Store
     RegistryClient(bool) registryclient.RegistryClient
     ContentTrustEnabled() bool
+    BuildKitEnabled() (bool, error)
     ContextStore() store.Store
     CurrentContext() string
-    StackOrchestrator(flagValue string) (Orchestrator, error)
     DockerEndpoint() docker.Endpoint
 }
@@ -171,18 +168,24 @@ func (cli *DockerCli) ContentTrustEnabled() bool {
     return cli.contentTrust
 }

-// BuildKitEnabled returns whether buildkit is enabled either through a daemon setting
-// or otherwise the client-side DOCKER_BUILDKIT environment variable
-func BuildKitEnabled(si ServerInfo) (bool, error) {
-    buildkitEnabled := si.BuildkitVersion == types.BuilderBuildKit
-    if buildkitEnv := os.Getenv("DOCKER_BUILDKIT"); buildkitEnv != "" {
-        var err error
-        buildkitEnabled, err = strconv.ParseBool(buildkitEnv)
+// BuildKitEnabled returns buildkit is enabled or not.
+func (cli *DockerCli) BuildKitEnabled() (bool, error) {
+    // use DOCKER_BUILDKIT env var value if set
+    if v, ok := os.LookupEnv("DOCKER_BUILDKIT"); ok {
+        enabled, err := strconv.ParseBool(v)
         if err != nil {
             return false, errors.Wrap(err, "DOCKER_BUILDKIT environment variable expects boolean value")
         }
+        return enabled, nil
     }
-    return buildkitEnabled, nil
+    // if a builder alias is defined, we are using BuildKit
+    aliasMap := cli.ConfigFile().Aliases
+    if _, ok := aliasMap["builder"]; ok {
+        return true, nil
+    }
+    // otherwise, assume BuildKit is enabled but
+    // not if wcow reported from server side
+    return cli.ServerInfo().OSType != "windows", nil
 }

 // ManifestStore returns a store for local manifests
@@ -252,14 +255,6 @@ func (cli *DockerCli) Initialize(opts *cliflags.ClientOptions, ops ...Initialize

     if cli.client == nil {
         cli.client, err = newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile)
-        if tlsconfig.IsErrEncryptedKey(err) {
-            passRetriever := passphrase.PromptRetrieverWithInOut(cli.In(), cli.Out(), nil)
-            newClient := func(password string) (client.APIClient, error) {
-                cli.dockerEndpoint.TLSPassword = password
-                return newAPIClientFromEndpoint(cli.dockerEndpoint, cli.configFile)
-            }
-            cli.client, err = getClientWithPassword(passRetriever, newClient)
-        }
         if err != nil {
             return err
         }
@@ -279,7 +274,7 @@ func NewAPIClientFromFlags(opts *cliflags.CommonOptions, configFile *configfile.
     store := &ContextStoreWithDefault{
         Store: store.New(cliconfig.ContextStoreDir(), storeConfig),
         Resolver: func() (*DefaultContext, error) {
-            return ResolveDefaultContext(opts, configFile, storeConfig, ioutil.Discard)
+            return ResolveDefaultContext(opts, configFile, storeConfig, io.Discard)
         },
     }
     contextName, err := resolveContextName(opts, configFile, store)
@@ -377,20 +372,6 @@ func (cli *DockerCli) initializeFromClient() {
     cli.client.NegotiateAPIVersionPing(ping)
 }

-func getClientWithPassword(passRetriever notary.PassRetriever, newClient func(password string) (client.APIClient, error)) (client.APIClient, error) {
-    for attempts := 0; ; attempts++ {
-        passwd, giveup, err := passRetriever("private", "encrypted TLS private", false, attempts)
-        if giveup || err != nil {
-            return nil, errors.Wrap(err, "private key is encrypted, but could not get passphrase")
-        }
-
-        apiclient, err := newClient(passwd)
-        if !tlsconfig.IsErrEncryptedKey(err) {
-            return apiclient, err
-        }
-    }
-}
-
 // NotaryClient provides a Notary Repository to interact with signed metadata for an image
 func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
     return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
@@ -406,25 +387,6 @@ func (cli *DockerCli) CurrentContext() string {
     return cli.currentContext
 }

-// StackOrchestrator resolves which stack orchestrator is in use
-func (cli *DockerCli) StackOrchestrator(flagValue string) (Orchestrator, error) {
-    currentContext := cli.CurrentContext()
-    ctxRaw, err := cli.ContextStore().GetMetadata(currentContext)
-    if store.IsErrContextDoesNotExist(err) {
-        // case where the currentContext has been removed (CLI behavior is to fallback to using DOCKER_HOST based resolution)
-        return GetStackOrchestrator(flagValue, "", cli.ConfigFile().StackOrchestrator, cli.Err())
-    }
-    if err != nil {
-        return "", err
-    }
-    ctxMeta, err := GetDockerContext(ctxRaw)
-    if err != nil {
-        return "", err
-    }
-    ctxOrchestrator := string(ctxMeta.StackOrchestrator)
-    return GetStackOrchestrator(flagValue, ctxOrchestrator, cli.ConfigFile().StackOrchestrator, cli.Err())
-}
-
 // DockerEndpoint returns the current docker endpoint
 func (cli *DockerCli) DockerEndpoint() docker.Endpoint {
     return cli.dockerEndpoint
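
The new BuildKitEnabled method resolves in a fixed order: an explicit DOCKER_BUILDKIT value wins, then a configured `builder` alias forces BuildKit on, otherwise it defaults to enabled except against a Windows daemon. A standalone sketch of that order (the aliases map and osType parameter stand in for cli.ConfigFile() and cli.ServerInfo(); names here are illustrative):

package main

import (
    "fmt"
    "os"
    "strconv"
)

// buildKitEnabled mirrors the resolution order of the method above, with the
// CLI's config and server info replaced by plain parameters.
func buildKitEnabled(aliases map[string]string, osType string) (bool, error) {
    if v, ok := os.LookupEnv("DOCKER_BUILDKIT"); ok {
        enabled, err := strconv.ParseBool(v)
        if err != nil {
            return false, fmt.Errorf("DOCKER_BUILDKIT environment variable expects boolean value: %w", err)
        }
        return enabled, nil // explicit env var always wins
    }
    if _, ok := aliases["builder"]; ok {
        return true, nil // a builder alias means BuildKit is in use
    }
    return osType != "windows", nil // default on, except Windows daemons
}

func main() {
    os.Unsetenv("DOCKER_BUILDKIT")
    fmt.Println(buildKitEnabled(nil, "linux"))   // true <nil>
    fmt.Println(buildKitEnabled(nil, "windows")) // false <nil>
    os.Setenv("DOCKER_BUILDKIT", "0")
    fmt.Println(buildKitEnabled(nil, "linux")) // false <nil>
}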
6 vendor/github.com/docker/cli/cli/command/context.go generated vendored
@@ -10,7 +10,6 @@ import (
 // DockerContext is a typed representation of what we put in Context metadata
 type DockerContext struct {
     Description       string
-    StackOrchestrator Orchestrator
     AdditionalFields  map[string]interface{}
 }

@@ -20,9 +19,6 @@ func (dc DockerContext) MarshalJSON() ([]byte, error) {
     if dc.Description != "" {
         s["Description"] = dc.Description
     }
-    if dc.StackOrchestrator != "" {
-        s["StackOrchestrator"] = dc.StackOrchestrator
-    }
     if dc.AdditionalFields != nil {
         for k, v := range dc.AdditionalFields {
             s[k] = v
@@ -41,8 +37,6 @@ func (dc *DockerContext) UnmarshalJSON(payload []byte) error {
         switch k {
         case "Description":
             dc.Description = v.(string)
-        case "StackOrchestrator":
-            dc.StackOrchestrator = Orchestrator(v.(string))
         default:
             if dc.AdditionalFields == nil {
                 dc.AdditionalFields = make(map[string]interface{})
9 vendor/github.com/docker/cli/cli/command/defaultcontextstore.go generated vendored
@@ -41,15 +41,11 @@ type EndpointDefaultResolver interface {
     // the lack of a default (e.g. because the config file which
     // would contain it is missing). If there is no default then
     // returns nil, nil, nil.
-    ResolveDefault(Orchestrator) (interface{}, *store.EndpointTLSData, error)
+    ResolveDefault() (interface{}, *store.EndpointTLSData, error)
 }

 // ResolveDefaultContext creates a Metadata for the current CLI invocation parameters
 func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.ConfigFile, storeconfig store.Config, stderr io.Writer) (*DefaultContext, error) {
-    stackOrchestrator, err := GetStackOrchestrator("", "", config.StackOrchestrator, stderr)
-    if err != nil {
-        return nil, err
-    }
     contextTLSData := store.ContextTLSData{
         Endpoints: make(map[string]store.EndpointTLSData),
     }
@@ -57,7 +53,6 @@ func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.Conf
         Endpoints: make(map[string]interface{}),
         Metadata: DockerContext{
             Description:       "",
-            StackOrchestrator: stackOrchestrator,
         },
         Name: DefaultContextName,
     }
@@ -77,7 +72,7 @@ func ResolveDefaultContext(opts *cliflags.CommonOptions, config *configfile.Conf
     }
     ep := get()
     if i, ok := ep.(EndpointDefaultResolver); ok {
-        meta, tls, err := i.ResolveDefault(stackOrchestrator)
+        meta, tls, err := i.ResolveDefault()
         if err != nil {
             return err
         }
84 vendor/github.com/docker/cli/cli/command/orchestrator.go generated vendored
@@ -1,84 +0,0 @@
-package command
-
-import (
-    "fmt"
-    "io"
-    "os"
-)
-
-// Orchestrator type acts as an enum describing supported orchestrators.
-type Orchestrator string
-
-const (
-    // OrchestratorKubernetes orchestrator
-    OrchestratorKubernetes = Orchestrator("kubernetes")
-    // OrchestratorSwarm orchestrator
-    OrchestratorSwarm = Orchestrator("swarm")
-    // OrchestratorAll orchestrator
-    OrchestratorAll = Orchestrator("all")
-    orchestratorUnset = Orchestrator("")
-
-    defaultOrchestrator = OrchestratorSwarm
-    envVarDockerStackOrchestrator = "DOCKER_STACK_ORCHESTRATOR"
-    envVarDockerOrchestrator = "DOCKER_ORCHESTRATOR"
-)
-
-// HasKubernetes returns true if defined orchestrator has Kubernetes capabilities.
-func (o Orchestrator) HasKubernetes() bool {
-    return o == OrchestratorKubernetes || o == OrchestratorAll
-}
-
-// HasSwarm returns true if defined orchestrator has Swarm capabilities.
-func (o Orchestrator) HasSwarm() bool {
-    return o == OrchestratorSwarm || o == OrchestratorAll
-}
-
-// HasAll returns true if defined orchestrator has both Swarm and Kubernetes capabilities.
-func (o Orchestrator) HasAll() bool {
-    return o == OrchestratorAll
-}
-
-func normalize(value string) (Orchestrator, error) {
-    switch value {
-    case "kubernetes":
-        return OrchestratorKubernetes, nil
-    case "swarm":
-        return OrchestratorSwarm, nil
-    case "", "unset": // unset is the old value for orchestratorUnset. Keep accepting this for backward compat
-        return orchestratorUnset, nil
-    case "all":
-        return OrchestratorAll, nil
-    default:
-        return defaultOrchestrator, fmt.Errorf("specified orchestrator %q is invalid, please use either kubernetes, swarm or all", value)
-    }
-}
-
-// NormalizeOrchestrator parses an orchestrator value and checks if it is valid
-func NormalizeOrchestrator(value string) (Orchestrator, error) {
-    return normalize(value)
-}
-
-// GetStackOrchestrator checks DOCKER_STACK_ORCHESTRATOR environment variable and configuration file
-// orchestrator value and returns user defined Orchestrator.
-func GetStackOrchestrator(flagValue, contextValue, globalDefault string, stderr io.Writer) (Orchestrator, error) {
-    // Check flag
-    if o, err := normalize(flagValue); o != orchestratorUnset {
-        return o, err
-    }
-    // Check environment variable
-    env := os.Getenv(envVarDockerStackOrchestrator)
-    if env == "" && os.Getenv(envVarDockerOrchestrator) != "" {
-        fmt.Fprintf(stderr, "WARNING: experimental environment variable %s is set. Please use %s instead\n", envVarDockerOrchestrator, envVarDockerStackOrchestrator)
-    }
-    if o, err := normalize(env); o != orchestratorUnset {
-        return o, err
-    }
-    if o, err := normalize(contextValue); o != orchestratorUnset {
-        return o, err
-    }
-    if o, err := normalize(globalDefault); o != orchestratorUnset {
-        return o, err
-    }
-    // Nothing set, use default orchestrator
-    return defaultOrchestrator, nil
-}
15
vendor/github.com/docker/cli/cli/command/registry.go
generated
vendored
15
vendor/github.com/docker/cli/cli/command/registry.go
generated
vendored
@@ -63,17 +63,14 @@ func RegistryAuthenticationPrivilegedFunc(cli Cli, index *registrytypes.IndexInf
indexServer := registry.GetAuthConfigKey(index)
isDefaultRegistry := indexServer == ElectAuthServer(context.Background(), cli)
authConfig, err := GetDefaultAuthConfig(cli, true, indexServer, isDefaultRegistry)
if authConfig == nil {
authConfig = &types.AuthConfig{}
}
if err != nil {
fmt.Fprintf(cli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err)
}
err = ConfigureAuth(cli, "", "", authConfig, isDefaultRegistry)
err = ConfigureAuth(cli, "", "", &authConfig, isDefaultRegistry)
if err != nil {
return "", err
}
return EncodeAuthToBase64(*authConfig)
return EncodeAuthToBase64(authConfig)
}
}

@@ -92,7 +89,7 @@ func ResolveAuthConfig(ctx context.Context, cli Cli, index *registrytypes.IndexI

// GetDefaultAuthConfig gets the default auth config given a serverAddress
// If credentials for given serverAddress exists in the credential store, the configuration will be populated with values in it
func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (*types.AuthConfig, error) {
func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, isDefaultRegistry bool) (types.AuthConfig, error) {
if !isDefaultRegistry {
serverAddress = registry.ConvertToHostname(serverAddress)
}
@@ -101,13 +98,15 @@ func GetDefaultAuthConfig(cli Cli, checkCredStore bool, serverAddress string, is
if checkCredStore {
authconfig, err = cli.ConfigFile().GetAuthConfig(serverAddress)
if err != nil {
return nil, err
return types.AuthConfig{
ServerAddress: serverAddress,
}, err
}
}
authconfig.ServerAddress = serverAddress
authconfig.IdentityToken = ""
res := types.AuthConfig(authconfig)
return &res, nil
return res, nil
}

// ConfigureAuth handles prompting of user's username and password if needed
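Note: the hunks above change `GetDefaultAuthConfig` to return a `types.AuthConfig` value instead of a pointer, so callers no longer need a nil guard, and even on a credential-store error they receive a usable config pre-filled with the server address. A hedged sketch of the new calling convention, assuming an initialized `command.Cli`:

```go
package example

import (
	"fmt"

	"github.com/docker/cli/cli/command"
)

// resolveAuth sketches the value-based convention: no nil guard, and a
// credential-store error still leaves a usable AuthConfig carrying the
// server address.
func resolveAuth(dockerCli command.Cli, indexServer string, isDefaultRegistry bool) (string, error) {
	authConfig, err := command.GetDefaultAuthConfig(dockerCli, true, indexServer, isDefaultRegistry)
	if err != nil {
		fmt.Fprintf(dockerCli.Err(), "Unable to retrieve stored credentials for %s, error: %s.\n", indexServer, err)
	}
	return command.EncodeAuthToBase64(authConfig)
}
```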
22
vendor/github.com/docker/cli/cli/config/config.go
generated
vendored
22
vendor/github.com/docker/cli/cli/config/config.go
generated
vendored
@@ -104,14 +104,18 @@ func LoadFromReader(configData io.Reader) (*configfile.ConfigFile, error) {
return &configFile, err
}

// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file
var printLegacyFileWarning bool

// Load reads the configuration files in the given directory, and sets up
// the auth config information and returns values.
// FIXME: use the internal golang config parser
func Load(configDir string) (*configfile.ConfigFile, error) {
printLegacyFileWarning = false
cfg, _, err := load(configDir)
return cfg, err
}

// TODO remove this temporary hack, which is used to warn about the deprecated ~/.dockercfg file
// so we can remove the bool return value and collapse this back into `Load`
func load(configDir string) (*configfile.ConfigFile, bool, error) {
printLegacyFileWarning := false

if configDir == "" {
configDir = Dir()
@@ -127,11 +131,11 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
if err != nil {
err = errors.Wrap(err, filename)
}
return configFile, err
return configFile, printLegacyFileWarning, err
} else if !os.IsNotExist(err) {
// if file is there but we can't stat it for any reason other
// than it doesn't exist then stop
return configFile, errors.Wrap(err, filename)
return configFile, printLegacyFileWarning, errors.Wrap(err, filename)
}

// Can't find latest config file so check for the old one
@@ -140,16 +144,16 @@ func Load(configDir string) (*configfile.ConfigFile, error) {
printLegacyFileWarning = true
defer file.Close()
if err := configFile.LegacyLoadFromReader(file); err != nil {
return configFile, errors.Wrap(err, filename)
return configFile, printLegacyFileWarning, errors.Wrap(err, filename)
}
}
return configFile, nil
return configFile, printLegacyFileWarning, nil
}

// LoadDefaultConfigFile attempts to load the default config file and returns
// an initialized ConfigFile struct if none is found.
func LoadDefaultConfigFile(stderr io.Writer) *configfile.ConfigFile {
configFile, err := Load(Dir())
configFile, printLegacyFileWarning, err := load(Dir())
if err != nil {
fmt.Fprintf(stderr, "WARNING: Error loading config file: %v\n", err)
}
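Note: this refactor threads the legacy `~/.dockercfg` warning flag through the unexported `load` as a return value instead of mutating a package-level variable, so `LoadDefaultConfigFile` can warn without shared global state. The public entry points are unchanged; a minimal sketch of typical use:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/cli/cli/config"
)

func main() {
	// Warnings (load errors, deprecated ~/.dockercfg) go to the writer we pass.
	cfg := config.LoadDefaultConfigFile(os.Stderr)
	fmt.Println("loaded config from:", cfg.Filename)
}
```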
34
vendor/github.com/docker/cli/cli/config/configfile/file.go
generated
vendored
34
vendor/github.com/docker/cli/cli/config/configfile/file.go
generated
vendored
@@ -3,9 +3,7 @@ package configfile
import (
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -46,8 +44,7 @@ type ConfigFile struct {
PruneFilters []string `json:"pruneFilters,omitempty"`
Proxies map[string]ProxyConfig `json:"proxies,omitempty"`
Experimental string `json:"experimental,omitempty"`
StackOrchestrator string `json:"stackOrchestrator,omitempty"`
Kubernetes *KubernetesConfig `json:"kubernetes,omitempty"`
StackOrchestrator string `json:"stackOrchestrator,omitempty"` // Deprecated: swarm is now the default orchestrator, and this option is ignored.
CurrentContext string `json:"currentContext,omitempty"`
CLIPluginsExtraDirs []string `json:"cliPluginsExtraDirs,omitempty"`
Plugins map[string]map[string]string `json:"plugins,omitempty"`
@@ -60,11 +57,7 @@ type ProxyConfig struct {
HTTPSProxy string `json:"httpsProxy,omitempty"`
NoProxy string `json:"noProxy,omitempty"`
FTPProxy string `json:"ftpProxy,omitempty"`
}

// KubernetesConfig contains Kubernetes orchestrator settings
type KubernetesConfig struct {
AllNamespaces string `json:"allNamespaces,omitempty"`
AllProxy string `json:"allProxy,omitempty"`
}

// New initializes an empty configuration file for the given filename 'fn'
@@ -81,7 +74,7 @@ func New(fn string) *ConfigFile {
// LegacyLoadFromReader reads the non-nested configuration data given and sets up the
// auth config information with given directory and populates the receiver object
func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
b, err := ioutil.ReadAll(configData)
b, err := io.ReadAll(configData)
if err != nil {
return err
}
@@ -119,7 +112,7 @@ func (configFile *ConfigFile) LegacyLoadFromReader(configData io.Reader) error {
// LoadFromReader reads the configuration data given and sets up the auth config
// information with given directory and populates the receiver object
func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
if err := json.NewDecoder(configData).Decode(&configFile); err != nil && !errors.Is(err, io.EOF) {
if err := json.NewDecoder(configData).Decode(configFile); err != nil && !errors.Is(err, io.EOF) {
return err
}
var err error
@@ -134,7 +127,7 @@ func (configFile *ConfigFile) LoadFromReader(configData io.Reader) error {
ac.ServerAddress = addr
configFile.AuthConfigs[addr] = ac
}
return checkKubernetesConfiguration(configFile.Kubernetes)
return nil
}

// ContainsAuth returns whether there is authentication configured
@@ -194,7 +187,7 @@ func (configFile *ConfigFile) Save() (retErr error) {
if err := os.MkdirAll(dir, 0700); err != nil {
return err
}
temp, err := ioutil.TempFile(dir, filepath.Base(configFile.Filename))
temp, err := os.CreateTemp(dir, filepath.Base(configFile.Filename))
if err != nil {
return err
}
@@ -244,6 +237,7 @@ func (configFile *ConfigFile) ParseProxyConfig(host string, runOpts map[string]*
"HTTPS_PROXY": &config.HTTPSProxy,
"NO_PROXY": &config.NoProxy,
"FTP_PROXY": &config.FTPProxy,
"ALL_PROXY": &config.AllProxy,
}
m := runOpts
if m == nil {
@@ -399,17 +393,3 @@ func (configFile *ConfigFile) SetPluginConfig(pluginname, option, value string)
delete(configFile.Plugins, pluginname)
}
}

func checkKubernetesConfiguration(kubeConfig *KubernetesConfig) error {
if kubeConfig == nil {
return nil
}
switch kubeConfig.AllNamespaces {
case "":
case "enabled":
case "disabled":
default:
return fmt.Errorf("invalid 'kubernetes.allNamespaces' value, should be 'enabled' or 'disabled': %s", kubeConfig.AllNamespaces)
}
return nil
}
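Note: this file and several vendored files below make the same mechanical migration off the deprecated `io/ioutil` package; each call maps one-to-one onto `os` or `io`. A self-contained sketch of the replacements used throughout:

```go
package main

import (
	"fmt"
	"io"
	"os"
	"strings"
)

func main() {
	tmp, err := os.CreateTemp("", "ioutil-demo-*") // was ioutil.TempFile
	if err != nil {
		panic(err)
	}
	defer os.Remove(tmp.Name())
	defer tmp.Close()

	if err := os.WriteFile(tmp.Name(), []byte("hello"), 0600); err != nil { // was ioutil.WriteFile
		panic(err)
	}
	data, err := os.ReadFile(tmp.Name()) // was ioutil.ReadFile
	if err != nil {
		panic(err)
	}
	all, err := io.ReadAll(strings.NewReader(string(data))) // was ioutil.ReadAll
	if err != nil {
		panic(err)
	}
	fmt.Println(string(all))

	// One type change to note: os.ReadDir returns []os.DirEntry rather
	// than ioutil.ReadDir's []os.FileInfo, but these files only use
	// Name() and IsDir(), which both types provide.
	entries, err := os.ReadDir(os.TempDir()) // was ioutil.ReadDir
	if err != nil {
		panic(err)
	}
	fmt.Println("entries in TempDir:", len(entries))
}
```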
18
vendor/github.com/docker/cli/cli/context/docker/load.go
generated
vendored
18
vendor/github.com/docker/cli/cli/context/docker/load.go
generated
vendored
@@ -4,7 +4,6 @@ import (
"crypto/tls"
"crypto/x509"
"encoding/pem"
"fmt"
"net"
"net/http"
"os"
@@ -27,6 +26,11 @@ type EndpointMeta = context.EndpointMetaBase
type Endpoint struct {
EndpointMeta
TLSData *context.TLSData

// Deprecated: Use of encrypted TLS private keys has been deprecated, and
// will be removed in a future release. Golang has deprecated support for
// legacy PEM encryption (as specified in RFC 1423), as it is insecure by
// design (see https://go-review.googlesource.com/c/go/+/264159).
TLSPassword string
}

@@ -62,16 +66,10 @@ func (c *Endpoint) tlsConfig() (*tls.Config, error) {
keyBytes := c.TLSData.Key
pemBlock, _ := pem.Decode(keyBytes)
if pemBlock == nil {
return nil, fmt.Errorf("no valid private key found")
return nil, errors.New("no valid private key found")
}

var err error
if x509.IsEncryptedPEMBlock(pemBlock) {
keyBytes, err = x509.DecryptPEMBlock(pemBlock, []byte(c.TLSPassword))
if err != nil {
return nil, errors.Wrap(err, "private key is encrypted, but could not decrypt it")
}
keyBytes = pem.EncodeToMemory(&pem.Block{Type: pemBlock.Type, Bytes: keyBytes})
if x509.IsEncryptedPEMBlock(pemBlock) { //nolint: staticcheck // SA1019: x509.IsEncryptedPEMBlock is deprecated, and insecure by design
return nil, errors.New("private key is encrypted - support for encrypted private keys has been removed, see https://docs.docker.com/go/deprecated/")
}

x509cert, err := tls.X509KeyPair(c.TLSData.Cert, keyBytes)
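Note: rather than decrypting legacy RFC 1423 keys with `x509.DecryptPEMBlock`, the endpoint code above now fails fast when it sees one. A sketch of the guard pattern using only the standard library (the `//nolint` comment mirrors the vendored code, since the stdlib check itself is deprecated):

```go
package example

import (
	"crypto/x509"
	"encoding/pem"
	"errors"
)

// checkKey mirrors the guard added above: instead of decrypting legacy
// RFC 1423 keys, callers now fail fast when one is encountered.
func checkKey(keyPEM []byte) error {
	pemBlock, _ := pem.Decode(keyPEM)
	if pemBlock == nil {
		return errors.New("no valid private key found")
	}
	if x509.IsEncryptedPEMBlock(pemBlock) { //nolint:staticcheck // SA1019: deprecated, and insecure by design
		return errors.New("private key is encrypted - support for encrypted private keys has been removed")
	}
	return nil
}
```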
6
vendor/github.com/docker/cli/cli/context/store/doc.go
generated
vendored
6
vendor/github.com/docker/cli/cli/context/store/doc.go
generated
vendored
@@ -12,11 +12,9 @@
// - tls/
// - <context id>/endpoint1/: directory containing TLS data for the endpoint1 in the corresponding context
//
// The context store itself has absolutely no knowledge about what a docker or a kubernetes endpoint should contain in term of metadata or TLS config.
// The context store itself has absolutely no knowledge about what a docker endpoint should contain in term of metadata or TLS config.
// Client code is responsible for generating and parsing endpoint metadata and TLS files.
// The multi-endpoints approach of this package allows to combine many different endpoints in the same "context" (e.g., the Docker CLI
// is able for a single context to define both a docker endpoint and a Kubernetes endpoint for the same cluster, and also specify which
// orchestrator to use by default when deploying a compose stack on this cluster).
// The multi-endpoints approach of this package allows to combine many different endpoints in the same "context".
//
// Context IDs are actually SHA256 hashes of the context name, and are there only to avoid dealing with special characters in context names.
package store
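Note: as the package comment above says, a context's on-disk ID is just the SHA256 of its name, which sidesteps special characters in directory names. A runnable sketch:

```go
package main

import (
	"crypto/sha256"
	"fmt"
)

func main() {
	// A context named "my-context" lives under this directory name.
	fmt.Printf("%x\n", sha256.Sum256([]byte("my-context")))
}
```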
7
vendor/github.com/docker/cli/cli/context/store/metadatastore.go
generated
vendored
7
vendor/github.com/docker/cli/cli/context/store/metadatastore.go
generated
vendored
@@ -3,7 +3,6 @@ package store
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"reflect"
@@ -35,7 +34,7 @@ func (s *metadataStore) createOrUpdate(meta Metadata) error {
if err != nil {
return err
}
return ioutil.WriteFile(filepath.Join(contextDir, metaFile), bytes, 0644)
return os.WriteFile(filepath.Join(contextDir, metaFile), bytes, 0644)
}

func parseTypedOrMap(payload []byte, getter TypeGetter) (interface{}, error) {
@@ -58,7 +57,7 @@ func parseTypedOrMap(payload []byte, getter TypeGetter) (interface{}, error) {

func (s *metadataStore) get(id contextdir) (Metadata, error) {
contextDir := s.contextDir(id)
bytes, err := ioutil.ReadFile(filepath.Join(contextDir, metaFile))
bytes, err := os.ReadFile(filepath.Join(contextDir, metaFile))
if err != nil {
return Metadata{}, convertContextDoesNotExist(err)
}
@@ -117,7 +116,7 @@ func isContextDir(path string) bool {
}

func listRecursivelyMetadataDirs(root string) ([]string, error) {
fis, err := ioutil.ReadDir(root)
fis, err := os.ReadDir(root)
if err != nil {
return nil, err
}
11
vendor/github.com/docker/cli/cli/context/store/store.go
generated
vendored
11
vendor/github.com/docker/cli/cli/context/store/store.go
generated
vendored
@@ -9,7 +9,6 @@ import (
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"path"
"path/filepath"
@@ -349,7 +348,7 @@ func importTar(name string, s Writer, reader io.Reader) error {
return errors.Wrap(err, hdr.Name)
}
if hdr.Name == metaFile {
data, err := ioutil.ReadAll(tr)
data, err := io.ReadAll(tr)
if err != nil {
return err
}
@@ -362,7 +361,7 @@ func importTar(name string, s Writer, reader io.Reader) error {
}
importedMetaFile = true
} else if strings.HasPrefix(hdr.Name, "tls/") {
data, err := ioutil.ReadAll(tr)
data, err := io.ReadAll(tr)
if err != nil {
return err
}
@@ -378,7 +377,7 @@ func importTar(name string, s Writer, reader io.Reader) error {
}

func importZip(name string, s Writer, reader io.Reader) error {
body, err := ioutil.ReadAll(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
body, err := io.ReadAll(&LimitedReader{R: reader, N: maxAllowedFileSizeToImport})
if err != nil {
return err
}
@@ -406,7 +405,7 @@ func importZip(name string, s Writer, reader io.Reader) error {
return err
}

data, err := ioutil.ReadAll(&LimitedReader{R: f, N: maxAllowedFileSizeToImport})
data, err := io.ReadAll(&LimitedReader{R: f, N: maxAllowedFileSizeToImport})
defer f.Close()
if err != nil {
return err
@@ -424,7 +423,7 @@ func importZip(name string, s Writer, reader io.Reader) error {
if err != nil {
return err
}
data, err := ioutil.ReadAll(f)
data, err := io.ReadAll(f)
defer f.Close()
if err != nil {
return err
11
vendor/github.com/docker/cli/cli/context/store/tlsstore.go
generated
vendored
11
vendor/github.com/docker/cli/cli/context/store/tlsstore.go
generated
vendored
@@ -1,7 +1,6 @@
package store

import (
"io/ioutil"
"os"
"path/filepath"
)
@@ -33,18 +32,18 @@ func (s *tlsStore) createOrUpdate(contextID contextdir, endpointName, filename s
if err := os.MkdirAll(epdir, 0700); err != nil {
return err
}
return ioutil.WriteFile(s.filePath(contextID, endpointName, filename), data, 0600)
return os.WriteFile(s.filePath(contextID, endpointName, filename), data, 0600)
}

func (s *tlsStore) getData(contextID contextdir, endpointName, filename string) ([]byte, error) {
data, err := ioutil.ReadFile(s.filePath(contextID, endpointName, filename))
data, err := os.ReadFile(s.filePath(contextID, endpointName, filename))
if err != nil {
return nil, convertTLSDataDoesNotExist(endpointName, filename, err)
}
return data, nil
}

func (s *tlsStore) remove(contextID contextdir, endpointName, filename string) error {
func (s *tlsStore) remove(contextID contextdir, endpointName, filename string) error { // nolint:unused
err := os.Remove(s.filePath(contextID, endpointName, filename))
if os.IsNotExist(err) {
return nil
@@ -61,7 +60,7 @@ func (s *tlsStore) removeAllContextData(contextID contextdir) error {
}

func (s *tlsStore) listContextData(contextID contextdir) (map[string]EndpointFiles, error) {
epFSs, err := ioutil.ReadDir(s.contextDir(contextID))
epFSs, err := os.ReadDir(s.contextDir(contextID))
if err != nil {
if os.IsNotExist(err) {
return map[string]EndpointFiles{}, nil
@@ -72,7 +71,7 @@ func (s *tlsStore) listContextData(contextID contextdir) (map[string]EndpointFil
for _, epFS := range epFSs {
if epFS.IsDir() {
epDir := s.endpointDir(contextID, epFS.Name())
fss, err := ioutil.ReadDir(epDir)
fss, err := os.ReadDir(epDir)
if err != nil {
return nil, err
}
8
vendor/github.com/docker/cli/cli/context/tlsdata.go
generated
vendored
8
vendor/github.com/docker/cli/cli/context/tlsdata.go
generated
vendored
@@ -1,7 +1,7 @@
package context

import (
"io/ioutil"
"os"

"github.com/docker/cli/cli/context/store"
"github.com/pkg/errors"
@@ -77,17 +77,17 @@ func TLSDataFromFiles(caPath, certPath, keyPath string) (*TLSData, error) {
err error
)
if caPath != "" {
if ca, err = ioutil.ReadFile(caPath); err != nil {
if ca, err = os.ReadFile(caPath); err != nil {
return nil, err
}
}
if certPath != "" {
if cert, err = ioutil.ReadFile(certPath); err != nil {
if cert, err = os.ReadFile(certPath); err != nil {
return nil, err
}
}
if keyPath != "" {
if key, err = ioutil.ReadFile(keyPath); err != nil {
if key, err = os.ReadFile(keyPath); err != nil {
return nil, err
}
}
9
vendor/github.com/docker/cli/cli/manifest/store/store.go
generated
vendored
9
vendor/github.com/docker/cli/cli/manifest/store/store.go
generated
vendored
@@ -3,7 +3,6 @@ package store
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"strings"
@@ -47,7 +46,7 @@ func (s *fsStore) Get(listRef reference.Reference, manifest reference.Reference)
}

func (s *fsStore) getFromFilename(ref reference.Reference, filename string) (types.ImageManifest, error) {
bytes, err := ioutil.ReadFile(filename)
bytes, err := os.ReadFile(filename)
switch {
case os.IsNotExist(err):
return types.ImageManifest{}, newNotFoundError(ref.String())
@@ -112,7 +111,7 @@ func (s *fsStore) GetList(listRef reference.Reference) ([]types.ImageManifest, e
// listManifests stored in a transaction
func (s *fsStore) listManifests(transaction string) ([]string, error) {
transactionDir := filepath.Join(s.root, makeFilesafeName(transaction))
fileInfos, err := ioutil.ReadDir(transactionDir)
fileInfos, err := os.ReadDir(transactionDir)
switch {
case os.IsNotExist(err):
return nil, nil
@@ -120,7 +119,7 @@ func (s *fsStore) listManifests(transaction string) ([]string, error) {
return nil, err
}

filenames := []string{}
filenames := make([]string, 0, len(fileInfos))
for _, info := range fileInfos {
filenames = append(filenames, info.Name())
}
@@ -137,7 +136,7 @@ func (s *fsStore) Save(listRef reference.Reference, manifest reference.Reference
if err != nil {
return err
}
return ioutil.WriteFile(filename, bytes, 0644)
return os.WriteFile(filename, bytes, 0644)
}

func (s *fsStore) createManifestListDirectory(transaction string) error {
11
vendor/github.com/docker/cli/opts/opts.go
generated
vendored
11
vendor/github.com/docker/cli/opts/opts.go
generated
vendored
@@ -321,17 +321,6 @@ func ValidateSysctl(val string) (string, error) {
return "", fmt.Errorf("sysctl '%s' is not whitelisted", val)
}

// ValidateProgressOutput errors out if an invalid value is passed to --progress
func ValidateProgressOutput(val string) error {
valid := []string{"auto", "plain", "tty"}
for _, s := range valid {
if s == val {
return nil
}
}
return fmt.Errorf("invalid value %q passed to --progress, valid values are: %s", val, strings.Join(valid, ", "))
}

// FilterOpt is a flag type for validating filters
type FilterOpt struct {
filter filters.Args
201
vendor/github.com/docker/compose-on-kubernetes/LICENSE
generated
vendored
201
vendor/github.com/docker/compose-on-kubernetes/LICENSE
generated
vendored
@@ -1,201 +0,0 @@
|
||||
Apache License
|
||||
Version 2.0, January 2004
|
||||
http://www.apache.org/licenses/
|
||||
|
||||
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
|
||||
|
||||
1. Definitions.
|
||||
|
||||
"License" shall mean the terms and conditions for use, reproduction,
|
||||
and distribution as defined by Sections 1 through 9 of this document.
|
||||
|
||||
"Licensor" shall mean the copyright owner or entity authorized by
|
||||
the copyright owner that is granting the License.
|
||||
|
||||
"Legal Entity" shall mean the union of the acting entity and all
|
||||
other entities that control, are controlled by, or are under common
|
||||
control with that entity. For the purposes of this definition,
|
||||
"control" means (i) the power, direct or indirect, to cause the
|
||||
direction or management of such entity, whether by contract or
|
||||
otherwise, or (ii) ownership of fifty percent (50%) or more of the
|
||||
outstanding shares, or (iii) beneficial ownership of such entity.
|
||||
|
||||
"You" (or "Your") shall mean an individual or Legal Entity
|
||||
exercising permissions granted by this License.
|
||||
|
||||
"Source" form shall mean the preferred form for making modifications,
|
||||
including but not limited to software source code, documentation
|
||||
source, and configuration files.
|
||||
|
||||
"Object" form shall mean any form resulting from mechanical
|
||||
transformation or translation of a Source form, including but
|
||||
not limited to compiled object code, generated documentation,
|
||||
and conversions to other media types.
|
||||
|
||||
"Work" shall mean the work of authorship, whether in Source or
|
||||
Object form, made available under the License, as indicated by a
|
||||
copyright notice that is included in or attached to the work
|
||||
(an example is provided in the Appendix below).
|
||||
|
||||
"Derivative Works" shall mean any work, whether in Source or Object
|
||||
form, that is based on (or derived from) the Work and for which the
|
||||
editorial revisions, annotations, elaborations, or other modifications
|
||||
represent, as a whole, an original work of authorship. For the purposes
|
||||
of this License, Derivative Works shall not include works that remain
|
||||
separable from, or merely link (or bind by name) to the interfaces of,
|
||||
the Work and Derivative Works thereof.
|
||||
|
||||
"Contribution" shall mean any work of authorship, including
|
||||
the original version of the Work and any modifications or additions
|
||||
to that Work or Derivative Works thereof, that is intentionally
|
||||
submitted to Licensor for inclusion in the Work by the copyright owner
|
||||
or by an individual or Legal Entity authorized to submit on behalf of
|
||||
the copyright owner. For the purposes of this definition, "submitted"
|
||||
means any form of electronic, verbal, or written communication sent
|
||||
to the Licensor or its representatives, including but not limited to
|
||||
communication on electronic mailing lists, source code control systems,
|
||||
and issue tracking systems that are managed by, or on behalf of, the
|
||||
Licensor for the purpose of discussing and improving the Work, but
|
||||
excluding communication that is conspicuously marked or otherwise
|
||||
designated in writing by the copyright owner as "Not a Contribution."
|
||||
|
||||
"Contributor" shall mean Licensor and any individual or Legal Entity
|
||||
on behalf of whom a Contribution has been received by Licensor and
|
||||
subsequently incorporated within the Work.
|
||||
|
||||
2. Grant of Copyright License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
copyright license to reproduce, prepare Derivative Works of,
|
||||
publicly display, publicly perform, sublicense, and distribute the
|
||||
Work and such Derivative Works in Source or Object form.
|
||||
|
||||
3. Grant of Patent License. Subject to the terms and conditions of
|
||||
this License, each Contributor hereby grants to You a perpetual,
|
||||
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
|
||||
(except as stated in this section) patent license to make, have made,
|
||||
use, offer to sell, sell, import, and otherwise transfer the Work,
|
||||
where such license applies only to those patent claims licensable
|
||||
by such Contributor that are necessarily infringed by their
|
||||
Contribution(s) alone or by combination of their Contribution(s)
|
||||
with the Work to which such Contribution(s) was submitted. If You
|
||||
institute patent litigation against any entity (including a
|
||||
cross-claim or counterclaim in a lawsuit) alleging that the Work
|
||||
or a Contribution incorporated within the Work constitutes direct
|
||||
or contributory patent infringement, then any patent licenses
|
||||
granted to You under this License for that Work shall terminate
|
||||
as of the date such litigation is filed.
|
||||
|
||||
4. Redistribution. You may reproduce and distribute copies of the
|
||||
Work or Derivative Works thereof in any medium, with or without
|
||||
modifications, and in Source or Object form, provided that You
|
||||
meet the following conditions:
|
||||
|
||||
(a) You must give any other recipients of the Work or
|
||||
Derivative Works a copy of this License; and
|
||||
|
||||
(b) You must cause any modified files to carry prominent notices
|
||||
stating that You changed the files; and
|
||||
|
||||
(c) You must retain, in the Source form of any Derivative Works
|
||||
that You distribute, all copyright, patent, trademark, and
|
||||
attribution notices from the Source form of the Work,
|
||||
excluding those notices that do not pertain to any part of
|
||||
the Derivative Works; and
|
||||
|
||||
(d) If the Work includes a "NOTICE" text file as part of its
|
||||
distribution, then any Derivative Works that You distribute must
|
||||
include a readable copy of the attribution notices contained
|
||||
within such NOTICE file, excluding those notices that do not
|
||||
pertain to any part of the Derivative Works, in at least one
|
||||
of the following places: within a NOTICE text file distributed
|
||||
as part of the Derivative Works; within the Source form or
|
||||
documentation, if provided along with the Derivative Works; or,
|
||||
within a display generated by the Derivative Works, if and
|
||||
wherever such third-party notices normally appear. The contents
|
||||
of the NOTICE file are for informational purposes only and
|
||||
do not modify the License. You may add Your own attribution
|
||||
notices within Derivative Works that You distribute, alongside
|
||||
or as an addendum to the NOTICE text from the Work, provided
|
||||
that such additional attribution notices cannot be construed
|
||||
as modifying the License.
|
||||
|
||||
You may add Your own copyright statement to Your modifications and
|
||||
may provide additional or different license terms and conditions
|
||||
for use, reproduction, or distribution of Your modifications, or
|
||||
for any such Derivative Works as a whole, provided Your use,
|
||||
reproduction, and distribution of the Work otherwise complies with
|
||||
the conditions stated in this License.
|
||||
|
||||
5. Submission of Contributions. Unless You explicitly state otherwise,
|
||||
any Contribution intentionally submitted for inclusion in the Work
|
||||
by You to the Licensor shall be under the terms and conditions of
|
||||
this License, without any additional terms or conditions.
|
||||
Notwithstanding the above, nothing herein shall supersede or modify
|
||||
the terms of any separate license agreement you may have executed
|
||||
with Licensor regarding such Contributions.
|
||||
|
||||
6. Trademarks. This License does not grant permission to use the trade
|
||||
names, trademarks, service marks, or product names of the Licensor,
|
||||
except as required for reasonable and customary use in describing the
|
||||
origin of the Work and reproducing the content of the NOTICE file.
|
||||
|
||||
7. Disclaimer of Warranty. Unless required by applicable law or
|
||||
agreed to in writing, Licensor provides the Work (and each
|
||||
Contributor provides its Contributions) on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
|
||||
implied, including, without limitation, any warranties or conditions
|
||||
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
|
||||
PARTICULAR PURPOSE. You are solely responsible for determining the
|
||||
appropriateness of using or redistributing the Work and assume any
|
||||
risks associated with Your exercise of permissions under this License.
|
||||
|
||||
8. Limitation of Liability. In no event and under no legal theory,
|
||||
whether in tort (including negligence), contract, or otherwise,
|
||||
unless required by applicable law (such as deliberate and grossly
|
||||
negligent acts) or agreed to in writing, shall any Contributor be
|
||||
liable to You for damages, including any direct, indirect, special,
|
||||
incidental, or consequential damages of any character arising as a
|
||||
result of this License or out of the use or inability to use the
|
||||
Work (including but not limited to damages for loss of goodwill,
|
||||
work stoppage, computer failure or malfunction, or any and all
|
||||
other commercial damages or losses), even if such Contributor
|
||||
has been advised of the possibility of such damages.
|
||||
|
||||
9. Accepting Warranty or Additional Liability. While redistributing
|
||||
the Work or Derivative Works thereof, You may choose to offer,
|
||||
and charge a fee for, acceptance of support, warranty, indemnity,
|
||||
or other liability obligations and/or rights consistent with this
|
||||
License. However, in accepting such obligations, You may act only
|
||||
on Your own behalf and on Your sole responsibility, not on behalf
|
||||
of any other Contributor, and only if You agree to indemnify,
|
||||
defend, and hold each Contributor harmless for any liability
|
||||
incurred by, or claims asserted against, such Contributor by reason
|
||||
of your accepting any such warranty or additional liability.
|
||||
|
||||
END OF TERMS AND CONDITIONS
|
||||
|
||||
APPENDIX: How to apply the Apache License to your work.
|
||||
|
||||
To apply the Apache License to your work, attach the following
|
||||
boilerplate notice, with the fields enclosed by brackets "{}"
|
||||
replaced with your own identifying information. (Don't include
|
||||
the brackets!) The text should be enclosed in the appropriate
|
||||
comment syntax for the file format. We also recommend that a
|
||||
file or class name and description of purpose be included on the
|
||||
same "printed page" as the copyright notice for easier
|
||||
identification within third-party archives.
|
||||
|
||||
Copyright 2016 Docker, Inc.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
26
vendor/github.com/docker/compose-on-kubernetes/api/config.go
generated
vendored
26
vendor/github.com/docker/compose-on-kubernetes/api/config.go
generated
vendored
@@ -1,26 +0,0 @@
package apis

import (
"os"
"path/filepath"

"github.com/docker/docker/pkg/homedir"
"k8s.io/client-go/tools/clientcmd"
)

// NewKubernetesConfig resolves the path to the desired Kubernetes configuration file based on
// the KUBECONFIG environment variable and command line flags.
func NewKubernetesConfig(configPath string) clientcmd.ClientConfig {
kubeConfig := configPath
if kubeConfig == "" {
if config := os.Getenv("KUBECONFIG"); config != "" {
kubeConfig = config
} else {
kubeConfig = filepath.Join(homedir.Get(), ".kube/config")
}
}

return clientcmd.NewNonInteractiveDeferredLoadingClientConfig(
&clientcmd.ClientConfigLoadingRules{ExplicitPath: kubeConfig},
&clientcmd.ConfigOverrides{})
}
4
vendor/github.com/docker/compose-on-kubernetes/api/doc.go
generated
vendored
4
vendor/github.com/docker/compose-on-kubernetes/api/doc.go
generated
vendored
@@ -1,4 +0,0 @@
//
// +domain=docker.com

package apis
26
vendor/github.com/klauspost/compress/README.md
generated
vendored
26
vendor/github.com/klauspost/compress/README.md
generated
vendored
@@ -17,6 +17,17 @@ This package provides various compression algorithms.

# changelog

* Feb 22, 2022 (v1.14.4)
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)

* Feb 17, 2022 (v1.14.3)
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
* flate: Faster decompression speed, ~5-10%. [#483](https://github.com/klauspost/compress/pull/483)
* s2: Faster compression with Go v1.18 and amd64 microarch level 3+. [#484](https://github.com/klauspost/compress/pull/484) [#486](https://github.com/klauspost/compress/pull/486)

* Jan 25, 2022 (v1.14.2)
* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
@@ -61,6 +72,9 @@ This package provides various compression algorithms.
* zstd: Detect short invalid signatures [#382](https://github.com/klauspost/compress/pull/382)
* zstd: Spawn decoder goroutine only if needed. [#380](https://github.com/klauspost/compress/pull/380)

<details>
<summary>See changes to v1.12.x</summary>

* May 25, 2021 (v1.12.3)
* deflate: Better/faster Huffman encoding [#374](https://github.com/klauspost/compress/pull/374)
* deflate: Allocate less for history. [#375](https://github.com/klauspost/compress/pull/375)
@@ -82,9 +96,10 @@ This package provides various compression algorithms.
* s2c/s2d/s2sx: Always truncate when writing files [#352](https://github.com/klauspost/compress/pull/352)
* zstd: Reduce memory usage further when using [WithLowerEncoderMem](https://pkg.go.dev/github.com/klauspost/compress/zstd#WithLowerEncoderMem) [#346](https://github.com/klauspost/compress/pull/346)
* s2: Fix potential problem with amd64 assembly and profilers [#349](https://github.com/klauspost/compress/pull/349)
</details>

<details>
<summary>See changes prior to v1.12.1</summary>
<summary>See changes to v1.11.x</summary>

* Mar 26, 2021 (v1.11.13)
* zstd: Big speedup on small dictionary encodes [#344](https://github.com/klauspost/compress/pull/344) [#345](https://github.com/klauspost/compress/pull/345)
@@ -143,7 +158,7 @@ This package provides various compression algorithms.
</details>

<details>
<summary>See changes prior to v1.11.0</summary>
<summary>See changes to v1.10.x</summary>

* July 8, 2020 (v1.10.11)
* zstd: Fix extra block when compressing with ReadFrom. [#278](https://github.com/klauspost/compress/pull/278)
@@ -305,11 +320,6 @@ This package provides various compression algorithms.

# deflate usage

* [High Throughput Benchmark](http://blog.klauspost.com/go-gzipdeflate-benchmarks/).
* [Small Payload/Webserver Benchmarks](http://blog.klauspost.com/gzip-performance-for-go-webservers/).
* [Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
* [Re-balancing Deflate Compression Levels](https://blog.klauspost.com/rebalancing-deflate-compression-levels/)

The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:

| old import | new import | Documentation
@@ -331,6 +341,8 @@ Memory usage is typically 1MB for a Writer. stdlib is in the same range.
If you expect to have a lot of concurrently allocated Writers consider using
the stateless compress described below.

For compression performance, see: [this spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing).

# Stateless compression

This package offers stateless compression as a special option for gzip/deflate.
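Note: the README's "drop-in replacements" claim means the klauspost packages keep the standard-library APIs, so switching really is just an import change. A minimal sketch:

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/gzip" // drop-in for "compress/gzip"
)

func main() {
	var b bytes.Buffer
	w := gzip.NewWriter(&b) // identical API to the standard library
	if _, err := w.Write([]byte("hello, compression")); err != nil {
		panic(err)
	}
	if err := w.Close(); err != nil {
		panic(err)
	}
	fmt.Println("compressed size:", b.Len())
}
```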
121
vendor/github.com/klauspost/compress/huff0/bitreader.go
generated
vendored
121
vendor/github.com/klauspost/compress/huff0/bitreader.go
generated
vendored
@@ -8,115 +8,10 @@ package huff0
import (
"encoding/binary"
"errors"
"fmt"
"io"
)

// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
type bitReader struct {
in []byte
off uint // next byte to read is at in[off - 1]
value uint64
bitsRead uint8
}

// init initializes and resets the bit reader.
func (b *bitReader) init(in []byte) error {
if len(in) < 1 {
return errors.New("corrupt stream: too short")
}
b.in = in
b.off = uint(len(in))
// The highest bit of the last byte indicates where to start
v := in[len(in)-1]
if v == 0 {
return errors.New("corrupt stream, did not find end of stream")
}
b.bitsRead = 64
b.value = 0
if len(in) >= 8 {
b.fillFastStart()
} else {
b.fill()
b.fill()
}
b.bitsRead += 8 - uint8(highBit32(uint32(v)))
return nil
}

// peekBitsFast requires that at least one bit is requested every time.
// There are no checks if the buffer is filled.
func (b *bitReader) peekBitsFast(n uint8) uint16 {
const regMask = 64 - 1
v := uint16((b.value << (b.bitsRead & regMask)) >> ((regMask + 1 - n) & regMask))
return v
}

// fillFast() will make sure at least 32 bits are available.
// There must be at least 4 bytes available.
func (b *bitReader) fillFast() {
if b.bitsRead < 32 {
return
}

// 2 bounds checks.
v := b.in[b.off-4 : b.off]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
b.off -= 4
}

func (b *bitReader) advance(n uint8) {
b.bitsRead += n
}

// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() {
// Do single re-slice to avoid bounds checks.
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
b.bitsRead = 0
b.off -= 8
}

// fill() will make sure at least 32 bits are available.
func (b *bitReader) fill() {
if b.bitsRead < 32 {
return
}
if b.off > 4 {
v := b.in[b.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32
b.off -= 4
return
}
for b.off > 0 {
b.value = (b.value << 8) | uint64(b.in[b.off-1])
b.bitsRead -= 8
b.off--
}
}

// finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}

// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error {
// Release reference.
b.in = nil
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
return nil
}

// bitReader reads a bitstream in reverse.
// The last set bit indicates the start of the stream and is used
// for aligning the input.
@@ -213,10 +108,17 @@ func (b *bitReaderBytes) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}

func (b *bitReaderBytes) remaining() uint {
return b.off*8 + uint(64-b.bitsRead)
}

// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderBytes) close() error {
// Release reference.
b.in = nil
if b.remaining() > 0 {
return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
}
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
@@ -318,10 +220,17 @@ func (b *bitReaderShifted) finished() bool {
return b.off == 0 && b.bitsRead >= 64
}

func (b *bitReaderShifted) remaining() uint {
return b.off*8 + uint(64-b.bitsRead)
}

// close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReaderShifted) close() error {
// Release reference.
b.in = nil
if b.remaining() > 0 {
return fmt.Errorf("corrupt input: %d bits remain on stream", b.remaining())
}
if b.bitsRead > 64 {
return io.ErrUnexpectedEOF
}
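Note: the new `remaining()` helper added to both reader variants counts unread input as whole bytes still in the slice plus bits still queued in the 64-bit shift register, and `close()` now reports any leftover bits as corruption. The arithmetic, with hypothetical state values:

```go
package main

import "fmt"

func main() {
	// Hypothetical reader state: 3 bytes unread in the input slice, 10 of
	// the 64 bits in the shift register already consumed.
	var off, bitsRead uint = 3, 10
	remaining := off*8 + (64 - bitsRead) // 78 unread bits, as remaining() computes above
	fmt.Println(remaining)
}
```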
9
vendor/github.com/klauspost/compress/huff0/compress.go
generated
vendored
9
vendor/github.com/klauspost/compress/huff0/compress.go
generated
vendored
@@ -2,6 +2,7 @@ package huff0

import (
"fmt"
"math"
"runtime"
"sync"
)
@@ -289,6 +290,10 @@ func (s *Scratch) compress4X(src []byte) ([]byte, error) {
if err != nil {
return nil, err
}
if len(s.Out)-idx > math.MaxUint16 {
// We cannot store the size in the jump table
return nil, ErrIncompressible
}
// Write compressed length as little endian before block.
if i < 3 {
// Last length is not written.
@@ -332,6 +337,10 @@ func (s *Scratch) compress4Xp(src []byte) ([]byte, error) {
return nil, errs[i]
}
o := s.tmpOut[i]
if len(o) > math.MaxUint16 {
// We cannot store the size in the jump table
return nil, ErrIncompressible
}
// Write compressed length as little endian before block.
if i < 3 {
// Last length is not written.
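Note: the two guards added above enforce a format invariant: a 4X block is prefixed with a "jump table" of three little-endian uint16 stream lengths, so a sub-stream longer than 65535 bytes simply cannot be represented, and encoding must give up with `ErrIncompressible` instead of writing a truncated length. A sketch of the invariant (`errIncompressible` is a stand-in for `huff0.ErrIncompressible`):

```go
package example

import (
	"encoding/binary"
	"errors"
	"math"
)

var errIncompressible = errors.New("incompressible") // stand-in for huff0.ErrIncompressible

// writeJumpTable sketches the invariant enforced above: the first three
// compressed streams must each fit a uint16 slot in the jump table; the
// fourth length is implied and never written.
func writeJumpTable(dst []byte, streams [4][]byte) error {
	for i := 0; i < 3; i++ {
		if len(streams[i]) > math.MaxUint16 {
			return errIncompressible
		}
		binary.LittleEndian.PutUint16(dst[i*2:], uint16(len(streams[i])))
	}
	return nil
}
```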
384
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
384
vendor/github.com/klauspost/compress/huff0/decompress.go
generated
vendored
@@ -4,6 +4,7 @@ import (
"errors"
"fmt"
"io"
"sync"

"github.com/klauspost/compress/fse"
)
@@ -216,6 +217,7 @@ func (s *Scratch) Decoder() *Decoder {
return &Decoder{
dt: s.dt,
actualTableLog: s.actualTableLog,
bufs: &s.decPool,
}
}

@@ -223,6 +225,15 @@ func (s *Scratch) Decoder() *Decoder {
type Decoder struct {
dt dTable
actualTableLog uint8
bufs *sync.Pool
}

func (d *Decoder) buffer() *[4][256]byte {
buf, ok := d.bufs.Get().(*[4][256]byte)
if ok {
return buf
}
return &[4][256]byte{}
}

// Decompress1X will decompress a 1X encoded stream.
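Note: the long series of hunks that follows replaces each stack-allocated `var buf [256]byte` with a pooled `*[4][256]byte` obtained from `Decoder.buffer()`, so the hot decode paths stop allocating; the price of pooling is that every return path, including the error paths, must hand the buffer back with `d.bufs.Put`. A minimal sketch of the pattern using only the standard library:

```go
package example

import "sync"

var bufPool sync.Pool // holds *[4][256]byte, like Scratch.decPool feeding Decoder.bufs

// getBuf mirrors Decoder.buffer() above: reuse a pooled table when one
// is available, otherwise allocate a fresh one.
func getBuf() *[4][256]byte {
	if b, ok := bufPool.Get().(*[4][256]byte); ok {
		return b
	}
	return &[4][256]byte{}
}

func decodeSomething() []byte {
	buf := getBuf()
	defer bufPool.Put(buf) // the vendored code instead calls Put explicitly on every return path
	// ... decode into buf[0..3], then copy the result out ...
	return append([]byte(nil), buf[0][:8]...)
}
```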
@@ -249,7 +260,8 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:tlSize]

// Use temp table to avoid bound checks/append penalty.
var buf [256]byte
bufs := d.buffer()
buf := &bufs[0]
var off uint8

for br.off >= 8 {
@@ -277,6 +289,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -284,6 +297,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
}

if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -310,6 +324,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
}
}
if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -319,6 +334,7 @@ func (d *Decoder) Decompress1X(dst, src []byte) ([]byte, error) {
bitsLeft -= nBits
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}

@@ -341,7 +357,8 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]

// Use temp table to avoid bound checks/append penalty.
var buf [256]byte
bufs := d.buffer()
buf := &bufs[0]
var off uint8

switch d.actualTableLog {
@@ -369,6 +386,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -398,6 +416,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
if off == 0 {
if len(dst)+256 > maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
dst = append(dst, buf[:]...)
@@ -426,6 +445,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -455,6 +475,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -484,6 +505,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -513,6 +535,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -542,6 +565,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -571,6 +595,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -578,10 +603,12 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
}
}
default:
d.bufs.Put(bufs)
return nil, fmt.Errorf("invalid tablelog: %d", d.actualTableLog)
}

if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -601,6 +628,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
}
if len(dst) >= maxDecodedSize {
br.close()
d.bufs.Put(bufs)
return nil, ErrMaxDecodedSizeExceeded
}
v := dt[br.peekByteFast()>>shift]
@@ -609,6 +637,7 @@ func (d *Decoder) decompress1X8Bit(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}

@@ -628,7 +657,8 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
dt := d.dt.single[:256]

// Use temp table to avoid bound checks/append penalty.
var buf [256]byte
bufs := d.buffer()
buf := &bufs[0]
var off uint8

const shift = 56
@@ -655,6 +685,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
off += 4
if off == 0 {
if len(dst)+256 > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -663,6 +694,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}

if len(dst)+int(off) > maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -679,6 +711,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
}
}
if len(dst) >= maxDecodedSize {
d.bufs.Put(bufs)
br.close()
return nil, ErrMaxDecodedSizeExceeded
}
@@ -688,6 +721,7 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
bitsLeft -= int8(nBits)
dst = append(dst, uint8(v.entry>>8))
}
d.bufs.Put(bufs)
return dst, br.close()
}

@@ -707,6 +741,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
}

var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
@@ -735,12 +770,12 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize]

// Use temp table to avoid bound checks/append penalty.
var buf [256]byte
buf := d.buffer()
var off uint8
var decoded int

// Decode 2 values from each decoder/loop.
const bufoff = 256 / 4
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -758,8 +793,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[off+bufoff*stream] = uint8(v.entry >> 8)
buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)

val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
@@ -767,8 +802,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}

{
@@ -783,8 +818,8 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[off+bufoff*stream] = uint8(v.entry >> 8)
buf[off+bufoff*stream2] = uint8(v2.entry >> 8)
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)

val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
@@ -792,25 +827,26 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[off+bufoff*stream+1] = uint8(v.entry >> 8)
buf[off+bufoff*stream2+1] = uint8(v2.entry >> 8)
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}

off += 2

if off == bufoff {
if off == 0 {
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[:bufoff])
copy(out[dstEvery:], buf[bufoff:bufoff*2])
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
off = 0
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += 256
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
@@ -818,41 +854,31 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[:off])
copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}

// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.off*8 + uint(64-br.bitsRead)
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if false && br.bitsRead >= 32 {
if br.off >= 4 {
v := br.in[br.off-4:]
v = v[:4]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
br.value = (br.value << 32) | uint64(low)
br.bitsRead -= 32
br.off -= 4
} else {
for br.off > 0 {
br.value = (br.value << 8) | uint64(br.in[br.off-1])
br.bitsRead -= 8
br.off--
}
}
}
// end inline...
if offset >= len(out) {
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}

@@ -865,12 +891,17 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
@@ -916,12 +947,12 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
single := d.dt.single[:tlSize]

// Use temp table to avoid bound checks/append penalty.
var buf [256]byte
buf := d.buffer()
var off uint8
var decoded int

// Decode 4 values from each decoder/loop.
const bufoff = 256 / 4
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
@@ -942,8 +973,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
buf[off+bufoff*stream2] = uint8(v2 >> 8)
buf[stream][off] = uint8(v >> 8)
buf[stream2][off] = uint8(v2 >> 8)

v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -951,8 +982,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
buf[stream][off+1] = uint8(v >> 8)
buf[stream2][off+1] = uint8(v2 >> 8)

v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -960,8 +991,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
buf[off+bufoff*stream+2] = uint8(v >> 8)
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
buf[stream][off+2] = uint8(v >> 8)
buf[stream2][off+2] = uint8(v2 >> 8)

v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -969,8 +1000,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
buf[off+bufoff*stream+3] = uint8(v >> 8)
buf[stream][off+3] = uint8(v >> 8)
buf[stream2][off+3] = uint8(v2 >> 8)
}

{
@@ -987,8 +1018,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
buf[off+bufoff*stream] = uint8(v >> 8)
buf[off+bufoff*stream2] = uint8(v2 >> 8)
buf[stream][off] = uint8(v >> 8)
buf[stream2][off] = uint8(v2 >> 8)

v = single[uint8(br1.value>>shift)].entry
v2 = single[uint8(br2.value>>shift)].entry
@@ -996,8 +1027,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
br1.value <<= v & 63
br2.bitsRead += uint8(v2)
br2.value <<= v2 & 63
buf[off+bufoff*stream+1] = uint8(v >> 8)
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
|
||||
buf[stream][off+1] = uint8(v >> 8)
|
||||
buf[stream2][off+1] = uint8(v2 >> 8)
|
||||
|
||||
v = single[uint8(br1.value>>shift)].entry
|
||||
v2 = single[uint8(br2.value>>shift)].entry
|
||||
@@ -1005,8 +1036,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[off+bufoff*stream+2] = uint8(v >> 8)
|
||||
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
|
||||
buf[stream][off+2] = uint8(v >> 8)
|
||||
buf[stream2][off+2] = uint8(v2 >> 8)
|
||||
|
||||
v = single[uint8(br1.value>>shift)].entry
|
||||
v2 = single[uint8(br2.value>>shift)].entry
|
||||
@@ -1014,25 +1045,26 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
|
||||
buf[off+bufoff*stream+3] = uint8(v >> 8)
|
||||
buf[stream][off+3] = uint8(v >> 8)
|
||||
buf[stream2][off+3] = uint8(v2 >> 8)
|
||||
}
|
||||
|
||||
off += 4
|
||||
|
||||
if off == bufoff {
|
||||
if off == 0 {
|
||||
if bufoff > dstEvery {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 1")
|
||||
}
|
||||
copy(out, buf[:bufoff])
|
||||
copy(out[dstEvery:], buf[bufoff:bufoff*2])
|
||||
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
|
||||
copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
|
||||
off = 0
|
||||
copy(out, buf[0][:])
|
||||
copy(out[dstEvery:], buf[1][:])
|
||||
copy(out[dstEvery*2:], buf[2][:])
|
||||
copy(out[dstEvery*3:], buf[3][:])
|
||||
out = out[bufoff:]
|
||||
decoded += 256
|
||||
decoded += bufoff * 4
|
||||
// There must at least be 3 buffers left.
|
||||
if len(out) < dstEvery*3 {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 2")
|
||||
}
|
||||
}
|
||||
@@ -1040,23 +1072,31 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
||||
if off > 0 {
|
||||
ioff := int(off)
|
||||
if len(out) < dstEvery*3+ioff {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 3")
|
||||
}
|
||||
copy(out, buf[:off])
|
||||
copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
|
||||
copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
|
||||
copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
|
||||
copy(out, buf[0][:off])
|
||||
copy(out[dstEvery:], buf[1][:off])
|
||||
copy(out[dstEvery*2:], buf[2][:off])
|
||||
copy(out[dstEvery*3:], buf[3][:off])
|
||||
decoded += int(off) * 4
|
||||
out = out[off:]
|
||||
}
|
||||
|
||||
// Decode remaining.
|
||||
// Decode remaining.
|
||||
remainBytes := dstEvery - (decoded / 4)
|
||||
for i := range br {
|
||||
offset := dstEvery * i
|
||||
endsAt := offset + remainBytes
|
||||
if endsAt > len(out) {
|
||||
endsAt = len(out)
|
||||
}
|
||||
br := &br[i]
|
||||
bitsLeft := int(br.off*8) + int(64-br.bitsRead)
|
||||
bitsLeft := br.remaining()
|
||||
for bitsLeft > 0 {
|
||||
if br.finished() {
|
||||
d.bufs.Put(buf)
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
if br.bitsRead >= 56 {
|
||||
@@ -1076,7 +1116,8 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
||||
}
|
||||
}
|
||||
// end inline...
|
||||
if offset >= len(out) {
|
||||
if offset >= endsAt {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 4")
|
||||
}
|
||||
|
||||
@@ -1084,16 +1125,22 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
|
||||
v := single[uint8(br.value>>shift)].entry
|
||||
nBits := uint8(v)
|
||||
br.advance(nBits)
|
||||
bitsLeft -= int(nBits)
|
||||
bitsLeft -= uint(nBits)
|
||||
out[offset] = uint8(v >> 8)
|
||||
offset++
|
||||
}
|
||||
if offset != endsAt {
|
||||
d.bufs.Put(buf)
|
||||
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
|
||||
}
|
||||
decoded += offset - dstEvery*i
|
||||
err = br.close()
|
||||
if err != nil {
|
||||
d.bufs.Put(buf)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d.bufs.Put(buf)
|
||||
if dstSize != decoded {
|
||||
return nil, errors.New("corruption detected: short output block")
|
||||
}
|
||||
@@ -1135,12 +1182,12 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
|
||||
single := d.dt.single[:tlSize]
|
||||
|
||||
// Use temp table to avoid bound checks/append penalty.
|
||||
var buf [256]byte
|
||||
buf := d.buffer()
|
||||
var off uint8
|
||||
var decoded int
|
||||
|
||||
// Decode 4 values from each decoder/loop.
|
||||
const bufoff = 256 / 4
|
||||
const bufoff = 256
|
||||
for {
|
||||
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
|
||||
break
|
||||
@@ -1150,104 +1197,109 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
|
||||
// Interleave 2 decodes.
|
||||
const stream = 0
|
||||
const stream2 = 1
|
||||
br[stream].fillFast()
|
||||
br[stream2].fillFast()
|
||||
br1 := &br[stream]
|
||||
br2 := &br[stream2]
|
||||
br1.fillFast()
|
||||
br2.fillFast()
|
||||
|
||||
v := single[uint8(br[stream].value>>shift)].entry
|
||||
v2 := single[uint8(br[stream2].value>>shift)].entry
|
||||
br[stream].bitsRead += uint8(v)
|
||||
br[stream].value <<= v & 63
|
||||
br[stream2].bitsRead += uint8(v2)
|
||||
br[stream2].value <<= v2 & 63
|
||||
buf[off+bufoff*stream] = uint8(v >> 8)
|
||||
buf[off+bufoff*stream2] = uint8(v2 >> 8)
|
||||
v := single[uint8(br1.value>>shift)].entry
|
||||
v2 := single[uint8(br2.value>>shift)].entry
|
||||
br1.bitsRead += uint8(v)
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[stream][off] = uint8(v >> 8)
|
||||
buf[stream2][off] = uint8(v2 >> 8)
|
||||
|
||||
v = single[uint8(br[stream].value>>shift)].entry
|
||||
v2 = single[uint8(br[stream2].value>>shift)].entry
|
||||
br[stream].bitsRead += uint8(v)
|
||||
br[stream].value <<= v & 63
|
||||
br[stream2].bitsRead += uint8(v2)
|
||||
br[stream2].value <<= v2 & 63
|
||||
buf[off+bufoff*stream+1] = uint8(v >> 8)
|
||||
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
|
||||
v = single[uint8(br1.value>>shift)].entry
|
||||
v2 = single[uint8(br2.value>>shift)].entry
|
||||
br1.bitsRead += uint8(v)
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[stream][off+1] = uint8(v >> 8)
|
||||
buf[stream2][off+1] = uint8(v2 >> 8)
|
||||
|
||||
v = single[uint8(br[stream].value>>shift)].entry
|
||||
v2 = single[uint8(br[stream2].value>>shift)].entry
|
||||
br[stream].bitsRead += uint8(v)
|
||||
br[stream].value <<= v & 63
|
||||
br[stream2].bitsRead += uint8(v2)
|
||||
br[stream2].value <<= v2 & 63
|
||||
buf[off+bufoff*stream+2] = uint8(v >> 8)
|
||||
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
|
||||
v = single[uint8(br1.value>>shift)].entry
|
||||
v2 = single[uint8(br2.value>>shift)].entry
|
||||
br1.bitsRead += uint8(v)
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[stream][off+2] = uint8(v >> 8)
|
||||
buf[stream2][off+2] = uint8(v2 >> 8)
|
||||
|
||||
v = single[uint8(br[stream].value>>shift)].entry
|
||||
v2 = single[uint8(br[stream2].value>>shift)].entry
|
||||
br[stream].bitsRead += uint8(v)
|
||||
br[stream].value <<= v & 63
|
||||
br[stream2].bitsRead += uint8(v2)
|
||||
br[stream2].value <<= v2 & 63
|
||||
buf[off+bufoff*stream+3] = uint8(v >> 8)
|
||||
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
|
||||
v = single[uint8(br1.value>>shift)].entry
|
||||
v2 = single[uint8(br2.value>>shift)].entry
|
||||
br1.bitsRead += uint8(v)
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[stream][off+3] = uint8(v >> 8)
|
||||
buf[stream2][off+3] = uint8(v2 >> 8)
|
||||
}
|
||||
|
||||
{
|
||||
const stream = 2
|
||||
const stream2 = 3
|
||||
br[stream].fillFast()
|
||||
br[stream2].fillFast()
|
||||
br1 := &br[stream]
|
||||
br2 := &br[stream2]
|
||||
br1.fillFast()
|
||||
br2.fillFast()
|
||||
|
||||
v := single[uint8(br[stream].value>>shift)].entry
|
||||
v2 := single[uint8(br[stream2].value>>shift)].entry
|
||||
br[stream].bitsRead += uint8(v)
|
||||
br[stream].value <<= v & 63
|
||||
br[stream2].bitsRead += uint8(v2)
|
||||
br[stream2].value <<= v2 & 63
|
||||
buf[off+bufoff*stream] = uint8(v >> 8)
|
||||
buf[off+bufoff*stream2] = uint8(v2 >> 8)
|
||||
v := single[uint8(br1.value>>shift)].entry
|
||||
v2 := single[uint8(br2.value>>shift)].entry
|
||||
br1.bitsRead += uint8(v)
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[stream][off] = uint8(v >> 8)
|
||||
buf[stream2][off] = uint8(v2 >> 8)
|
||||
|
||||
v = single[uint8(br[stream].value>>shift)].entry
|
||||
v2 = single[uint8(br[stream2].value>>shift)].entry
|
||||
br[stream].bitsRead += uint8(v)
|
||||
br[stream].value <<= v & 63
|
||||
br[stream2].bitsRead += uint8(v2)
|
||||
br[stream2].value <<= v2 & 63
|
||||
buf[off+bufoff*stream+1] = uint8(v >> 8)
|
||||
buf[off+bufoff*stream2+1] = uint8(v2 >> 8)
|
||||
v = single[uint8(br1.value>>shift)].entry
|
||||
v2 = single[uint8(br2.value>>shift)].entry
|
||||
br1.bitsRead += uint8(v)
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[stream][off+1] = uint8(v >> 8)
|
||||
buf[stream2][off+1] = uint8(v2 >> 8)
|
||||
|
||||
v = single[uint8(br[stream].value>>shift)].entry
|
||||
v2 = single[uint8(br[stream2].value>>shift)].entry
|
||||
br[stream].bitsRead += uint8(v)
|
||||
br[stream].value <<= v & 63
|
||||
br[stream2].bitsRead += uint8(v2)
|
||||
br[stream2].value <<= v2 & 63
|
||||
buf[off+bufoff*stream+2] = uint8(v >> 8)
|
||||
buf[off+bufoff*stream2+2] = uint8(v2 >> 8)
|
||||
v = single[uint8(br1.value>>shift)].entry
|
||||
v2 = single[uint8(br2.value>>shift)].entry
|
||||
br1.bitsRead += uint8(v)
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[stream][off+2] = uint8(v >> 8)
|
||||
buf[stream2][off+2] = uint8(v2 >> 8)
|
||||
|
||||
v = single[uint8(br[stream].value>>shift)].entry
|
||||
v2 = single[uint8(br[stream2].value>>shift)].entry
|
||||
br[stream].bitsRead += uint8(v)
|
||||
br[stream].value <<= v & 63
|
||||
br[stream2].bitsRead += uint8(v2)
|
||||
br[stream2].value <<= v2 & 63
|
||||
buf[off+bufoff*stream+3] = uint8(v >> 8)
|
||||
buf[off+bufoff*stream2+3] = uint8(v2 >> 8)
|
||||
v = single[uint8(br1.value>>shift)].entry
|
||||
v2 = single[uint8(br2.value>>shift)].entry
|
||||
br1.bitsRead += uint8(v)
|
||||
br1.value <<= v & 63
|
||||
br2.bitsRead += uint8(v2)
|
||||
br2.value <<= v2 & 63
|
||||
buf[stream][off+3] = uint8(v >> 8)
|
||||
buf[stream2][off+3] = uint8(v2 >> 8)
|
||||
}
|
||||
|
||||
off += 4
|
||||
|
||||
if off == bufoff {
|
||||
if off == 0 {
|
||||
if bufoff > dstEvery {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 1")
|
||||
}
|
||||
copy(out, buf[:bufoff])
|
||||
copy(out[dstEvery:], buf[bufoff:bufoff*2])
|
||||
copy(out[dstEvery*2:], buf[bufoff*2:bufoff*3])
|
||||
copy(out[dstEvery*3:], buf[bufoff*3:bufoff*4])
|
||||
off = 0
|
||||
copy(out, buf[0][:])
|
||||
copy(out[dstEvery:], buf[1][:])
|
||||
copy(out[dstEvery*2:], buf[2][:])
|
||||
copy(out[dstEvery*3:], buf[3][:])
|
||||
out = out[bufoff:]
|
||||
decoded += 256
|
||||
decoded += bufoff * 4
|
||||
// There must at least be 3 buffers left.
|
||||
if len(out) < dstEvery*3 {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 2")
|
||||
}
|
||||
}
|
||||
@@ -1257,21 +1309,27 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
|
||||
if len(out) < dstEvery*3+ioff {
|
||||
return nil, errors.New("corruption detected: stream overrun 3")
|
||||
}
|
||||
copy(out, buf[:off])
|
||||
copy(out[dstEvery:dstEvery+ioff], buf[bufoff:bufoff*2])
|
||||
copy(out[dstEvery*2:dstEvery*2+ioff], buf[bufoff*2:bufoff*3])
|
||||
copy(out[dstEvery*3:dstEvery*3+ioff], buf[bufoff*3:bufoff*4])
|
||||
copy(out, buf[0][:off])
|
||||
copy(out[dstEvery:], buf[1][:off])
|
||||
copy(out[dstEvery*2:], buf[2][:off])
|
||||
copy(out[dstEvery*3:], buf[3][:off])
|
||||
decoded += int(off) * 4
|
||||
out = out[off:]
|
||||
}
|
||||
|
||||
// Decode remaining.
|
||||
remainBytes := dstEvery - (decoded / 4)
|
||||
for i := range br {
|
||||
offset := dstEvery * i
|
||||
endsAt := offset + remainBytes
|
||||
if endsAt > len(out) {
|
||||
endsAt = len(out)
|
||||
}
|
||||
br := &br[i]
|
||||
bitsLeft := int(br.off*8) + int(64-br.bitsRead)
|
||||
bitsLeft := br.remaining()
|
||||
for bitsLeft > 0 {
|
||||
if br.finished() {
|
||||
d.bufs.Put(buf)
|
||||
return nil, io.ErrUnexpectedEOF
|
||||
}
|
||||
if br.bitsRead >= 56 {
|
||||
@@ -1291,7 +1349,8 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
|
||||
}
|
||||
}
|
||||
// end inline...
|
||||
if offset >= len(out) {
|
||||
if offset >= endsAt {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 4")
|
||||
}
|
||||
|
||||
@@ -1299,16 +1358,23 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
|
||||
v := single[br.peekByteFast()].entry
|
||||
nBits := uint8(v)
|
||||
br.advance(nBits)
|
||||
bitsLeft -= int(nBits)
|
||||
bitsLeft -= uint(nBits)
|
||||
out[offset] = uint8(v >> 8)
|
||||
offset++
|
||||
}
|
||||
if offset != endsAt {
|
||||
d.bufs.Put(buf)
|
||||
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
|
||||
}
|
||||
|
||||
decoded += offset - dstEvery*i
|
||||
err = br.close()
|
||||
if err != nil {
|
||||
d.bufs.Put(buf)
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d.bufs.Put(buf)
|
||||
if dstSize != decoded {
|
||||
return nil, errors.New("corruption detected: short output block")
|
||||
}
|
||||
2 vendor/github.com/klauspost/compress/huff0/huff0.go generated vendored
@@ -8,6 +8,7 @@ import (
	"fmt"
	"math"
	"math/bits"
	"sync"

	"github.com/klauspost/compress/fse"
)
@@ -116,6 +117,7 @@ type Scratch struct {
	nodes      []nodeElt
	tmpOut     [4][]byte
	fse        *fse.Scratch
	decPool    sync.Pool // *[4][256]byte buffers.
	huffWeight [maxSymbolValue + 1]byte
}

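The new `decPool` field pools the `[4][256]byte` scratch buffers that the rewritten `Decompress4X` loops above index as `buf[stream][off]`. A minimal sketch of the same `sync.Pool` pattern (illustrative names only, not the library's internals):

```Go
package main

import (
	"fmt"
	"sync"
)

// Pool of four 256-byte work buffers, mirroring the decPool field above.
var bufPool = sync.Pool{
	New: func() interface{} { return new([4][256]byte) },
}

func decodeChunk() int {
	buf := bufPool.Get().(*[4][256]byte) // reuse a buffer when one is free
	defer bufPool.Put(buf)               // hand it back for the next caller
	buf[0][0] = 1                        // ... decode the four streams into buf ...
	return len(buf)
}

func main() {
	fmt.Println(decodeChunk()) // 4
}
```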
34 vendor/github.com/klauspost/compress/zstd/README.md generated vendored
@@ -78,6 +78,9 @@ of a stream. This is independent of the `WithEncoderConcurrency(n)`, but that is
in the future. So if you want to limit concurrency for future updates, specify the concurrency
you would like.

If you would like stream encoding to be done without spawning async goroutines, use `WithEncoderConcurrency(1)`
which will compress input as each block is completed, blocking on writes until each has completed.
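A minimal sketch of that synchronous setup (stdin/stdout stand in for your streams; error handling kept short):

```Go
package main

import (
	"io"
	"os"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// With concurrency 1 no async goroutines are spawned: each block is
	// compressed and written out before the write returns.
	enc, err := zstd.NewWriter(os.Stdout, zstd.WithEncoderConcurrency(1))
	if err != nil {
		panic(err)
	}
	if _, err := io.Copy(enc, os.Stdin); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil { // flush the final block
		panic(err)
	}
}
```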

You can specify your desired compression level using `WithEncoderLevel()` option. Currently only pre-defined
compression settings can be specified.

@@ -104,7 +107,8 @@ and seems to ignore concatenated streams, even though [it is part of the spec](h
For compressing small blocks, the returned encoder has a function called `EncodeAll(src, dst []byte) []byte`.

`EncodeAll` will encode all input in src and append it to dst.
This function can be called concurrently, but each call will only run on a single goroutine.
This function can be called concurrently.
Each call will only run on a same goroutine as the caller.

Encoded blocks can be concatenated and the result will be the combined input stream.
Data compressed with EncodeAll can be decoded with the Decoder, using either a stream or `DecodeAll`.
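As a concrete, minimal round trip, assuming a long-lived encoder and decoder:

```Go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, _ := zstd.NewWriter(nil) // nil writer: EncodeAll-only usage
	defer enc.Close()
	dec, _ := zstd.NewReader(nil) // nil reader: DecodeAll-only usage
	defer dec.Close()

	compressed := enc.EncodeAll([]byte("hello zstd"), nil)
	plain, err := dec.DecodeAll(compressed, nil)
	fmt.Println(string(plain), err) // hello zstd <nil>
}
```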
@@ -283,8 +287,13 @@ func Decompress(in io.Reader, out io.Writer) error {
}
```

It is important to use the "Close" function when you no longer need the Reader to stop running goroutines.
See "Allocation-less operation" below.
It is important to use the "Close" function when you no longer need the Reader to stop running goroutines,
when running with default settings.
Goroutines will exit once an error has been returned, including `io.EOF` at the end of a stream.

Streams are decoded concurrently in 4 asynchronous stages to give the best possible throughput.
However, if you prefer synchronous decompression, use `WithDecoderConcurrency(1)` which will decompress data
as it is being requested only.

For decoding buffers, it could look something like this:

@@ -293,7 +302,7 @@ import "github.com/klauspost/compress/zstd"

// Create a reader that caches decompressors.
// For this operation type we supply a nil Reader.
var decoder, _ = zstd.NewReader(nil)
var decoder, _ = zstd.NewReader(nil, WithDecoderConcurrency(0))

// Decompress a buffer. We don't supply a destination buffer,
// so it will be allocated by the decoder.
@@ -304,8 +313,11 @@ func Decompress(src []byte) ([]byte, error) {

Both of these cases should provide the functionality needed.
The decoder can be used for *concurrent* decompression of multiple buffers.
By default 4 decompressors will be created.

It will only allow a certain number of concurrent operations to run.
To tweak that yourself use the `WithDecoderConcurrency(n)` option when creating the decoder.
It is possible to use `WithDecoderConcurrency(0)` to create GOMAXPROCS decoders.
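For example, one shared decoder can serve many goroutines (a sketch; `blobs` is a stand-in for your compressed buffers):

```Go
package main

import (
	"fmt"
	"sync"

	"github.com/klauspost/compress/zstd"
)

// One decoder for the whole program; concurrency 0 sizes it to GOMAXPROCS.
var decoder, _ = zstd.NewReader(nil, zstd.WithDecoderConcurrency(0))

func main() {
	blobs := [][]byte{ /* compressed buffers */ }
	var wg sync.WaitGroup
	for _, b := range blobs {
		wg.Add(1)
		go func(b []byte) {
			defer wg.Done()
			out, err := decoder.DecodeAll(b, nil)
			fmt.Println(len(out), err)
		}(b)
	}
	wg.Wait()
}
```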

### Dictionaries

@@ -357,18 +369,20 @@ In this case no unneeded allocations should be made.
The buffer decoder does everything on the same goroutine and does nothing concurrently.
It can however decode several buffers concurrently. Use `WithDecoderConcurrency(n)` to limit that.

The stream decoder operates on
The stream decoder will create goroutines that:

* One goroutine reads input and splits the input to several block decoders.
* A number of decoders will decode blocks.
* A goroutine coordinates these blocks and sends history from one to the next.
1) Reads input and splits the input into blocks.
2) Decompression of literals.
3) Decompression of sequences.
4) Reconstruction of output stream.

So effectively this also means the decoder will "read ahead" and prepare data to always be available for output.

The concurrency level will, for streams, determine how many blocks ahead the compression will start.

Since "blocks" are quite dependent on the output of the previous block stream decoding will only have limited concurrency.

In practice this means that concurrency is often limited to utilizing about 2 cores effectively.

In practice this means that concurrency is often limited to utilizing about 3 cores effectively.

### Benchmarks

4 vendor/github.com/klauspost/compress/zstd/bitreader.go generated vendored
@@ -7,6 +7,7 @@ package zstd
import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"math/bits"
)
@@ -132,6 +133,9 @@ func (b *bitReader) remain() uint {
func (b *bitReader) close() error {
	// Release reference.
	b.in = nil
	if !b.finished() {
		return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
	}
	if b.bitsRead > 64 {
		return io.ErrUnexpectedEOF
	}
410 vendor/github.com/klauspost/compress/zstd/blockdec.go generated vendored
@@ -76,16 +76,25 @@ type blockDec struct {
|
||||
// Window size of the block.
|
||||
WindowSize uint64
|
||||
|
||||
history chan *history
|
||||
input chan struct{}
|
||||
result chan decodeOutput
|
||||
err error
|
||||
decWG sync.WaitGroup
|
||||
|
||||
// Check against this crc
|
||||
checkCRC []byte
|
||||
|
||||
// Frame to use for singlethreaded decoding.
|
||||
// Should not be used by the decoder itself since parent may be another frame.
|
||||
localFrame *frameDec
|
||||
|
||||
sequence []seqVals
|
||||
|
||||
async struct {
|
||||
newHist *history
|
||||
literals []byte
|
||||
seqData []byte
|
||||
seqSize int // Size of uncompressed sequences
|
||||
fcs uint64
|
||||
}
|
||||
|
||||
// Block is RLE, this is the size.
|
||||
RLESize uint32
|
||||
tmp [4]byte
|
||||
@@ -109,12 +118,7 @@ func (b *blockDec) String() string {
|
||||
func newBlockDec(lowMem bool) *blockDec {
|
||||
b := blockDec{
|
||||
lowMem: lowMem,
|
||||
result: make(chan decodeOutput, 1),
|
||||
input: make(chan struct{}, 1),
|
||||
history: make(chan *history, 1),
|
||||
}
|
||||
b.decWG.Add(1)
|
||||
go b.startDecoder()
|
||||
return &b
|
||||
}
|
||||
|
||||
@@ -137,6 +141,12 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
|
||||
case blockTypeReserved:
|
||||
return ErrReservedBlockType
|
||||
case blockTypeRLE:
|
||||
if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
|
||||
if debugDecoder {
|
||||
printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
|
||||
}
|
||||
return ErrWindowSizeExceeded
|
||||
}
|
||||
b.RLESize = uint32(cSize)
|
||||
if b.lowMem {
|
||||
maxSize = cSize
|
||||
@@ -158,6 +168,13 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
|
||||
return ErrCompressedSizeTooBig
|
||||
}
|
||||
case blockTypeRaw:
|
||||
if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
|
||||
if debugDecoder {
|
||||
printf("rle block too big: csize:%d block: %+v\n", uint64(cSize), b)
|
||||
}
|
||||
return ErrWindowSizeExceeded
|
||||
}
|
||||
|
||||
b.RLESize = 0
|
||||
// We do not need a destination for raw blocks.
|
||||
maxSize = -1
|
||||
@@ -192,85 +209,14 @@ func (b *blockDec) sendErr(err error) {
|
||||
b.Last = true
|
||||
b.Type = blockTypeReserved
|
||||
b.err = err
|
||||
b.input <- struct{}{}
|
||||
}
|
||||
|
||||
// Close will release resources.
|
||||
// Closed blockDec cannot be reset.
|
||||
func (b *blockDec) Close() {
|
||||
close(b.input)
|
||||
close(b.history)
|
||||
close(b.result)
|
||||
b.decWG.Wait()
|
||||
}
|
||||
|
||||
// decodeAsync will prepare decoding the block when it receives input.
|
||||
// This will separate output and history.
|
||||
func (b *blockDec) startDecoder() {
|
||||
defer b.decWG.Done()
|
||||
for range b.input {
|
||||
//println("blockDec: Got block input")
|
||||
switch b.Type {
|
||||
case blockTypeRLE:
|
||||
if cap(b.dst) < int(b.RLESize) {
|
||||
if b.lowMem {
|
||||
b.dst = make([]byte, b.RLESize)
|
||||
} else {
|
||||
b.dst = make([]byte, maxBlockSize)
|
||||
}
|
||||
}
|
||||
o := decodeOutput{
|
||||
d: b,
|
||||
b: b.dst[:b.RLESize],
|
||||
err: nil,
|
||||
}
|
||||
v := b.data[0]
|
||||
for i := range o.b {
|
||||
o.b[i] = v
|
||||
}
|
||||
hist := <-b.history
|
||||
hist.append(o.b)
|
||||
b.result <- o
|
||||
case blockTypeRaw:
|
||||
o := decodeOutput{
|
||||
d: b,
|
||||
b: b.data,
|
||||
err: nil,
|
||||
}
|
||||
hist := <-b.history
|
||||
hist.append(o.b)
|
||||
b.result <- o
|
||||
case blockTypeCompressed:
|
||||
b.dst = b.dst[:0]
|
||||
err := b.decodeCompressed(nil)
|
||||
o := decodeOutput{
|
||||
d: b,
|
||||
b: b.dst,
|
||||
err: err,
|
||||
}
|
||||
if debugDecoder {
|
||||
println("Decompressed to", len(b.dst), "bytes, error:", err)
|
||||
}
|
||||
b.result <- o
|
||||
case blockTypeReserved:
|
||||
// Used for returning errors.
|
||||
<-b.history
|
||||
b.result <- decodeOutput{
|
||||
d: b,
|
||||
b: nil,
|
||||
err: b.err,
|
||||
}
|
||||
default:
|
||||
panic("Invalid block type")
|
||||
}
|
||||
if debugDecoder {
|
||||
println("blockDec: Finished block")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// decodeAsync will prepare decoding the block when it receives the history.
|
||||
// If history is provided, it will not fetch it from the channel.
|
||||
// decodeBuf
|
||||
func (b *blockDec) decodeBuf(hist *history) error {
|
||||
switch b.Type {
|
||||
case blockTypeRLE:
|
||||
@@ -293,14 +239,23 @@ func (b *blockDec) decodeBuf(hist *history) error {
|
||||
return nil
|
||||
case blockTypeCompressed:
|
||||
saved := b.dst
|
||||
// Append directly to history
|
||||
if hist.ignoreBuffer == 0 {
|
||||
b.dst = hist.b
|
||||
hist.b = nil
|
||||
} else {
|
||||
b.dst = b.dst[:0]
|
||||
}
|
||||
err := b.decodeCompressed(hist)
|
||||
if debugDecoder {
|
||||
println("Decompressed to total", len(b.dst), "bytes, hash:", xxhash.Sum64(b.dst), "error:", err)
|
||||
}
|
||||
if hist.ignoreBuffer == 0 {
|
||||
hist.b = b.dst
|
||||
b.dst = saved
|
||||
} else {
|
||||
hist.appendKeep(b.dst)
|
||||
}
|
||||
return err
|
||||
case blockTypeReserved:
|
||||
// Used for returning errors.
|
||||
@@ -310,30 +265,18 @@ func (b *blockDec) decodeBuf(hist *history) error {
|
||||
}
|
||||
}
|
||||
|
||||
// decodeCompressed will start decompressing a block.
|
||||
// If no history is supplied the decoder will decodeAsync as much as possible
|
||||
// before fetching from blockDec.history
|
||||
func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
in := b.data
|
||||
delayedHistory := hist == nil
|
||||
|
||||
if delayedHistory {
|
||||
// We must always grab history.
|
||||
defer func() {
|
||||
if hist == nil {
|
||||
<-b.history
|
||||
}
|
||||
}()
|
||||
}
|
||||
func (b *blockDec) decodeLiterals(in []byte, hist *history) (remain []byte, err error) {
|
||||
// There must be at least one byte for Literals_Block_Type and one for Sequences_Section_Header
|
||||
if len(in) < 2 {
|
||||
return ErrBlockTooSmall
|
||||
return in, ErrBlockTooSmall
|
||||
}
|
||||
|
||||
litType := literalsBlockType(in[0] & 3)
|
||||
var litRegenSize int
|
||||
var litCompSize int
|
||||
sizeFormat := (in[0] >> 2) & 3
|
||||
var fourStreams bool
|
||||
var literals []byte
|
||||
switch litType {
|
||||
case literalsBlockRaw, literalsBlockRLE:
|
||||
switch sizeFormat {
|
||||
@@ -349,7 +292,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
// Regenerated_Size uses 20 bits (0-1048575). Literals_Section_Header uses 3 bytes.
|
||||
if len(in) < 3 {
|
||||
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
|
||||
return ErrBlockTooSmall
|
||||
return in, ErrBlockTooSmall
|
||||
}
|
||||
litRegenSize = int(in[0]>>4) + (int(in[1]) << 4) + (int(in[2]) << 12)
|
||||
in = in[3:]
|
||||
@@ -360,7 +303,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
// Both Regenerated_Size and Compressed_Size use 10 bits (0-1023).
|
||||
if len(in) < 3 {
|
||||
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
|
||||
return ErrBlockTooSmall
|
||||
return in, ErrBlockTooSmall
|
||||
}
|
||||
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12)
|
||||
litRegenSize = int(n & 1023)
|
||||
@@ -371,7 +314,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
fourStreams = true
|
||||
if len(in) < 4 {
|
||||
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
|
||||
return ErrBlockTooSmall
|
||||
return in, ErrBlockTooSmall
|
||||
}
|
||||
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20)
|
||||
litRegenSize = int(n & 16383)
|
||||
@@ -381,7 +324,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
fourStreams = true
|
||||
if len(in) < 5 {
|
||||
println("too small: litType:", litType, " sizeFormat", sizeFormat, len(in))
|
||||
return ErrBlockTooSmall
|
||||
return in, ErrBlockTooSmall
|
||||
}
|
||||
n := uint64(in[0]>>4) + (uint64(in[1]) << 4) + (uint64(in[2]) << 12) + (uint64(in[3]) << 20) + (uint64(in[4]) << 28)
|
||||
litRegenSize = int(n & 262143)
|
||||
@@ -392,13 +335,15 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
if debugDecoder {
|
||||
println("literals type:", litType, "litRegenSize:", litRegenSize, "litCompSize:", litCompSize, "sizeFormat:", sizeFormat, "4X:", fourStreams)
|
||||
}
|
||||
var literals []byte
|
||||
var huff *huff0.Scratch
|
||||
if litRegenSize > int(b.WindowSize) || litRegenSize > maxCompressedBlockSize {
|
||||
return in, ErrWindowSizeExceeded
|
||||
}
|
||||
|
||||
switch litType {
|
||||
case literalsBlockRaw:
|
||||
if len(in) < litRegenSize {
|
||||
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litRegenSize)
|
||||
return ErrBlockTooSmall
|
||||
return in, ErrBlockTooSmall
|
||||
}
|
||||
literals = in[:litRegenSize]
|
||||
in = in[litRegenSize:]
|
||||
@@ -406,7 +351,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
case literalsBlockRLE:
|
||||
if len(in) < 1 {
|
||||
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", 1)
|
||||
return ErrBlockTooSmall
|
||||
return in, ErrBlockTooSmall
|
||||
}
|
||||
if cap(b.literalBuf) < litRegenSize {
|
||||
if b.lowMem {
|
||||
@@ -417,7 +362,6 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
b.literalBuf = make([]byte, litRegenSize)
|
||||
} else {
|
||||
b.literalBuf = make([]byte, litRegenSize, maxCompressedLiteralSize)
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -433,7 +377,7 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
case literalsBlockTreeless:
|
||||
if len(in) < litCompSize {
|
||||
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
|
||||
return ErrBlockTooSmall
|
||||
return in, ErrBlockTooSmall
|
||||
}
|
||||
// Store compressed literals, so we defer decoding until we get history.
|
||||
literals = in[:litCompSize]
|
||||
@@ -441,15 +385,10 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
if debugDecoder {
|
||||
printf("Found %d compressed literals\n", litCompSize)
|
||||
}
|
||||
case literalsBlockCompressed:
|
||||
if len(in) < litCompSize {
|
||||
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
|
||||
return ErrBlockTooSmall
|
||||
huff := hist.huffTree
|
||||
if huff == nil {
|
||||
return in, errors.New("literal block was treeless, but no history was defined")
|
||||
}
|
||||
literals = in[:litCompSize]
|
||||
in = in[litCompSize:]
|
||||
huff = huffDecoderPool.Get().(*huff0.Scratch)
|
||||
var err error
|
||||
// Ensure we have space to store it.
|
||||
if cap(b.literalBuf) < litRegenSize {
|
||||
if b.lowMem {
|
||||
@@ -458,14 +397,53 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
// Use our out buffer.
|
||||
huff.MaxDecodedSize = maxCompressedBlockSize
|
||||
if fourStreams {
|
||||
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
|
||||
} else {
|
||||
literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
|
||||
}
|
||||
// Make sure we don't leak our literals buffer
|
||||
if err != nil {
|
||||
println("decompressing literals:", err)
|
||||
return in, err
|
||||
}
|
||||
if len(literals) != litRegenSize {
|
||||
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
|
||||
}
|
||||
|
||||
case literalsBlockCompressed:
|
||||
if len(in) < litCompSize {
|
||||
println("too small: litType:", litType, " sizeFormat", sizeFormat, "remain:", len(in), "want:", litCompSize)
|
||||
return in, ErrBlockTooSmall
|
||||
}
|
||||
literals = in[:litCompSize]
|
||||
in = in[litCompSize:]
|
||||
// Ensure we have space to store it.
|
||||
if cap(b.literalBuf) < litRegenSize {
|
||||
if b.lowMem {
|
||||
b.literalBuf = make([]byte, 0, litRegenSize)
|
||||
} else {
|
||||
b.literalBuf = make([]byte, 0, maxCompressedBlockSize)
|
||||
}
|
||||
}
|
||||
huff := hist.huffTree
|
||||
if huff == nil || (hist.dict != nil && huff == hist.dict.litEnc) {
|
||||
huff = huffDecoderPool.Get().(*huff0.Scratch)
|
||||
if huff == nil {
|
||||
huff = &huff0.Scratch{}
|
||||
}
|
||||
}
|
||||
var err error
|
||||
huff, literals, err = huff0.ReadTable(literals, huff)
|
||||
if err != nil {
|
||||
println("reading huffman table:", err)
|
||||
return err
|
||||
return in, err
|
||||
}
|
||||
hist.huffTree = huff
|
||||
huff.MaxDecodedSize = maxCompressedBlockSize
|
||||
// Use our out buffer.
|
||||
if fourStreams {
|
||||
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
|
||||
@@ -474,24 +452,52 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
}
|
||||
if err != nil {
|
||||
println("decoding compressed literals:", err)
|
||||
return err
|
||||
return in, err
|
||||
}
|
||||
// Make sure we don't leak our literals buffer
|
||||
if len(literals) != litRegenSize {
|
||||
return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
|
||||
return in, fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
|
||||
}
|
||||
if debugDecoder {
|
||||
printf("Decompressed %d literals into %d bytes\n", litCompSize, litRegenSize)
|
||||
}
|
||||
}
|
||||
hist.decoders.literals = literals
|
||||
return in, nil
|
||||
}
|
||||
|
||||
// decodeCompressed will start decompressing a block.
|
||||
func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
in := b.data
|
||||
in, err := b.decodeLiterals(in, hist)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
err = b.prepareSequences(in, hist)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if hist.decoders.nSeqs == 0 {
|
||||
b.dst = append(b.dst, hist.decoders.literals...)
|
||||
return nil
|
||||
}
|
||||
err = hist.decoders.decodeSync(hist)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
b.dst = hist.decoders.out
|
||||
hist.recentOffsets = hist.decoders.prevOffset
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
|
||||
// Decode Sequences
|
||||
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
|
||||
if len(in) < 1 {
|
||||
return ErrBlockTooSmall
|
||||
}
|
||||
var nSeqs int
|
||||
seqHeader := in[0]
|
||||
nSeqs := 0
|
||||
switch {
|
||||
case seqHeader == 0:
|
||||
in = in[1:]
|
||||
@@ -512,7 +518,8 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
in = in[3:]
|
||||
}
|
||||
|
||||
var seqs = &sequenceDecs{}
|
||||
var seqs = &hist.decoders
|
||||
seqs.nSeqs = nSeqs
|
||||
if nSeqs > 0 {
|
||||
if len(in) < 1 {
|
||||
return ErrBlockTooSmall
|
||||
@@ -541,6 +548,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
}
|
||||
switch mode {
|
||||
case compModePredefined:
|
||||
if seq.fse != nil && !seq.fse.preDefined {
|
||||
fseDecoderPool.Put(seq.fse)
|
||||
}
|
||||
seq.fse = &fsePredef[i]
|
||||
case compModeRLE:
|
||||
if br.remain() < 1 {
|
||||
@@ -548,34 +558,36 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
}
|
||||
v := br.Uint8()
|
||||
br.advance(1)
|
||||
dec := fseDecoderPool.Get().(*fseDecoder)
|
||||
if seq.fse == nil || seq.fse.preDefined {
|
||||
seq.fse = fseDecoderPool.Get().(*fseDecoder)
|
||||
}
|
||||
symb, err := decSymbolValue(v, symbolTableX[i])
|
||||
if err != nil {
|
||||
printf("RLE Transform table (%v) error: %v", tableIndex(i), err)
|
||||
return err
|
||||
}
|
||||
dec.setRLE(symb)
|
||||
seq.fse = dec
|
||||
seq.fse.setRLE(symb)
|
||||
if debugDecoder {
|
||||
printf("RLE set to %+v, code: %v", symb, v)
|
||||
}
|
||||
case compModeFSE:
|
||||
println("Reading table for", tableIndex(i))
|
||||
dec := fseDecoderPool.Get().(*fseDecoder)
|
||||
err := dec.readNCount(&br, uint16(maxTableSymbol[i]))
|
||||
if seq.fse == nil || seq.fse.preDefined {
|
||||
seq.fse = fseDecoderPool.Get().(*fseDecoder)
|
||||
}
|
||||
err := seq.fse.readNCount(&br, uint16(maxTableSymbol[i]))
|
||||
if err != nil {
|
||||
println("Read table error:", err)
|
||||
return err
|
||||
}
|
||||
err = dec.transform(symbolTableX[i])
|
||||
err = seq.fse.transform(symbolTableX[i])
|
||||
if err != nil {
|
||||
println("Transform table error:", err)
|
||||
return err
|
||||
}
|
||||
if debugDecoder {
|
||||
println("Read table ok", "symbolLen:", dec.symbolLen)
|
||||
println("Read table ok", "symbolLen:", seq.fse.symbolLen)
|
||||
}
|
||||
seq.fse = dec
|
||||
case compModeRepeat:
|
||||
seq.repeat = true
|
||||
}
|
||||
@@ -585,140 +597,88 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
}
|
||||
in = br.unread()
|
||||
}
|
||||
|
||||
// Wait for history.
|
||||
// All time spent after this is critical since it is strictly sequential.
|
||||
if hist == nil {
|
||||
hist = <-b.history
|
||||
if hist.error {
|
||||
return ErrDecoderClosed
|
||||
}
|
||||
}
|
||||
|
||||
// Decode treeless literal block.
|
||||
if litType == literalsBlockTreeless {
|
||||
// TODO: We could send the history early WITHOUT the stream history.
|
||||
// This would allow decoding treeless literals before the byte history is available.
|
||||
// Silencia stats: Treeless 4393, with: 32775, total: 37168, 11% treeless.
|
||||
// So not much obvious gain here.
|
||||
|
||||
if hist.huffTree == nil {
|
||||
return errors.New("literal block was treeless, but no history was defined")
|
||||
}
|
||||
// Ensure we have space to store it.
|
||||
if cap(b.literalBuf) < litRegenSize {
|
||||
if b.lowMem {
|
||||
b.literalBuf = make([]byte, 0, litRegenSize)
|
||||
} else {
|
||||
b.literalBuf = make([]byte, 0, maxCompressedLiteralSize)
|
||||
}
|
||||
}
|
||||
var err error
|
||||
// Use our out buffer.
|
||||
huff = hist.huffTree
|
||||
if fourStreams {
|
||||
literals, err = huff.Decoder().Decompress4X(b.literalBuf[:0:litRegenSize], literals)
|
||||
} else {
|
||||
literals, err = huff.Decoder().Decompress1X(b.literalBuf[:0:litRegenSize], literals)
|
||||
}
|
||||
// Make sure we don't leak our literals buffer
|
||||
if err != nil {
|
||||
println("decompressing literals:", err)
|
||||
return err
|
||||
}
|
||||
if len(literals) != litRegenSize {
|
||||
return fmt.Errorf("literal output size mismatch want %d, got %d", litRegenSize, len(literals))
|
||||
}
|
||||
} else {
|
||||
if hist.huffTree != nil && huff != nil {
|
||||
if hist.dict == nil || hist.dict.litEnc != hist.huffTree {
|
||||
huffDecoderPool.Put(hist.huffTree)
|
||||
}
|
||||
hist.huffTree = nil
|
||||
}
|
||||
}
|
||||
if huff != nil {
|
||||
hist.huffTree = huff
|
||||
}
|
||||
if debugDecoder {
|
||||
println("Final literals:", len(literals), "hash:", xxhash.Sum64(literals), "and", nSeqs, "sequences.")
|
||||
println("Literals:", len(seqs.literals), "hash:", xxhash.Sum64(seqs.literals), "and", seqs.nSeqs, "sequences.")
|
||||
}
|
||||
|
||||
if nSeqs == 0 {
|
||||
// Decompressed content is defined entirely as Literals Section content.
|
||||
b.dst = append(b.dst, literals...)
|
||||
if delayedHistory {
|
||||
hist.append(literals)
|
||||
if len(b.sequence) > 0 {
|
||||
b.sequence = b.sequence[:0]
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
seqs, err := seqs.mergeHistory(&hist.decoders)
|
||||
if err != nil {
|
||||
return err
|
||||
br := seqs.br
|
||||
if br == nil {
|
||||
br = &bitReader{}
|
||||
}
|
||||
if debugDecoder {
|
||||
println("History merged ok")
|
||||
}
|
||||
br := &bitReader{}
|
||||
if err := br.init(in); err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// TODO: Investigate if sending history without decoders are faster.
|
||||
// This would allow the sequences to be decoded async and only have to construct stream history.
|
||||
// If only recent offsets were not transferred, this would be an obvious win.
|
||||
// Also, if first 3 sequences don't reference recent offsets, all sequences can be decoded.
|
||||
if err := seqs.initialize(br, hist, b.dst); err != nil {
|
||||
println("initializing sequences:", err)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *blockDec) decodeSequences(hist *history) error {
|
||||
if cap(b.sequence) < hist.decoders.nSeqs {
|
||||
if b.lowMem {
|
||||
b.sequence = make([]seqVals, 0, hist.decoders.nSeqs)
|
||||
} else {
|
||||
b.sequence = make([]seqVals, 0, 0x7F00+0xffff)
|
||||
}
|
||||
}
|
||||
b.sequence = b.sequence[:hist.decoders.nSeqs]
|
||||
if hist.decoders.nSeqs == 0 {
|
||||
hist.decoders.seqSize = len(hist.decoders.literals)
|
||||
return nil
|
||||
}
|
||||
hist.decoders.prevOffset = hist.recentOffsets
|
||||
err := hist.decoders.decode(b.sequence)
|
||||
hist.recentOffsets = hist.decoders.prevOffset
|
||||
return err
|
||||
}
|
||||
|
||||
func (b *blockDec) executeSequences(hist *history) error {
|
||||
hbytes := hist.b
|
||||
if len(hbytes) > hist.windowSize {
|
||||
hbytes = hbytes[len(hbytes)-hist.windowSize:]
|
||||
// We do not need history any more.
|
||||
// We do not need history anymore.
|
||||
if hist.dict != nil {
|
||||
hist.dict.content = nil
|
||||
}
|
||||
}
|
||||
|
||||
if err := seqs.initialize(br, hist, literals, b.dst); err != nil {
|
||||
println("initializing sequences:", err)
|
||||
return err
|
||||
}
|
||||
|
||||
err = seqs.decode(nSeqs, br, hbytes)
|
||||
hist.decoders.windowSize = hist.windowSize
|
||||
hist.decoders.out = b.dst[:0]
|
||||
err := hist.decoders.execute(b.sequence, hbytes)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
if !br.finished() {
|
||||
return fmt.Errorf("%d extra bits on block, should be 0", br.remain())
|
||||
}
|
||||
return b.updateHistory(hist)
|
||||
}
|
||||
|
||||
err = br.close()
|
||||
if err != nil {
|
||||
printf("Closing sequences: %v, %+v\n", err, *br)
|
||||
}
|
||||
func (b *blockDec) updateHistory(hist *history) error {
|
||||
if len(b.data) > maxCompressedBlockSize {
|
||||
return fmt.Errorf("compressed block size too large (%d)", len(b.data))
|
||||
}
|
||||
// Set output and release references.
|
||||
b.dst = seqs.out
|
||||
seqs.out, seqs.literals, seqs.hist = nil, nil, nil
|
||||
b.dst = hist.decoders.out
|
||||
hist.recentOffsets = hist.decoders.prevOffset
|
||||
|
||||
if !delayedHistory {
|
||||
// If we don't have delayed history, no need to update.
|
||||
hist.recentOffsets = seqs.prevOffset
|
||||
return nil
|
||||
}
|
||||
if b.Last {
|
||||
// if last block we don't care about history.
|
||||
println("Last block, no history returned")
|
||||
hist.b = hist.b[:0]
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
hist.append(b.dst)
|
||||
hist.recentOffsets = seqs.prevOffset
|
||||
if debugDecoder {
|
||||
println("Finished block with literals:", len(literals), "and", nSeqs, "sequences.")
|
||||
println("Finished block with ", len(b.sequence), "sequences. Added", len(b.dst), "to history, now length", len(hist.b))
|
||||
}
|
||||
}
|
||||
hist.decoders.out, hist.decoders.literals = nil, nil
|
||||
|
||||
return nil
|
||||
}
|
||||
|
3 vendor/github.com/klauspost/compress/zstd/bytebuf.go generated vendored
@@ -113,6 +113,9 @@ func (r *readerWrapper) readBig(n int, dst []byte) ([]byte, error) {
func (r *readerWrapper) readByte() (byte, error) {
	n2, err := r.r.Read(r.tmp[:1])
	if err != nil {
		if err == io.EOF {
			err = io.ErrUnexpectedEOF
		}
		return 0, err
	}
	if n2 != 1 {
594 vendor/github.com/klauspost/compress/zstd/decoder.go generated vendored
@@ -5,9 +5,13 @@
|
||||
package zstd
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/binary"
|
||||
"io"
|
||||
"sync"
|
||||
|
||||
"github.com/klauspost/compress/zstd/internal/xxhash"
|
||||
)
|
||||
|
||||
// Decoder provides decoding of zstandard streams.
|
||||
@@ -22,12 +26,19 @@ type Decoder struct {
|
||||
// Unreferenced decoders, ready for use.
|
||||
decoders chan *blockDec
|
||||
|
||||
// Streams ready to be decoded.
|
||||
stream chan decodeStream
|
||||
|
||||
// Current read position used for Reader functionality.
|
||||
current decoderState
|
||||
|
||||
// sync stream decoding
|
||||
syncStream struct {
|
||||
decodedFrame uint64
|
||||
br readerWrapper
|
||||
enabled bool
|
||||
inFrame bool
|
||||
}
|
||||
|
||||
frame *frameDec
|
||||
|
||||
// Custom dictionaries.
|
||||
// Always uses copies.
|
||||
dicts map[uint32]dict
|
||||
@@ -46,7 +57,10 @@ type decoderState struct {
|
||||
output chan decodeOutput
|
||||
|
||||
// cancel remaining output.
|
||||
cancel chan struct{}
|
||||
cancel context.CancelFunc
|
||||
|
||||
// crc of current frame
|
||||
crc *xxhash.Digest
|
||||
|
||||
flushed bool
|
||||
}
|
||||
@@ -81,7 +95,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d.current.output = make(chan decodeOutput, d.o.concurrent)
|
||||
d.current.crc = xxhash.New()
|
||||
d.current.flushed = true
|
||||
|
||||
if r == nil {
|
||||
@@ -130,7 +144,7 @@ func (d *Decoder) Read(p []byte) (int, error) {
|
||||
break
|
||||
}
|
||||
if !d.nextBlock(n == 0) {
|
||||
return n, nil
|
||||
return n, d.current.err
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -162,6 +176,7 @@ func (d *Decoder) Reset(r io.Reader) error {
|
||||
|
||||
d.drainOutput()
|
||||
|
||||
d.syncStream.br.r = nil
|
||||
if r == nil {
|
||||
d.current.err = ErrDecoderNilInput
|
||||
if len(d.current.b) > 0 {
|
||||
@@ -195,33 +210,39 @@ func (d *Decoder) Reset(r io.Reader) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
if d.stream == nil {
|
||||
d.stream = make(chan decodeStream, 1)
|
||||
d.streamWg.Add(1)
|
||||
go d.startStreamDecoder(d.stream)
|
||||
}
|
||||
|
||||
// Remove current block.
|
||||
d.stashDecoder()
|
||||
d.current.decodeOutput = decodeOutput{}
|
||||
d.current.err = nil
|
||||
d.current.cancel = make(chan struct{})
|
||||
d.current.flushed = false
|
||||
d.current.d = nil
|
||||
|
||||
d.stream <- decodeStream{
|
||||
r: r,
|
||||
output: d.current.output,
|
||||
cancel: d.current.cancel,
|
||||
// Ensure no-one else is still running...
|
||||
d.streamWg.Wait()
|
||||
if d.frame == nil {
|
||||
d.frame = newFrameDec(d.o)
|
||||
}
|
||||
|
||||
if d.o.concurrent == 1 {
|
||||
return d.startSyncDecoder(r)
|
||||
}
|
||||
|
||||
d.current.output = make(chan decodeOutput, d.o.concurrent)
|
||||
ctx, cancel := context.WithCancel(context.Background())
|
||||
d.current.cancel = cancel
|
||||
d.streamWg.Add(1)
|
||||
go d.startStreamDecoder(ctx, r, d.current.output)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// drainOutput will drain the output until errEndOfStream is sent.
|
||||
func (d *Decoder) drainOutput() {
|
||||
if d.current.cancel != nil {
|
||||
if debugDecoder {
|
||||
println("cancelling current")
|
||||
close(d.current.cancel)
|
||||
}
|
||||
d.current.cancel()
|
||||
d.current.cancel = nil
|
||||
}
|
||||
if d.current.d != nil {
|
||||
@@ -243,12 +264,9 @@ func (d *Decoder) drainOutput() {
|
||||
}
|
||||
d.decoders <- v.d
|
||||
}
|
||||
if v.err == errEndOfStream {
|
||||
println("current flushed")
|
||||
}
|
||||
d.current.output = nil
|
||||
d.current.flushed = true
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// WriteTo writes data to w until there's no more data to write or when an error occurs.
|
||||
@@ -287,7 +305,7 @@ func (d *Decoder) WriteTo(w io.Writer) (int64, error) {
|
||||
// DecodeAll can be used concurrently.
|
||||
// The Decoder concurrency limits will be respected.
|
||||
func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
||||
if d.current.err == ErrDecoderClosed {
|
||||
if d.decoders == nil {
|
||||
return dst, ErrDecoderClosed
|
||||
}
|
||||
|
||||
@@ -300,6 +318,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
||||
}
|
||||
frame.rawInput = nil
|
||||
frame.bBuf = nil
|
||||
if frame.history.decoders.br != nil {
|
||||
frame.history.decoders.br.in = nil
|
||||
}
|
||||
d.decoders <- block
|
||||
}()
|
||||
frame.bBuf = input
|
||||
@@ -307,27 +328,31 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
||||
for {
|
||||
frame.history.reset()
|
||||
err := frame.reset(&frame.bBuf)
|
||||
if err != nil {
|
||||
if err == io.EOF {
|
||||
if debugDecoder {
|
||||
println("frame reset return EOF")
|
||||
}
|
||||
return dst, nil
|
||||
}
|
||||
return dst, err
|
||||
}
|
||||
if frame.DictionaryID != nil {
|
||||
dict, ok := d.dicts[*frame.DictionaryID]
|
||||
if !ok {
|
||||
return nil, ErrUnknownDictionary
|
||||
}
|
||||
if debugDecoder {
|
||||
println("setting dict", frame.DictionaryID)
|
||||
}
|
||||
frame.history.setDict(&dict)
|
||||
}
|
||||
if err != nil {
|
||||
return dst, err
|
||||
}
|
||||
|
||||
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
|
||||
return dst, ErrDecoderSizeExceeded
|
||||
}
|
||||
if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
|
||||
// Never preallocate moe than 1 GB up front.
|
||||
// Never preallocate more than 1 GB up front.
|
||||
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
|
||||
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
|
||||
copy(dst2, dst)
|
||||
@@ -368,6 +393,161 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
||||
// If non-blocking mode is used the returned boolean will be false
|
||||
// if no data was available without blocking.
|
||||
func (d *Decoder) nextBlock(blocking bool) (ok bool) {
|
||||
if d.current.err != nil {
|
||||
// Keep error state.
|
||||
return false
|
||||
}
|
||||
d.current.b = d.current.b[:0]
|
||||
|
||||
// SYNC:
|
||||
if d.syncStream.enabled {
|
||||
if !blocking {
|
||||
return false
|
||||
}
|
||||
ok = d.nextBlockSync()
|
||||
if !ok {
|
||||
d.stashDecoder()
|
||||
}
|
||||
return ok
|
||||
}
|
||||
|
||||
//ASYNC:
|
||||
d.stashDecoder()
|
||||
if blocking {
|
||||
d.current.decodeOutput, ok = <-d.current.output
|
||||
} else {
|
||||
select {
|
||||
case d.current.decodeOutput, ok = <-d.current.output:
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
if !ok {
|
||||
// This should not happen, so signal error state...
|
||||
d.current.err = io.ErrUnexpectedEOF
|
||||
return false
|
||||
}
|
||||
next := d.current.decodeOutput
|
||||
if next.d != nil && next.d.async.newHist != nil {
|
||||
d.current.crc.Reset()
|
||||
}
|
||||
if debugDecoder {
|
||||
var tmp [4]byte
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(xxhash.Sum64(next.b)))
|
||||
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
|
||||
}
|
||||
|
||||
if len(next.b) > 0 {
|
||||
n, err := d.current.crc.Write(next.b)
|
||||
if err == nil {
|
||||
if n != len(next.b) {
|
||||
d.current.err = io.ErrShortWrite
|
||||
}
|
||||
}
|
||||
}
|
||||
if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
|
||||
got := d.current.crc.Sum64()
|
||||
var tmp [4]byte
|
||||
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
|
||||
if !bytes.Equal(tmp[:], next.d.checkCRC) && !ignoreCRC {
|
||||
if debugDecoder {
|
||||
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
|
||||
}
|
||||
d.current.err = ErrCRCMismatch
|
||||
} else {
|
||||
if debugDecoder {
|
||||
println("CRC ok", tmp[:])
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return true
|
||||
}
func (d *Decoder) nextBlockSync() (ok bool) {
	if d.current.d == nil {
		d.current.d = <-d.decoders
	}
	for len(d.current.b) == 0 {
		if !d.syncStream.inFrame {
			d.frame.history.reset()
			d.current.err = d.frame.reset(&d.syncStream.br)
			if d.current.err != nil {
				return false
			}
			if d.frame.DictionaryID != nil {
				dict, ok := d.dicts[*d.frame.DictionaryID]
				if !ok {
					d.current.err = ErrUnknownDictionary
					return false
				} else {
					d.frame.history.setDict(&dict)
				}
			}
			if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
				d.current.err = ErrDecoderSizeExceeded
				return false
			}

			d.syncStream.decodedFrame = 0
			d.syncStream.inFrame = true
		}
		d.current.err = d.frame.next(d.current.d)
		if d.current.err != nil {
			return false
		}
		d.frame.history.ensureBlock()
		if debugDecoder {
			println("History trimmed:", len(d.frame.history.b), "decoded already:", d.syncStream.decodedFrame)
		}
		histBefore := len(d.frame.history.b)
		d.current.err = d.current.d.decodeBuf(&d.frame.history)

		if d.current.err != nil {
			println("error after:", d.current.err)
			return false
		}
		d.current.b = d.frame.history.b[histBefore:]
		if debugDecoder {
			println("history after:", len(d.frame.history.b))
		}

		// Check frame size (before CRC)
		d.syncStream.decodedFrame += uint64(len(d.current.b))
		if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize {
			if debugDecoder {
				printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
			}
			d.current.err = ErrFrameSizeExceeded
			return false
		}

		// Check FCS
		if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize {
			if debugDecoder {
				printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
			}
			d.current.err = ErrFrameSizeMismatch
			return false
		}

		// Update/Check CRC
		if d.frame.HasCheckSum {
			d.frame.crc.Write(d.current.b)
			if d.current.d.Last {
				d.current.err = d.frame.checkCRC()
				if d.current.err != nil {
					println("CRC error:", d.current.err)
					return false
				}
			}
		}
		d.syncStream.inFrame = !d.current.d.Last
	}
	return true
}
func (d *Decoder) stashDecoder() {
	if d.current.d != nil {
		if debugDecoder {
			printf("re-adding current decoder %p", d.current.d)
@@ -375,24 +555,6 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
		d.decoders <- d.current.d
		d.current.d = nil
	}
	if d.current.err != nil {
		// Keep error state.
		return blocking
	}

	if blocking {
		d.current.decodeOutput = <-d.current.output
	} else {
		select {
		case d.current.decodeOutput = <-d.current.output:
		default:
			return false
		}
	}
	if debugDecoder {
		println("got", len(d.current.b), "bytes, error:", d.current.err)
	}
	return true
}
// Close will release all resources.
@@ -402,10 +564,10 @@ func (d *Decoder) Close() {
		return
	}
	d.drainOutput()
	if d.stream != nil {
		close(d.stream)
	if d.current.cancel != nil {
		d.current.cancel()
		d.streamWg.Wait()
		d.stream = nil
		d.current.cancel = nil
	}
	if d.decoders != nil {
		close(d.decoders)
@@ -456,39 +618,229 @@ type decodeOutput struct {
	err error
}
type decodeStream struct {
	r io.Reader

	// Blocks ready to be written to output.
	output chan decodeOutput

	// cancel reading from the input
	cancel chan struct{}
func (d *Decoder) startSyncDecoder(r io.Reader) error {
	d.frame.history.reset()
	d.syncStream.br = readerWrapper{r: r}
	d.syncStream.inFrame = false
	d.syncStream.enabled = true
	d.syncStream.decodedFrame = 0
	return nil
}

// errEndOfStream indicates that everything from the stream was read.
var errEndOfStream = errors.New("end-of-stream")
// Create Decoder:
// Spawn n block decoders. These accept tasks to decode a block.
// Create a goroutine that handles stream processing; this will send history to decoders as they are available.
// Decoders update the history as they decode.
// When a block is returned:
//   a) history is sent to the next decoder,
//   b) content is written to the CRC,
//   c) data is returned to the WRITER,
//   d) wait for the next block to return data.
// Once written, the decoders are returned to the pool for re-use.
func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
// ASYNC:
// Spawn 4 goroutines.
// 0: Read frames and decode blocks.
// 1: Decode block and literals. Receives hufftree and seqdecs, returns seqdecs and huff tree.
// 2: Wait for recentOffsets if needed. Decode sequences, send recentOffsets.
// 3: Wait for stream history, execute sequences, send stream history.
func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output chan decodeOutput) {
	defer d.streamWg.Done()
	frame := newFrameDec(d.o)
	for stream := range inStream {
		if debugDecoder {
			println("got new stream")
	br := readerWrapper{r: r}

	var seqPrepare = make(chan *blockDec, d.o.concurrent)
	var seqDecode = make(chan *blockDec, d.o.concurrent)
	var seqExecute = make(chan *blockDec, d.o.concurrent)

	// Async 1: Prepare blocks...
	go func() {
		var hist history
		var hasErr bool
		for block := range seqPrepare {
			if hasErr {
				if block != nil {
					seqDecode <- block
				}
		br := readerWrapper{r: stream.r}
	decodeStream:
				continue
			}
			if block.async.newHist != nil {
				if debugDecoder {
					println("Async 1: new history")
				}
				hist.reset()
				if block.async.newHist.dict != nil {
					hist.setDict(block.async.newHist.dict)
				}
			}
			if block.err != nil || block.Type != blockTypeCompressed {
				hasErr = block.err != nil
				seqDecode <- block
				continue
			}

			remain, err := block.decodeLiterals(block.data, &hist)
			block.err = err
			hasErr = block.err != nil
			if err == nil {
				block.async.literals = hist.decoders.literals
				block.async.seqData = remain
			} else if debugDecoder {
				println("decodeLiterals error:", err)
			}
			seqDecode <- block
		}
		close(seqDecode)
	}()

	// Async 2: Decode sequences...
	go func() {
		var hist history
		var hasErr bool

		for block := range seqDecode {
			if hasErr {
				if block != nil {
					seqExecute <- block
				}
				continue
			}
			if block.async.newHist != nil {
				if debugDecoder {
					println("Async 2: new history, recent:", block.async.newHist.recentOffsets)
				}
				hist.decoders = block.async.newHist.decoders
				hist.recentOffsets = block.async.newHist.recentOffsets
				if block.async.newHist.dict != nil {
					hist.setDict(block.async.newHist.dict)
				}
			}
			if block.err != nil || block.Type != blockTypeCompressed {
				hasErr = block.err != nil
				seqExecute <- block
				continue
			}

			hist.decoders.literals = block.async.literals
			block.err = block.prepareSequences(block.async.seqData, &hist)
			if debugDecoder && block.err != nil {
				println("prepareSequences returned:", block.err)
			}
			hasErr = block.err != nil
			if block.err == nil {
				block.err = block.decodeSequences(&hist)
				if debugDecoder && block.err != nil {
					println("decodeSequences returned:", block.err)
				}
				hasErr = block.err != nil
				// block.async.sequence = hist.decoders.seq[:hist.decoders.nSeqs]
				block.async.seqSize = hist.decoders.seqSize
			}
			seqExecute <- block
		}
		close(seqExecute)
	}()

	var wg sync.WaitGroup
	wg.Add(1)

	// Async 3: Execute sequences...
	frameHistCache := d.frame.history.b
	go func() {
		var hist history
		var decodedFrame uint64
		var fcs uint64
		var hasErr bool
		for block := range seqExecute {
			out := decodeOutput{err: block.err, d: block}
			if block.err != nil || hasErr {
				hasErr = true
				output <- out
				continue
			}
			if block.async.newHist != nil {
				if debugDecoder {
					println("Async 3: new history")
				}
				hist.windowSize = block.async.newHist.windowSize
				hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
				if block.async.newHist.dict != nil {
					hist.setDict(block.async.newHist.dict)
				}

				if cap(hist.b) < hist.allocFrameBuffer {
					if cap(frameHistCache) >= hist.allocFrameBuffer {
						hist.b = frameHistCache
					} else {
						hist.b = make([]byte, 0, hist.allocFrameBuffer)
						println("Alloc history sized", hist.allocFrameBuffer)
					}
				}
				hist.b = hist.b[:0]
				fcs = block.async.fcs
				decodedFrame = 0
			}
			do := decodeOutput{err: block.err, d: block}
			switch block.Type {
			case blockTypeRLE:
				if debugDecoder {
					println("add rle block length:", block.RLESize)
				}

				if cap(block.dst) < int(block.RLESize) {
					if block.lowMem {
						block.dst = make([]byte, block.RLESize)
					} else {
						block.dst = make([]byte, maxBlockSize)
					}
				}
				block.dst = block.dst[:block.RLESize]
				v := block.data[0]
				for i := range block.dst {
					block.dst[i] = v
				}
				hist.append(block.dst)
				do.b = block.dst
			case blockTypeRaw:
				if debugDecoder {
					println("add raw block length:", len(block.data))
				}
				hist.append(block.data)
				do.b = block.data
			case blockTypeCompressed:
				if debugDecoder {
					println("execute with history length:", len(hist.b), "window:", hist.windowSize)
				}
				hist.decoders.seqSize = block.async.seqSize
				hist.decoders.literals = block.async.literals
				do.err = block.executeSequences(&hist)
				hasErr = do.err != nil
				if debugDecoder && hasErr {
					println("executeSequences returned:", do.err)
				}
				do.b = block.dst
			}
			if !hasErr {
				decodedFrame += uint64(len(do.b))
				if fcs > 0 && decodedFrame > fcs {
					println("fcs exceeded", block.Last, fcs, decodedFrame)
					do.err = ErrFrameSizeExceeded
					hasErr = true
				} else if block.Last && fcs > 0 && decodedFrame != fcs {
					do.err = ErrFrameSizeMismatch
					hasErr = true
				} else {
					if debugDecoder {
						println("fcs ok", block.Last, fcs, decodedFrame)
					}
				}
			}
			output <- do
		}
		close(output)
		frameHistCache = hist.b
		wg.Done()
		if debugDecoder {
			println("decoder goroutines finished")
		}
	}()

decodeStream:
	for {
		frame := d.frame
		if debugDecoder {
			println("New frame...")
		}
		var historySent bool
		frame.history.reset()
		err := frame.reset(&br)
		if debugDecoder && err != nil {
@@ -502,54 +854,70 @@ func (d *Decoder) startStreamDecoder(inStream chan decodeStream) {
			frame.history.setDict(&dict)
		}
	}
		if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
			err = ErrDecoderSizeExceeded
		}
		if err != nil {
			stream.output <- decodeOutput{
				err: err,
			select {
			case <-ctx.Done():
			case dec := <-d.decoders:
				dec.sendErr(err)
				seqPrepare <- dec
			}
			break
		}
		if debugDecoder {
			println("starting frame decoder")
			break decodeStream
		}

		// This goroutine will forward history between frames.
		frame.frameDone.Add(1)
		frame.initAsync()

		go frame.startDecoder(stream.output)
	decodeFrame:
		// Go through all blocks of the frame.
		for {
			dec := <-d.decoders
			var dec *blockDec
			select {
			case <-stream.cancel:
				if !frame.sendErr(dec, io.EOF) {
					// To not let the decoder dangle, send it back.
					stream.output <- decodeOutput{d: dec}
				}
			case <-ctx.Done():
				break decodeStream
			default:
			case dec = <-d.decoders:
				// Once we have a decoder, we MUST return it.
			}
			err := frame.next(dec)
			switch err {
			case io.EOF:
				// End of current frame, no error
				println("EOF on next block")
				break decodeFrame
			case nil:
				continue
			default:
				println("block decoder returned", err)
			if !historySent {
				h := frame.history
				if debugDecoder {
					println("Alloc History:", h.allocFrameBuffer)
				}
				dec.async.newHist = &h
				dec.async.fcs = frame.FrameContentSize
				historySent = true
			} else {
				dec.async.newHist = nil
			}
			if debugDecoder && err != nil {
				println("next block returned error:", err)
			}
			dec.err = err
			dec.checkCRC = nil
			if dec.Last && frame.HasCheckSum && err == nil {
				crc, err := frame.rawInput.readSmall(4)
				if err != nil {
					println("CRC missing?", err)
					dec.err = err
				}
				var tmp [4]byte
				copy(tmp[:], crc)
				dec.checkCRC = tmp[:]
				if debugDecoder {
					println("found crc to check:", dec.checkCRC)
				}
			}
			err = dec.err
			last := dec.Last
			seqPrepare <- dec
			if err != nil {
				break decodeStream
			}
			if last {
				break
			}
			// All blocks have started decoding, check if there are more frames.
			println("waiting for done")
			frame.frameDone.Wait()
			println("done waiting...")
		}
		frame.frameDone.Wait()
		println("Sending EOS")
		stream.output <- decodeOutput{err: errEndOfStream}
	}
	close(seqPrepare)
	wg.Wait()
	d.frame.history.b = frameHistCache
}
22
vendor/github.com/klauspost/compress/zstd/decoder_options.go
generated
vendored
@@ -28,6 +28,9 @@ func (o *decoderOptions) setDefault() {
	concurrent: runtime.GOMAXPROCS(0),
	maxWindowSize: MaxWindowSize,
}
if o.concurrent > 4 {
	o.concurrent = 4
}
o.maxDecodedSize = 1 << 63
}

@@ -37,16 +40,25 @@ func WithDecoderLowmem(b bool) DOption {
	return func(o *decoderOptions) error { o.lowMem = b; return nil }
}

// WithDecoderConcurrency will set the concurrency,
// meaning the maximum number of decoders to run concurrently.
// The value supplied must be at least 1.
// By default this will be set to GOMAXPROCS.
// WithDecoderConcurrency sets the number of created decoders.
// When decoding blocks with DecodeAll, this will limit the number
// of possible concurrently running decodes.
// When decoding streams, this will limit the number of
// inflight blocks.
// When decoding streams and setting maximum to 1,
// no async decoding will be done.
// When a value of 0 is provided GOMAXPROCS will be used.
// By default this will be set to 4 or GOMAXPROCS, whatever is lower.
func WithDecoderConcurrency(n int) DOption {
	return func(o *decoderOptions) error {
		if n <= 0 {
		if n < 0 {
			return errors.New("concurrency must be at least 1")
		}
		if n == 0 {
			o.concurrent = runtime.GOMAXPROCS(0)
		} else {
			o.concurrent = n
		}
		return nil
	}
}
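
With this change, concurrency selects between the two decoder code paths above. A minimal sketch (assumed caller, not from this diff):

	// Sync path: nextBlockSync drives decoding on the calling goroutine.
	dSync, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(1))

	// Async path: 0 now means GOMAXPROCS; with no option at all,
	// setDefault caps the default at min(GOMAXPROCS, 4).
	dAsync, err := zstd.NewReader(r, zstd.WithDecoderConcurrency(0))
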
52
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
@@ -98,23 +98,25 @@ func (e *Encoder) Reset(w io.Writer) {
	if cap(s.filling) == 0 {
		s.filling = make([]byte, 0, e.o.blockSize)
	}
	if e.o.concurrent > 1 {
		if cap(s.current) == 0 {
			s.current = make([]byte, 0, e.o.blockSize)
		}
		if cap(s.previous) == 0 {
			s.previous = make([]byte, 0, e.o.blockSize)
		}
		if s.encoder == nil {
			s.encoder = e.o.encoder()
		}
		s.current = s.current[:0]
		s.previous = s.previous[:0]
		if s.writing == nil {
			s.writing = &blockEnc{lowMem: e.o.lowMem}
			s.writing.init()
		}
		s.writing.initNewEncode()
	}
	if s.encoder == nil {
		s.encoder = e.o.encoder()
	}
	s.filling = s.filling[:0]
	s.current = s.current[:0]
	s.previous = s.previous[:0]
	s.encoder.Reset(e.o.dict, false)
	s.headerWritten = false
	s.eofWritten = false
@@ -258,6 +260,46 @@ func (e *Encoder) nextBlock(final bool) error {
	return s.err
}

	// SYNC:
	if e.o.concurrent == 1 {
		src := s.filling
		s.nInput += int64(len(s.filling))
		if debugEncoder {
			println("Adding sync block,", len(src), "bytes, final:", final)
		}
		enc := s.encoder
		blk := enc.Block()
		blk.reset(nil)
		enc.Encode(blk, src)
		blk.last = final
		if final {
			s.eofWritten = true
		}

		err := errIncompressible
		// If we got the exact same number of literals as input,
		// assume the literals cannot be compressed.
		if len(src) != len(blk.literals) || len(src) != e.o.blockSize {
			err = blk.encode(src, e.o.noEntropy, !e.o.allLitEntropy)
		}
		switch err {
		case errIncompressible:
			if debugEncoder {
				println("Storing incompressible block as raw")
			}
			blk.encodeRaw(src)
			// In fast mode, we do not transfer offsets, so we don't have to deal with changing them.
		case nil:
		default:
			s.err = err
			return err
		}
		_, s.err = s.w.Write(blk.output)
		s.nWritten += int64(len(blk.output))
		s.filling = s.filling[:0]
		return s.err
	}

	// Move blocks forward.
	s.filling, s.current, s.previous = s.previous[:0], s.filling, s.current
	s.nInput += int64(len(s.current))
1
vendor/github.com/klauspost/compress/zstd/encoder_options.go
generated
vendored
@@ -76,6 +76,7 @@ func WithEncoderCRC(b bool) EOption {
// WithEncoderConcurrency will set the concurrency,
// meaning the maximum number of encoders to run concurrently.
// The value supplied must be at least 1.
// For streams, setting a value of 1 will disable async compression.
// By default this will be set to GOMAXPROCS.
func WithEncoderConcurrency(n int) EOption {
	return func(o *encoderOptions) error {
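
The encoder side mirrors the decoder option. A sketch of selecting the sync encode path (assumed usage, not from this diff):

	// With concurrency 1 the encoder takes the new SYNC branch in
	// nextBlock (see encoder.go above) and never spawns workers.
	w, err := zstd.NewWriter(out, zstd.WithEncoderConcurrency(1))
	if err != nil {
		return err
	}
	defer w.Close()
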
200
vendor/github.com/klauspost/compress/zstd/framedec.go
generated
vendored
@@ -8,23 +8,17 @@ import (
	"bytes"
	"encoding/hex"
	"errors"
	"hash"
	"io"
	"sync"

	"github.com/klauspost/compress/zstd/internal/xxhash"
)

type frameDec struct {
	o decoderOptions
	crc hash.Hash64
	offset int64
	crc *xxhash.Digest

	WindowSize uint64

	// In order queue of blocks being decoded.
	decoding chan *blockDec

	// Frame history passed between blocks
	history history

@@ -34,15 +28,10 @@ type frameDec struct {
	bBuf byteBuf

	FrameContentSize uint64
	frameDone sync.WaitGroup

	DictionaryID *uint32
	HasCheckSum bool
	SingleSegment bool

	// asyncRunning indicates whether the async routine processes input on 'decoding'.
	asyncRunningMu sync.Mutex
	asyncRunning bool
}

const (
@@ -229,9 +218,10 @@ func (d *frameDec) reset(br byteBuffer) error {
		d.FrameContentSize = uint64(d1) | (uint64(d2) << 32)
	}
	if debugDecoder {
		println("field size bits:", v, "fcsSize:", fcsSize, "FrameContentSize:", d.FrameContentSize, hex.EncodeToString(b[:fcsSize]), "singleseg:", d.SingleSegment, "window:", d.WindowSize)
		println("Read FCS:", d.FrameContentSize)
	}
}

// Move this to shared.
d.HasCheckSum = fhd&(1<<2) != 0
if d.HasCheckSum {
@@ -264,10 +254,16 @@ func (d *frameDec) reset(br byteBuffer) error {
}
d.history.windowSize = int(d.WindowSize)
if d.o.lowMem && d.history.windowSize < maxBlockSize {
	d.history.maxSize = d.history.windowSize * 2
	d.history.allocFrameBuffer = d.history.windowSize * 2
	// TODO: Maybe use FrameContent size
} else {
	d.history.maxSize = d.history.windowSize + maxBlockSize
	d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
}

if debugDecoder {
	println("Frame: Dict:", d.DictionaryID, "FrameContentSize:", d.FrameContentSize, "singleseg:", d.SingleSegment, "window:", d.WindowSize, "crc:", d.HasCheckSum)
}

// history contains input - maybe we do something
d.rawInput = br
return nil
@@ -276,47 +272,16 @@ func (d *frameDec) reset(br byteBuffer) error {

// next will start decoding the next block from stream.
func (d *frameDec) next(block *blockDec) error {
	if debugDecoder {
		printf("decoding new block %p:%p", block, block.data)
		println("decoding new block")
	}
	err := block.reset(d.rawInput, d.WindowSize)
	if err != nil {
		println("block error:", err)
		// Signal the frame decoder we have a problem.
		d.sendErr(block, err)
		block.sendErr(err)
		return err
	}
	block.input <- struct{}{}
	if debugDecoder {
		println("next block:", block)
	}
	d.asyncRunningMu.Lock()
	defer d.asyncRunningMu.Unlock()
	if !d.asyncRunning {
		return nil
	}
	if block.Last {
		// We indicate the frame is done by sending io.EOF
		d.decoding <- block
		return io.EOF
	}
	d.decoding <- block
	return nil
}

// sendErr will queue an error block on the frame.
// This will cause the frame decoder to return when it encounters the block.
// Returns true if the decoder was added.
func (d *frameDec) sendErr(block *blockDec, err error) bool {
	d.asyncRunningMu.Lock()
	defer d.asyncRunningMu.Unlock()
	if !d.asyncRunning {
		return false
	}

	println("sending error", err.Error())
	block.sendErr(err)
	d.decoding <- block
	return true
}

// checkCRC will check the checksum if the frame has one.
@@ -340,7 +305,7 @@ func (d *frameDec) checkCRC() error {
	return err
}

if !bytes.Equal(tmp[:], want) {
if !bytes.Equal(tmp[:], want) && !ignoreCRC {
	if debugDecoder {
		println("CRC Check Failed:", tmp[:], "!=", want)
	}
@@ -352,131 +317,13 @@ func (d *frameDec) checkCRC() error {
	return nil
}

func (d *frameDec) initAsync() {
	if !d.o.lowMem && !d.SingleSegment {
		// set max extra size history to 2MB.
		d.history.maxSize = d.history.windowSize + maxBlockSize
	}
	// re-alloc if more than one extra block size.
	if d.o.lowMem && cap(d.history.b) > d.history.maxSize+maxBlockSize {
		d.history.b = make([]byte, 0, d.history.maxSize)
	}
	if cap(d.history.b) < d.history.maxSize {
		d.history.b = make([]byte, 0, d.history.maxSize)
	}
	if cap(d.decoding) < d.o.concurrent {
		d.decoding = make(chan *blockDec, d.o.concurrent)
	}
	if debugDecoder {
		h := d.history
		printf("history init. len: %d, cap: %d", len(h.b), cap(h.b))
	}
	d.asyncRunningMu.Lock()
	d.asyncRunning = true
	d.asyncRunningMu.Unlock()
}

// startDecoder will start decoding blocks and write them to the writer.
// The decoder will stop as soon as an error occurs or at end of frame.
// When the frame has finished decoding the *bufio.Reader
// containing the remaining input will be sent on frameDec.frameDone.
func (d *frameDec) startDecoder(output chan decodeOutput) {
	written := int64(0)

	defer func() {
		d.asyncRunningMu.Lock()
		d.asyncRunning = false
		d.asyncRunningMu.Unlock()

		// Drain the currently decoding.
		d.history.error = true
	flushdone:
		for {
			select {
			case b := <-d.decoding:
				b.history <- &d.history
				output <- <-b.result
			default:
				break flushdone
			}
		}
		println("frame decoder done, signalling done")
		d.frameDone.Done()
	}()
	// Get decoder for first block.
	block := <-d.decoding
	block.history <- &d.history
	for {
		var next *blockDec
		// Get result
		r := <-block.result
		if r.err != nil {
			println("Result contained error", r.err)
			output <- r
			return
		}
		if debugDecoder {
			println("got result, from ", d.offset, "to", d.offset+int64(len(r.b)))
			d.offset += int64(len(r.b))
		}
		if !block.Last {
			// Send history to next block
			select {
			case next = <-d.decoding:
				if debugDecoder {
					println("Sending ", len(d.history.b), "bytes as history")
				}
				next.history <- &d.history
			default:
				// Wait until we have sent the block, so
				// other decoders can potentially get the decoder.
				next = nil
			}
		}

		// Add checksum, async to decoding.
		if d.HasCheckSum {
			n, err := d.crc.Write(r.b)
			if err != nil {
				r.err = err
				if n != len(r.b) {
					r.err = io.ErrShortWrite
				}
				output <- r
				return
			}
		}
		written += int64(len(r.b))
		if d.SingleSegment && uint64(written) > d.FrameContentSize {
			println("runDecoder: single segment and", uint64(written), ">", d.FrameContentSize)
			r.err = ErrFrameSizeExceeded
			output <- r
			return
		}
		if block.Last {
			r.err = d.checkCRC()
			output <- r
			return
		}
		output <- r
		if next == nil {
			// There was no decoder available, we wait for one now that we have sent to the writer.
			if debugDecoder {
				println("Sending ", len(d.history.b), " bytes as history")
			}
			next = <-d.decoding
			next.history <- &d.history
		}
		block = next
	}
}

// runDecoder will create a sync decoder that will decode a block of data.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
	saved := d.history.b

	// We use the history for output to avoid copying it.
	d.history.b = dst
	d.history.ignoreBuffer = len(dst)
	// Store input length, so we only check new data.
	crcStart := len(dst)
	var err error
@@ -489,7 +336,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
		println("next block:", dec)
	}
	err = dec.decodeBuf(&d.history)
	if err != nil || dec.Last {
	if err != nil {
		break
	}
	if uint64(len(d.history.b)) > d.o.maxDecodedSize {
@@ -501,10 +348,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
		err = ErrFrameSizeExceeded
		break
	}
	if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
		println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
		err = ErrFrameSizeExceeded
		break
	}
	if dec.Last {
		break
	}
	if debugDecoder && d.FrameContentSize > 0 {
		println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
	}
}
dst = d.history.b
if err == nil {
	if d.HasCheckSum {
	if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
		err = ErrFrameSizeMismatch
	} else if d.HasCheckSum {
		var n int
		n, err = d.crc.Write(dst[crcStart:])
		if err == nil {
11
vendor/github.com/klauspost/compress/zstd/fuzz.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
//go:build gofuzz
// +build gofuzz

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = true
11
vendor/github.com/klauspost/compress/zstd/fuzz_none.go
generated
vendored
Normal file
@@ -0,0 +1,11 @@
//go:build !gofuzz
// +build !gofuzz

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
// Based on work by Yann Collet, released under BSD License.

package zstd

// ignoreCRC can be used for fuzz testing to ignore CRC values...
const ignoreCRC = false
38
vendor/github.com/klauspost/compress/zstd/history.go
generated
vendored
@@ -10,12 +10,22 @@ import (

// history contains the information transferred between blocks.
type history struct {
	b []byte
	// Literal decompression
	huffTree *huff0.Scratch
	recentOffsets [3]int

	// Sequence decompression
	decoders sequenceDecs
	recentOffsets [3]int

	// History buffer...
	b []byte

	// ignoreBuffer is meant to ignore a number of bytes
	// when checking for matches in history
	ignoreBuffer int

	windowSize int
	maxSize int
	allocFrameBuffer int // needed?
	error bool
	dict *dict
}
@@ -24,6 +34,7 @@ type history struct {
// The history must already have been initialized to the desired size.
func (h *history) reset() {
	h.b = h.b[:0]
	h.ignoreBuffer = 0
	h.error = false
	h.recentOffsets = [3]int{1, 4, 8}
	if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
@@ -35,7 +46,7 @@ func (h *history) reset() {
	if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
		fseDecoderPool.Put(f)
	}
	h.decoders = sequenceDecs{}
	h.decoders = sequenceDecs{br: h.decoders.br}
	if h.huffTree != nil {
		if h.dict == nil || h.dict.litEnc != h.huffTree {
			huffDecoderPool.Put(h.huffTree)
@@ -54,6 +65,7 @@ func (h *history) setDict(dict *dict) {
	h.decoders.litLengths = dict.llDec
	h.decoders.offsets = dict.ofDec
	h.decoders.matchLengths = dict.mlDec
	h.decoders.dict = dict.content
	h.recentOffsets = dict.offsets
	h.huffTree = dict.litEnc
}
@@ -83,6 +95,24 @@ func (h *history) append(b []byte) {
	copy(h.b[h.windowSize-len(b):], b)
}

// ensureBlock will ensure there is space for at least one block...
func (h *history) ensureBlock() {
	if cap(h.b) < h.allocFrameBuffer {
		h.b = make([]byte, 0, h.allocFrameBuffer)
		return
	}

	avail := cap(h.b) - len(h.b)
	if avail >= h.windowSize || avail > maxCompressedBlockSize {
		return
	}
	// Move data down so we only have window size left.
	// We know we have less than window size in b at this point.
	discard := len(h.b) - h.windowSize
	copy(h.b, h.b[discard:])
	h.b = h.b[:h.windowSize]
}

// append bytes to history without ever discarding anything.
func (h *history) appendKeep(b []byte) {
	h.b = append(h.b, b...)
335
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
@@ -20,6 +20,10 @@ type seq struct {
	llCode, mlCode, ofCode uint8
}

type seqVals struct {
	ll, ml, mo int
}

func (s seq) String() string {
	if s.offset <= 3 {
		if s.offset == 0 {
@@ -61,16 +65,18 @@ type sequenceDecs struct {
	offsets sequenceDec
	matchLengths sequenceDec
	prevOffset [3]int
	hist []byte
	dict []byte
	literals []byte
	out []byte
	nSeqs int
	br *bitReader
	seqSize int
	windowSize int
	maxBits uint8
}

// initialize all 3 decoders from the stream input.
func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []byte) error {
func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) error {
	if err := s.litLengths.init(br); err != nil {
		return errors.New("litLengths:" + err.Error())
	}
@@ -80,8 +86,7 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
	if err := s.matchLengths.init(br); err != nil {
		return errors.New("matchLengths:" + err.Error())
	}
	s.literals = literals
	s.hist = hist.b
	s.br = br
	s.prevOffset = hist.recentOffsets
	s.maxBits = s.litLengths.fse.maxBits + s.offsets.fse.maxBits + s.matchLengths.fse.maxBits
	s.windowSize = hist.windowSize
@@ -94,11 +99,254 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, literals, out []
}

// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
func (s *sequenceDecs) decode(seqs []seqVals) error {
	br := s.br

	// Grab full sizes tables, to avoid bounds checks.
	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
	s.seqSize = 0
	litRemain := len(s.literals)

	for i := range seqs {
		var ll, mo, ml int
		if br.off > 4+((maxOffsetBits+16+16)>>3) {
			// inlined function:
			// ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

			// Final will not read from stream.
			var llB, mlB, moB uint8
			ll, llB = llState.final()
			ml, mlB = mlState.final()
			mo, moB = ofState.final()

			// extra bits are stored in reverse order.
			br.fillFast()
			mo += br.getBits(moB)
			if s.maxBits > 32 {
				br.fillFast()
			}
			ml += br.getBits(mlB)
			ll += br.getBits(llB)

			if moB > 1 {
				s.prevOffset[2] = s.prevOffset[1]
				s.prevOffset[1] = s.prevOffset[0]
				s.prevOffset[0] = mo
			} else {
				// mo = s.adjustOffset(mo, ll, moB)
				// Inlined for rather big speedup
				if ll == 0 {
					// There is an exception though, when current sequence's literals_length = 0.
					// In this case, repeated offsets are shifted by one, so an offset_value of 1 means Repeated_Offset2,
					// an offset_value of 2 means Repeated_Offset3, and an offset_value of 3 means Repeated_Offset1 - 1_byte.
					mo++
				}

				if mo == 0 {
					mo = s.prevOffset[0]
				} else {
					var temp int
					if mo == 3 {
						temp = s.prevOffset[0] - 1
					} else {
						temp = s.prevOffset[mo]
					}

					if temp == 0 {
						// 0 is not valid; input is corrupted; force offset to 1
						println("WARNING: temp was 0")
						temp = 1
					}

					if mo != 1 {
						s.prevOffset[2] = s.prevOffset[1]
					}
					s.prevOffset[1] = s.prevOffset[0]
					s.prevOffset[0] = temp
					mo = temp
				}
			}
			br.fillFast()
		} else {
			if br.overread() {
				if debugDecoder {
					printf("reading sequence %d, exceeded available data\n", i)
				}
				return io.ErrUnexpectedEOF
			}
			ll, mo, ml = s.next(br, llState, mlState, ofState)
			br.fill()
		}

		if debugSequences {
			println("Seq", i, "Litlen:", ll, "mo:", mo, "(abs) ml:", ml)
		}
		// Evaluate.
		// We might be doing this async, so do it early.
		if mo == 0 && ml > 0 {
			return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
		}
		if ml > maxMatchLen {
			return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
		}
		s.seqSize += ll + ml
		if s.seqSize > maxBlockSize {
			return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
		}
		litRemain -= ll
		if litRemain < 0 {
			return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, litRemain+ll)
		}
		seqs[i] = seqVals{
			ll: ll,
			ml: ml,
			mo: mo,
		}
		if i == len(seqs)-1 {
			// This is the last sequence, so we shouldn't update state.
			break
		}

		// Manually inlined, ~ 5-20% faster
		// Update all 3 states at once. Approx 20% faster.
		nBits := llState.nbBits() + mlState.nbBits() + ofState.nbBits()
		if nBits == 0 {
			llState = llTable[llState.newState()&maxTableMask]
			mlState = mlTable[mlState.newState()&maxTableMask]
			ofState = ofTable[ofState.newState()&maxTableMask]
		} else {
			bits := br.get32BitsFast(nBits)
			lowBits := uint16(bits >> ((ofState.nbBits() + mlState.nbBits()) & 31))
			llState = llTable[(llState.newState()+lowBits)&maxTableMask]

			lowBits = uint16(bits >> (ofState.nbBits() & 31))
			lowBits &= bitMask[mlState.nbBits()&15]
			mlState = mlTable[(mlState.newState()+lowBits)&maxTableMask]

			lowBits = uint16(bits) & bitMask[ofState.nbBits()&15]
			ofState = ofTable[(ofState.newState()+lowBits)&maxTableMask]
		}
	}
	s.seqSize += litRemain
	if s.seqSize > maxBlockSize {
		return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
	}
	err := br.close()
	if err != nil {
		printf("Closing sequences: %v, %+v\n", err, *br)
	}
	return err
}

// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
	// Ensure we have enough output size...
	if len(s.out)+s.seqSize > cap(s.out) {
		addBytes := s.seqSize + len(s.out)
		s.out = append(s.out, make([]byte, addBytes)...)
		s.out = s.out[:len(s.out)-addBytes]
	}

	if debugDecoder {
		printf("Execute %d seqs with hist %d, dict %d, literals: %d into %d bytes\n", len(seqs), len(hist), len(s.dict), len(s.literals), s.seqSize)
	}

	var t = len(s.out)
	out := s.out[:t+s.seqSize]

	for _, seq := range seqs {
		// Add literals
		copy(out[t:], s.literals[:seq.ll])
		t += seq.ll
		s.literals = s.literals[seq.ll:]

		// Copy from dictionary...
		if seq.mo > t+len(hist) || seq.mo > s.windowSize {
			if len(s.dict) == 0 {
				return fmt.Errorf("match offset (%d) bigger than current history (%d)", seq.mo, t+len(hist))
			}

			// we may be in dictionary.
			dictO := len(s.dict) - (seq.mo - (t + len(hist)))
			if dictO < 0 || dictO >= len(s.dict) {
				return fmt.Errorf("match offset (%d) bigger than current history+dict (%d)", seq.mo, t+len(hist)+len(s.dict))
			}
			end := dictO + seq.ml
			if end > len(s.dict) {
				n := len(s.dict) - dictO
				copy(out[t:], s.dict[dictO:])
				t += n
				seq.ml -= n
			} else {
				copy(out[t:], s.dict[dictO:end])
				t += end - dictO
				continue
			}
		}

		// Copy from history.
		if v := seq.mo - t; v > 0 {
			// v is the start position in history from end.
			start := len(hist) - v
			if seq.ml > v {
				// Some goes into current block.
				// Copy remainder of history
				copy(out[t:], hist[start:])
				t += v
				seq.ml -= v
			} else {
				copy(out[t:], hist[start:start+seq.ml])
				t += seq.ml
				continue
			}
		}
		// We must be in current buffer now
		if seq.ml > 0 {
			start := t - seq.mo
			if seq.ml <= t-start {
				// No overlap
				copy(out[t:], out[start:start+seq.ml])
				t += seq.ml
				continue
			} else {
				// Overlapping copy
				// Extend destination slice and copy one byte at a time.
				src := out[start : start+seq.ml]
				dst := out[t:]
				dst = dst[:len(src)]
				t += len(src)
				// Destination is the space we just added.
				for i := range src {
					dst[i] = src[i]
				}
			}
		}
	}
	// Add final literals
	copy(out[t:], s.literals)
	if debugDecoder {
		t += len(s.literals)
		if t != len(out) {
			panic(fmt.Errorf("length mismatch, want %d, got %d, ss: %d", len(out), t, s.seqSize))
		}
	}
	s.out = out

	return nil
}
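
The byte-at-a-time loop above is deliberate. Go's built-in copy has memmove semantics, so with overlapping slices it reproduces the original source bytes, while an LZ77 match with offset smaller than length must re-read bytes written earlier in the same copy. A small standalone illustration (not from this diff):

	out := []byte{'a', 'b'}
	out = append(out, make([]byte, 4)...)
	src := out[0:4] // match: offset 2, length 4, overlaps dst
	dst := out[2:6]
	for i := range src {
		dst[i] = src[i] // reads bytes this loop just wrote
	}
	// out == "ababab"; copy(dst, src) would have produced "abab\x00\x00".
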

// decode sequences from the stream with the provided history.
func (s *sequenceDecs) decodeSync(history *history) error {
	br := s.br
	seqs := s.nSeqs
	startSize := len(s.out)
	// Grab full sizes tables, to avoid bounds checks.
	llTable, mlTable, ofTable := s.litLengths.fse.dt[:maxTablesize], s.matchLengths.fse.dt[:maxTablesize], s.offsets.fse.dt[:maxTablesize]
	llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
	hist := history.b[history.ignoreBuffer:]
	out := s.out

	for i := seqs - 1; i >= 0; i-- {
		if br.overread() {
@@ -151,7 +399,7 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {

	if temp == 0 {
		// 0 is not valid; input is corrupted; force offset to 1
		println("temp was 0")
		println("WARNING: temp was 0")
		temp = 1
	}

@@ -176,51 +424,49 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
	if ll > len(s.literals) {
		return fmt.Errorf("unexpected literal count, want %d bytes, but only %d is available", ll, len(s.literals))
	}
	size := ll + ml + len(s.out)
	size := ll + ml + len(out)
	if size-startSize > maxBlockSize {
		return fmt.Errorf("output (%d) bigger than max block size", size)
	}
	if size > cap(s.out) {
	if size > cap(out) {
		// Not enough size, which can happen under high volume block streaming conditions
		// but could also happen if the destination slice is too small for sync operations.
		// over-allocating here can create a large amount of GC pressure so we try to keep
		// it as contained as possible
		used := len(s.out) - startSize
		used := len(out) - startSize
		addBytes := 256 + ll + ml + used>>2
		// Clamp to max block size.
		if used+addBytes > maxBlockSize {
			addBytes = maxBlockSize - used
		}
		s.out = append(s.out, make([]byte, addBytes)...)
		s.out = s.out[:len(s.out)-addBytes]
		out = append(out, make([]byte, addBytes)...)
		out = out[:len(out)-addBytes]
	}
	if ml > maxMatchLen {
		return fmt.Errorf("match len (%d) bigger than max allowed length", ml)
	}

	// Add literals
	s.out = append(s.out, s.literals[:ll]...)
	out = append(out, s.literals[:ll]...)
	s.literals = s.literals[ll:]
	out := s.out

	if mo == 0 && ml > 0 {
		return fmt.Errorf("zero matchoff and matchlen (%d) > 0", ml)
	}

	if mo > len(s.out)+len(hist) || mo > s.windowSize {
	if mo > len(out)+len(hist) || mo > s.windowSize {
		if len(s.dict) == 0 {
			return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
			return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
		}

		// we may be in dictionary.
		dictO := len(s.dict) - (mo - (len(s.out) + len(hist)))
		dictO := len(s.dict) - (mo - (len(out) + len(hist)))
		if dictO < 0 || dictO >= len(s.dict) {
			return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(s.out)+len(hist))
			return fmt.Errorf("match offset (%d) bigger than current history (%d)", mo, len(out)+len(hist))
		}
		end := dictO + ml
		if end > len(s.dict) {
			out = append(out, s.dict[dictO:]...)
			mo -= len(s.dict) - dictO
			ml -= len(s.dict) - dictO
		} else {
			out = append(out, s.dict[dictO:end]...)
@@ -231,26 +477,25 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {

	// Copy from history.
	// TODO: Blocks without history could be made to ignore this completely.
	if v := mo - len(s.out); v > 0 {
	if v := mo - len(out); v > 0 {
		// v is the start position in history from end.
		start := len(s.hist) - v
		start := len(hist) - v
		if ml > v {
			// Some goes into current block.
			// Copy remainder of history
			out = append(out, s.hist[start:]...)
			mo -= v
			out = append(out, hist[start:]...)
			ml -= v
		} else {
			out = append(out, s.hist[start:start+ml]...)
			out = append(out, hist[start:start+ml]...)
			ml = 0
		}
	}
	// We must be in current buffer now
	if ml > 0 {
		start := len(s.out) - mo
		if ml <= len(s.out)-start {
		start := len(out) - mo
		if ml <= len(out)-start {
			// No overlap
			out = append(out, s.out[start:start+ml]...)
			out = append(out, out[start:start+ml]...)
		} else {
			// Overlapping copy
			// Extend destination slice and copy one byte at a time.
@@ -264,7 +509,6 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
		}
	}
}
s.out = out
if i == 0 {
	// This is the last sequence, so we shouldn't update state.
	break
@@ -292,8 +536,8 @@ func (s *sequenceDecs) decode(seqs int, br *bitReader, hist []byte) error {
}

// Add final literals
s.out = append(s.out, s.literals...)
return nil
s.out = append(out, s.literals...)
return br.close()
}

// update states, at least 27 bits must be available.
@@ -457,36 +701,3 @@ func (s *sequenceDecs) adjustOffset(offset, litLen int, offsetB uint8) int {
	s.prevOffset[0] = temp
	return temp
}

// mergeHistory will merge history.
func (s *sequenceDecs) mergeHistory(hist *sequenceDecs) (*sequenceDecs, error) {
	for i := uint(0); i < 3; i++ {
		var sNew, sHist *sequenceDec
		switch i {
		default:
			// same as "case 0":
			sNew = &s.litLengths
			sHist = &hist.litLengths
		case 1:
			sNew = &s.offsets
			sHist = &hist.offsets
		case 2:
			sNew = &s.matchLengths
			sHist = &hist.matchLengths
		}
		if sNew.repeat {
			if sHist.fse == nil {
				return nil, fmt.Errorf("sequence stream %d, repeat requested, but no history", i)
			}
			continue
		}
		if sNew.fse == nil {
			return nil, fmt.Errorf("sequence stream %d, no fse found", i)
		}
		if sHist.fse != nil && !sHist.fse.preDefined {
			fseDecoderPool.Put(sHist.fse)
		}
		sHist.fse = sNew.fse
	}
	return hist, nil
}
4
vendor/github.com/klauspost/compress/zstd/zstd.go
generated
vendored
@@ -75,6 +75,10 @@ var (
	// This is only returned if SingleSegment is specified on the frame.
	ErrFrameSizeExceeded = errors.New("frame size exceeded")

	// ErrFrameSizeMismatch is returned if the stated frame size does not match the expected size.
	// This is only returned if SingleSegment is specified on the frame.
	ErrFrameSizeMismatch = errors.New("frame size does not match size on stream")

	// ErrCRCMismatch is returned if CRC mismatches.
	ErrCRCMismatch = errors.New("CRC check failed")
4
vendor/github.com/mitchellh/mapstructure/CHANGELOG.md
generated
vendored
@@ -1,3 +1,7 @@
## 1.4.3

* Fix cases where `json.Number` didn't decode properly [GH-261]

## 1.4.2

* Custom name matchers to support any sort of casing, formatting, etc. for
8
vendor/github.com/mitchellh/mapstructure/mapstructure.go
generated
vendored
@@ -684,16 +684,12 @@ func (d *Decoder) decodeUint(name string, data interface{}, val reflect.Value) e
	}
case dataType.PkgPath() == "encoding/json" && dataType.Name() == "Number":
	jn := data.(json.Number)
	i, err := jn.Int64()
	i, err := strconv.ParseUint(string(jn), 0, 64)
	if err != nil {
		return fmt.Errorf(
			"error decoding json.Number into %s: %s", name, err)
	}
	if i < 0 && !d.config.WeaklyTypedInput {
		return fmt.Errorf("cannot parse '%s', %d overflows uint",
			name, i)
	}
	val.SetUint(uint64(i))
	val.SetUint(i)
default:
	return fmt.Errorf(
		"'%s' expected type '%s', got unconvertible type '%s', value: '%v'",
8
vendor/github.com/moby/buildkit/client/prune.go
generated
vendored
@@ -59,10 +59,10 @@ type PruneOption interface {
}

type PruneInfo struct {
	Filter []string
	All bool
	KeepDuration time.Duration
	KeepBytes int64
	Filter []string `json:"filter"`
	All bool `json:"all"`
	KeepDuration time.Duration `json:"keepDuration"`
	KeepBytes int64 `json:"keepBytes"`
}

type pruneOptionFunc func(*PruneInfo)
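
The added tags give stable lower-camel keys when the struct is marshalled; a sketch of the effect (assumed usage of the buildkit client package):

	info := client.PruneInfo{All: true, KeepBytes: 1 << 30}
	b, _ := json.Marshal(info)
	// string(b) == `{"filter":null,"all":true,"keepDuration":0,"keepBytes":1073741824}`
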
8
vendor/github.com/moby/buildkit/client/workers.go
generated
vendored
@@ -13,10 +13,10 @@ import (

// WorkerInfo contains information about a worker
type WorkerInfo struct {
	ID string
	Labels map[string]string
	Platforms []ocispecs.Platform
	GCPolicy []PruneInfo
	ID string `json:"id"`
	Labels map[string]string `json:"labels"`
	Platforms []ocispecs.Platform `json:"platforms"`
	GCPolicy []PruneInfo `json:"gcPolicy"`
}

// ListWorkers lists all active workers
1
vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go
generated
vendored
@@ -70,6 +70,7 @@ type OCIConfig struct {
	// For use in storing the OCI worker binary name that will replace buildkit-runc
	Binary string `toml:"binary"`
	ProxySnapshotterPath string `toml:"proxySnapshotterPath"`
	DefaultCgroupParent string `toml:"defaultCgroupParent"`

	// StargzSnapshotterConfig is configuration for stargz snapshotter.
	// We use a generic map[string]interface{} in order to remove the dependency
3
vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go
generated
vendored
@@ -1,7 +1,6 @@
package exptypes

import (
	digest "github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

@@ -16,8 +15,6 @@ const (
	ExporterPlatformsKey = "refs.platforms"
)

const EmptyGZLayer = digest.Digest("sha256:4f4fb700ef54461cfa02571ae0db9a0dc1e0cdb5577484a6d75e68dc38e8acc1")

type Platforms struct {
	Platforms []Platform
}
32
vendor/github.com/moby/buildkit/util/imageutil/buildinfo.go
generated
vendored
Normal file
@@ -0,0 +1,32 @@
package imageutil

import (
	"encoding/base64"
	"encoding/json"

	binfotypes "github.com/moby/buildkit/util/buildinfo/types"
	"github.com/pkg/errors"
)

// BuildInfo returns build info from image config.
func BuildInfo(dt []byte) (*binfotypes.BuildInfo, error) {
	if len(dt) == 0 {
		return nil, nil
	}
	var config binfotypes.ImageConfig
	if err := json.Unmarshal(dt, &config); err != nil {
		return nil, errors.Wrap(err, "failed to unmarshal image config")
	}
	if len(config.BuildInfo) == 0 {
		return nil, nil
	}
	dtbi, err := base64.StdEncoding.DecodeString(config.BuildInfo)
	if err != nil {
		return nil, err
	}
	var bi binfotypes.BuildInfo
	if err = json.Unmarshal(dtbi, &bi); err != nil {
		return nil, errors.Wrap(err, "failed to decode buildinfo from image config")
	}
	return &bi, nil
}
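
A sketch of how a caller might use the new helper (the fields of binfotypes.BuildInfo are not shown in this diff, so treat them as assumptions):

	bi, err := imageutil.BuildInfo(cfgBytes) // cfgBytes: raw image config JSON
	if err != nil {
		return err
	}
	if bi == nil {
		// config carried no embedded build info; both nil returns above
		// mean "nothing to report" rather than an error
	}
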
57
vendor/github.com/moby/buildkit/util/progress/progressui/display.go
generated
vendored
@@ -119,7 +119,6 @@ type trace struct {
	localTimeDiff time.Duration
	vertexes []*vertex
	byDigest map[digest.Digest]*vertex
	nextIndex int
	updates map[digest.Digest]struct{}
	modeConsole bool
	groups map[string]*vertexGroup // group id -> group
@@ -156,7 +155,7 @@ type vertex struct {
	// Interval start time in unix nano -> interval. Using a map ensures
	// that updates for the same interval overwrite their previous updates.
	intervals map[int64]interval
	mostRecentStart *time.Time
	mergedIntervals []interval

	// whether the vertex should be hidden due to being in a progress group
	// that doesn't have any non-weak members that have started
@@ -171,17 +170,23 @@ func (v *vertex) update(c int) {
	v.count += c
}

func (v *vertex) mostRecentInterval() *interval {
	if v.isStarted() {
		ival := v.mergedIntervals[len(v.mergedIntervals)-1]
		return &ival
	}
	return nil
}

func (v *vertex) isStarted() bool {
	return len(v.intervals) > 0
	return len(v.mergedIntervals) > 0
}

func (v *vertex) isCompleted() bool {
	for _, ival := range v.intervals {
		if ival.stop == nil {
	if ival := v.mostRecentInterval(); ival != nil {
		return ival.stop != nil
	}
	return false
		}
	}
	return true
}
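
mergeIntervals itself is not shown in this diff; a plausible sketch of the behavior that isStarted/isCompleted rely on (an assumption, not the vendored implementation):

	func mergeIntervals(ivals []interval) []interval {
		// Sort by start so overlapping runs become adjacent.
		sort.Slice(ivals, func(i, j int) bool {
			return ivals[i].start.Before(*ivals[j].start)
		})
		var merged []interval
		for _, iv := range ivals {
			if len(merged) == 0 {
				merged = append(merged, iv)
				continue
			}
			last := &merged[len(merged)-1]
			// nil stop means the interval is still open.
			if last.stop == nil || !iv.start.After(*last.stop) {
				if last.stop != nil && (iv.stop == nil || iv.stop.After(*last.stop)) {
					last.stop = iv.stop
				}
				continue
			}
			merged = append(merged, iv)
		}
		return merged
	}
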
|
||||
|
||||
type vertexGroup struct {
|
||||
@@ -208,9 +213,6 @@ func (vg *vertexGroup) refresh() (changed, newlyStarted, newlyRevealed bool) {
|
||||
newlyStarted = true
|
||||
}
|
||||
vg.intervals[subVtx.Started.UnixNano()] = newInterval
|
||||
if vg.mostRecentStart == nil || subVtx.Started.After(*vg.mostRecentStart) {
|
||||
vg.mostRecentStart = subVtx.Started
|
||||
}
|
||||
|
||||
if !subVtx.ProgressGroup.Weak {
|
||||
vg.hidden = false
|
||||
@@ -241,6 +243,12 @@ func (vg *vertexGroup) refresh() (changed, newlyStarted, newlyRevealed bool) {
|
||||
newlyRevealed = true
|
||||
}
|
||||
|
||||
var ivals []interval
|
||||
for _, ival := range vg.intervals {
|
||||
ivals = append(ivals, ival)
|
||||
}
|
||||
vg.mergedIntervals = mergeIntervals(ivals)
|
||||
|
||||
return changed, newlyStarted, newlyRevealed
|
||||
}
|
||||
|
||||
@@ -410,7 +418,6 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) {
|
||||
if v.ProgressGroup != nil {
|
||||
group, ok := t.groups[v.ProgressGroup.Id]
|
||||
if !ok {
|
||||
t.nextIndex++
|
||||
group = &vertexGroup{
|
||||
vertex: &vertex{
|
||||
Vertex: &client.Vertex{
|
||||
@@ -419,7 +426,6 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) {
|
||||
},
|
||||
byID: make(map[string]*status),
|
||||
statusUpdates: make(map[string]struct{}),
|
||||
index: t.nextIndex,
|
||||
intervals: make(map[int64]interval),
|
||||
hidden: true,
|
||||
},
|
||||
@@ -441,11 +447,9 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) {
 		}
 		prev, ok := t.byDigest[v.Digest]
 		if !ok {
-			t.nextIndex++
 			t.byDigest[v.Digest] = &vertex{
 				byID:          make(map[string]*status),
 				statusUpdates: make(map[string]struct{}),
-				index:         t.nextIndex,
 				intervals:     make(map[int64]interval),
 			}
 			if t.modeConsole {
@@ -468,9 +472,11 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) {
 				start: v.Started,
 				stop:  v.Completed,
 			}
-			if t.byDigest[v.Digest].mostRecentStart == nil || v.Started.After(*t.byDigest[v.Digest].mostRecentStart) {
-				t.byDigest[v.Digest].mostRecentStart = v.Started
-			}
+			var ivals []interval
+			for _, ival := range t.byDigest[v.Digest].intervals {
+				ivals = append(ivals, ival)
+			}
+			t.byDigest[v.Digest].mergedIntervals = mergeIntervals(ivals)
 		}
 		t.byDigest[v.Digest].jobCached = false
 	}
@@ -479,7 +485,7 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) {
 		changed, newlyStarted, newlyRevealed := group.refresh()
 		if newlyStarted {
 			if t.localTimeDiff == 0 {
-				t.localTimeDiff = time.Since(*group.mostRecentStart)
+				t.localTimeDiff = time.Since(*group.mergedIntervals[0].start)
 			}
 		}
 		if group.hidden {
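The clock-skew offset localTimeDiff is now anchored to the group's earliest merged interval (mergedIntervals[0].start) rather than mostRecentStart, so a vertex that is canceled and retried no longer shifts the local-time baseline forward on each restart.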
@@ -539,8 +545,8 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) {
 			v.logs[len(v.logs)-1] = append(v.logs[len(v.logs)-1], dt...)
 		} else {
 			ts := time.Duration(0)
-			if v.isStarted() {
-				ts = l.Timestamp.Sub(*v.mostRecentStart)
+			if ival := v.mostRecentInterval(); ival != nil {
+				ts = l.Timestamp.Sub(*ival.start)
 			}
 			prec := 1
 			sec := ts.Seconds()
@@ -653,15 +659,14 @@ func (t *trace) displayInfo() (d displayInfo) {
 		}
 		for _, w := range v.warnings {
 			msg := "WARN: " + string(w.Short)
-			mostRecentStart := v.mostRecentStart
-			var mostRecentStop *time.Time
-			if mostRecentStart != nil {
-				mostRecentStop = v.intervals[mostRecentStart.UnixNano()].stop
+			var mostRecentInterval interval
+			if ival := v.mostRecentInterval(); ival != nil {
+				mostRecentInterval = *ival
 			}
 			j := &job{
 				intervals: []interval{{
-					start: addTime(mostRecentStart, t.localTimeDiff),
-					stop:  addTime(mostRecentStop, t.localTimeDiff),
+					start: addTime(mostRecentInterval.start, t.localTimeDiff),
+					stop:  addTime(mostRecentInterval.stop, t.localTimeDiff),
 				}},
 				name:       msg,
 				isCanceled: true,
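In the warnings hunk above, mostRecentInterval stays the zero-value interval when the vertex never started, so addTime (defined elsewhere in display.go, not shown in this diff) must tolerate a nil timestamp. A plausible nil-safe shape, assumed for illustration:

// addTime shifts a possibly-nil timestamp by d, preserving nil.
func addTime(tm *time.Time, d time.Duration) *time.Time {
	if tm == nil {
		return nil
	}
	t := tm.Add(d)
	return &t
}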
42
vendor/github.com/moby/buildkit/util/progress/progressui/printer.go
generated
vendored
@@ -31,6 +31,7 @@ type textMux struct {
 	current   digest.Digest
 	last      map[string]lastStatus
 	notFirst  bool
+	nextIndex int
 }
 
 func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
@@ -43,6 +44,11 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
 		return
 	}
 
+	if v.index == 0 {
+		p.nextIndex++
+		v.index = p.nextIndex
+	}
+
 	if dgst != p.current {
 		if p.current != "" {
 			old := t.byDigest[p.current]
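Moving nextIndex from trace to textMux and assigning v.index lazily here means a step number is consumed only when a vertex is first printed, so vertices hidden inside a progress group no longer leave gaps in the plain-text "#N" numbering.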
@@ -75,6 +81,7 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
 	}
 	v.events = v.events[:0]
 
+	isOpenStatus := false // remote cache loading can currently produce status updates without active vertex
 	for _, s := range v.statuses {
 		if _, ok := v.statusUpdates[s.ID]; ok {
 			doPrint := true
@@ -118,6 +125,8 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
 			}
 			if s.Completed != nil {
 				tm += " done"
+			} else {
+				isOpenStatus = true
 			}
 			fmt.Fprintf(p.w, "#%d %s%s%s\n", v.index, s.ID, bytes, tm)
 		}
@@ -157,7 +166,7 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
 	}
 
 	p.current = dgst
-	if v.Completed != nil {
+	if v.isCompleted() && !isOpenStatus {
 		p.current = ""
 		v.count = 0
 
@@ -174,8 +183,17 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
 			fmt.Fprintf(p.w, "#%d CACHED\n", v.index)
 		} else {
 			tm := ""
-			if v.Started != nil {
-				tm = fmt.Sprintf(" %.1fs", v.Completed.Sub(*v.Started).Seconds())
+			var ivals []interval
+			for _, ival := range v.intervals {
+				ivals = append(ivals, ival)
+			}
+			ivals = mergeIntervals(ivals)
+			if len(ivals) > 0 {
+				var dt float64
+				for _, ival := range ivals {
+					dt += ival.duration().Seconds()
+				}
+				tm = fmt.Sprintf(" %.1fs", dt)
 			}
 			fmt.Fprintf(p.w, "#%d DONE%s\n", v.index, tm)
 		}
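A worked example of the new DONE timing, reusing the interval/mergeIntervals sketch from the display.go section above (timestamps are hypothetical): where the old code printed a single Started-to-Completed span, which could misreport a vertex that was canceled and retried, the merged-interval sum reports only the time actually spent running.

package main

import (
	"fmt"
	"time"
)

func main() {
	t0 := time.Now()
	at := func(s int) *time.Time { t := t0.Add(time.Duration(s) * time.Second); return &t }

	// A vertex that ran 0s-4s, was canceled, then re-ran 10s-13s.
	runs := []interval{
		{start: at(0), stop: at(4)},
		{start: at(10), stop: at(13)},
	}

	var dt float64
	for _, ival := range mergeIntervals(runs) {
		dt += ival.duration().Seconds()
	}
	fmt.Printf("#1 DONE %.1fs\n", dt) // 7.0s of actual work, not one 13s span
}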
@@ -190,7 +208,9 @@ func sortCompleted(t *trace, m map[digest.Digest]struct{}) []digest.Digest {
 		out = append(out, k)
 	}
 	sort.Slice(out, func(i, j int) bool {
-		return t.byDigest[out[i]].Completed.Before(*t.byDigest[out[j]].Completed)
+		vtxi := t.byDigest[out[i]]
+		vtxj := t.byDigest[out[j]]
+		return vtxi.mostRecentInterval().stop.Before(*vtxj.mostRecentInterval().stop)
 	})
 	return out
 }
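Dereferencing mostRecentInterval().stop here is only safe because print (next hunk) adds digests to the completed set solely after v.isCompleted() returns true, which under the new definition guarantees at least one merged interval with a non-nil stop.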
@@ -204,7 +224,11 @@ func (p *textMux) print(t *trace) {
 		if !ok {
 			continue
 		}
-		if v.Vertex.Completed != nil {
+		if v.ProgressGroup != nil || v.hidden {
+			// skip vtxs in a group (they are merged into a single vtx) and hidden ones
+			continue
+		}
+		if v.isCompleted() {
 			completed[dgst] = struct{}{}
 		} else {
 			rest[dgst] = struct{}{}
@@ -226,13 +250,13 @@ func (p *textMux) print(t *trace) {
 
 	if len(rest) == 0 {
 		if current != "" {
-			if v := t.byDigest[current]; v.Started != nil && v.Completed == nil {
+			if v := t.byDigest[current]; v.isStarted() && !v.isCompleted() {
 				return
 			}
 		}
 		// make any open vertex active
 		for dgst, v := range t.byDigest {
-			if v.Started != nil && v.Completed == nil {
+			if v.isStarted() && !v.isCompleted() && v.ProgressGroup == nil && !v.hidden {
 				p.printVtx(t, dgst)
 				return
 			}
@@ -257,6 +281,10 @@ func (p *textMux) print(t *trace) {
 		if !ok {
 			continue
 		}
+		if v.lastBlockTime == nil {
+			// shouldn't happen, but not worth crashing over
+			continue
+		}
 		tm := now.Sub(*v.lastBlockTime)
 		speed := float64(v.count) / tm.Seconds()
 		overLimit := tm > maxDelay && dgst != current
Some files were not shown because too many files have changed in this diff.