vendor: update buildkit to master@8b7bcb900d3c
Signed-off-by: Justin Chadwell <me@jedevc.com>
commit 9541457c54 (parent c6cdcb02cf)
@@ -4,7 +4,6 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/containerd/containerd/pkg/seed"
 	"github.com/docker/buildx/commands"
 	"github.com/docker/buildx/version"
 	"github.com/docker/cli/cli"
@@ -16,6 +15,9 @@ import (
 	"github.com/moby/buildkit/solver/errdefs"
 	"github.com/moby/buildkit/util/stack"
 
+	//nolint:staticcheck // vendored dependencies may still use this
+	"github.com/containerd/containerd/pkg/seed"
+
 	_ "k8s.io/client-go/plugin/pkg/client/auth/azure"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/gcp"
 	_ "k8s.io/client-go/plugin/pkg/client/auth/oidc"
@@ -28,7 +30,9 @@ import (
 )
 
 func init() {
+	//nolint:staticcheck
 	seed.WithTimeAndRand()
+
 	stack.SetVersionInfo(version.Version, version.Revision)
 }
 
go.mod (85 changed lines)
@@ -6,11 +6,11 @@ require (
 	github.com/aws/aws-sdk-go-v2/config v1.15.5
 	github.com/compose-spec/compose-go v1.9.0
 	github.com/containerd/console v1.0.3
-	github.com/containerd/containerd v1.7.0-beta.3
+	github.com/containerd/containerd v1.7.0
-	github.com/docker/cli v23.0.0+incompatible
+	github.com/docker/cli v23.0.1+incompatible
 	github.com/docker/cli-docs-tool v0.5.1
 	github.com/docker/distribution v2.8.1+incompatible
-	github.com/docker/docker v23.0.0+incompatible
+	github.com/docker/docker v23.0.1+incompatible
 	github.com/docker/go-units v0.5.0
 	github.com/gofrs/flock v0.8.1
 	github.com/gogo/protobuf v1.3.2
@@ -19,7 +19,7 @@ require (
 	github.com/google/uuid v1.3.0
 	github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840
 	github.com/hashicorp/hcl/v2 v2.8.2
-	github.com/moby/buildkit v0.11.0-rc3.0.20230216100651-46e4e7e51b01
+	github.com/moby/buildkit v0.11.0-rc3.0.20230330090027-8b7bcb900d3c
 	github.com/moby/sys/mountinfo v0.6.2
 	github.com/moby/sys/signal v0.7.0
 	github.com/morikuni/aec v1.0.0
@@ -31,23 +31,23 @@ require (
 	github.com/sirupsen/logrus v1.9.0
 	github.com/spf13/cobra v1.6.1
 	github.com/spf13/pflag v1.0.5
-	github.com/stretchr/testify v1.8.1
+	github.com/stretchr/testify v1.8.2
 	github.com/zclconf/go-cty v1.10.0
-	go.opentelemetry.io/otel v1.11.2
+	go.opentelemetry.io/otel v1.14.0
-	go.opentelemetry.io/otel/trace v1.11.2
+	go.opentelemetry.io/otel/trace v1.14.0
 	golang.org/x/sync v0.1.0
 	golang.org/x/term v0.5.0
-	google.golang.org/grpc v1.51.0
+	google.golang.org/grpc v1.53.0
 	gopkg.in/yaml.v3 v3.0.1
-	k8s.io/api v0.25.4
+	k8s.io/api v0.26.2
-	k8s.io/apimachinery v0.25.4
+	k8s.io/apimachinery v0.26.2
-	k8s.io/apiserver v0.25.4
+	k8s.io/apiserver v0.26.2
-	k8s.io/client-go v0.25.4
+	k8s.io/client-go v0.26.2
 )
 
 require (
-	cloud.google.com/go/compute v1.14.0 // indirect
+	cloud.google.com/go/compute v1.18.0 // indirect
-	cloud.google.com/go/compute/metadata v0.2.1 // indirect
+	cloud.google.com/go/compute/metadata v0.2.3 // indirect
 	github.com/AdaLogics/go-fuzz-headers v0.0.0-20230106234847-43070de90fa1 // indirect
 	github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1 // indirect
 	github.com/Azure/go-autorest v14.2.0+incompatible // indirect
@@ -57,8 +57,6 @@ require (
 	github.com/Azure/go-autorest/logger v0.2.1 // indirect
 	github.com/Azure/go-autorest/tracing v0.6.0 // indirect
 	github.com/Microsoft/go-winio v0.6.0 // indirect
-	github.com/PuerkitoBio/purell v1.1.1 // indirect
-	github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 // indirect
 	github.com/agext/levenshtein v1.2.3 // indirect
 	github.com/agl/ed25519 v0.0.0-20170116200512-5312a6153412 // indirect
 	github.com/apparentlymart/go-cidr v1.0.1 // indirect
@@ -79,11 +77,11 @@ require (
 	github.com/bugsnag/panicwrap v1.2.0 // indirect
 	github.com/cenkalti/backoff v2.1.1+incompatible // indirect
 	github.com/cenkalti/backoff/v4 v4.2.0 // indirect
-	github.com/cespare/xxhash/v2 v2.1.2 // indirect
+	github.com/cespare/xxhash/v2 v2.2.0 // indirect
 	github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
 	github.com/containerd/continuity v0.3.0 // indirect
-	github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3 // indirect
+	github.com/containerd/ttrpc v1.2.1 // indirect
-	github.com/containerd/typeurl v1.0.3-0.20220422153119-7f6e6d160d67 // indirect
+	github.com/containerd/typeurl/v2 v2.1.0 // indirect
 	github.com/cyphar/filepath-securejoin v0.2.3 // indirect
 	github.com/davecgh/go-spew v1.1.1 // indirect
 	github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9 // indirect
@@ -93,13 +91,13 @@ require (
 	github.com/docker/go-metrics v0.0.1 // indirect
 	github.com/docker/libtrust v0.0.0-20150526203908-9cbd2a1374f4 // indirect
 	github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 // indirect
-	github.com/emicklei/go-restful/v3 v3.8.0 // indirect
+	github.com/emicklei/go-restful/v3 v3.10.1 // indirect
 	github.com/felixge/httpsnoop v1.0.3 // indirect
 	github.com/fvbommel/sortorder v1.0.1 // indirect
 	github.com/go-logr/logr v1.2.3 // indirect
 	github.com/go-logr/stdr v1.2.2 // indirect
 	github.com/go-openapi/jsonpointer v0.19.5 // indirect
-	github.com/go-openapi/jsonreference v0.19.5 // indirect
+	github.com/go-openapi/jsonreference v0.20.0 // indirect
 	github.com/go-openapi/swag v0.19.14 // indirect
 	github.com/go-sql-driver/mysql v1.6.0 // indirect
 	github.com/gogo/googleapis v1.4.1 // indirect
@@ -110,7 +108,7 @@ require (
 	github.com/google/gofuzz v1.2.0 // indirect
 	github.com/gorilla/mux v1.8.0 // indirect
 	github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 // indirect
-	github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 // indirect
+	github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 // indirect
 	github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed // indirect
 	github.com/imdario/mergo v0.3.13 // indirect
 	github.com/inconshreveable/mousetrap v1.0.1 // indirect
@@ -119,7 +117,7 @@ require (
 	github.com/josharian/intern v1.0.0 // indirect
 	github.com/json-iterator/go v1.1.12 // indirect
 	github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
-	github.com/klauspost/compress v1.15.12 // indirect
+	github.com/klauspost/compress v1.16.0 // indirect
 	github.com/kr/pretty v0.3.0 // indirect
 	github.com/mailru/easyjson v0.7.6 // indirect
 	github.com/mattn/go-shellwords v1.0.12 // indirect
@@ -151,26 +149,26 @@ require (
 	github.com/xeipuuv/gojsonpointer v0.0.0-20180127040702-4e3ac2762d5f // indirect
 	github.com/xeipuuv/gojsonreference v0.0.0-20180127040603-bd5ef7bd5415 // indirect
 	github.com/xeipuuv/gojsonschema v1.2.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.37.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.40.0 // indirect
-	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 // indirect
+	go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 // indirect
-	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2 // indirect
+	go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 // indirect
-	go.opentelemetry.io/otel/metric v0.34.0 // indirect
+	go.opentelemetry.io/otel/metric v0.37.0 // indirect
-	go.opentelemetry.io/otel/sdk v1.11.2 // indirect
+	go.opentelemetry.io/otel/sdk v1.14.0 // indirect
 	go.opentelemetry.io/proto/otlp v0.19.0 // indirect
 	golang.org/x/crypto v0.2.0 // indirect
-	golang.org/x/mod v0.6.0 // indirect
+	golang.org/x/mod v0.7.0 // indirect
 	golang.org/x/net v0.7.0 // indirect
-	golang.org/x/oauth2 v0.1.0 // indirect
+	golang.org/x/oauth2 v0.4.0 // indirect
-	golang.org/x/sys v0.5.0 // indirect
+	golang.org/x/sys v0.6.0 // indirect
 	golang.org/x/text v0.7.0 // indirect
 	golang.org/x/time v0.1.0 // indirect
-	golang.org/x/tools v0.2.0 // indirect
+	golang.org/x/tools v0.5.0 // indirect
 	google.golang.org/appengine v1.6.7 // indirect
-	google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 // indirect
+	google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 // indirect
 	google.golang.org/protobuf v1.28.1 // indirect
 	gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c // indirect
 	gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
@@ -178,10 +176,17 @@ require (
 	gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
 	gopkg.in/inf.v0 v0.9.1 // indirect
 	gopkg.in/yaml.v2 v2.4.0 // indirect
-	k8s.io/klog/v2 v2.80.1 // indirect
+	k8s.io/klog/v2 v2.90.1 // indirect
-	k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 // indirect
+	k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 // indirect
-	k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 // indirect
+	k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 // indirect
 	sigs.k8s.io/json v0.0.0-20220713155537-f223a00ba0e2 // indirect
 	sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect
 	sigs.k8s.io/yaml v1.3.0 // indirect
 )
+
+replace (
+	k8s.io/api => k8s.io/api v0.25.4
+	k8s.io/apimachinery => k8s.io/apimachinery v0.25.4
+	k8s.io/apiserver => k8s.io/apiserver v0.25.4
+	k8s.io/client-go => k8s.io/client-go v0.25.4
+)
go.sum (156 changed lines)
@@ -19,10 +19,10 @@ cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvf
 cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
 cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
 cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
-cloud.google.com/go/compute v1.14.0 h1:hfm2+FfxVmnRlh6LpB7cg1ZNU+5edAHmW679JePztk0=
+cloud.google.com/go/compute v1.18.0 h1:FEigFqoDbys2cvFkZ9Fjq4gnHBP55anJ0yQyau2f9oY=
-cloud.google.com/go/compute v1.14.0/go.mod h1:YfLtxrj9sU4Yxv+sXzZkyPjEyPBZfXHUvjxega5vAdo=
+cloud.google.com/go/compute v1.18.0/go.mod h1:1X7yHxec2Ga+Ss6jPyjxRxpu2uu7PLgsOVXvgU0yacs=
-cloud.google.com/go/compute/metadata v0.2.1 h1:efOwf5ymceDhK6PKMnnrTHP4pppY5L22mle96M1yP48=
+cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
-cloud.google.com/go/compute/metadata v0.2.1/go.mod h1:jgHgmJd2RKBGzXqF5LR2EZMGxBkeanZ9wwa75XHJgOM=
+cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
 cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
 cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
 cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -60,12 +60,8 @@ github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03
 github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
 github.com/Microsoft/go-winio v0.6.0 h1:slsWYD/zyx7lCXoZVlvQrj0hPTM1HI4+v1sIda2yDvg=
 github.com/Microsoft/go-winio v0.6.0/go.mod h1:cTAf44im0RAYeL23bpB+fzCyDH2MJiz2BO69KH/soAE=
-github.com/Microsoft/hcsshim v0.10.0-rc.4 h1:r/hSRKXFIdLYzZDbdieRxSY8T83Y0wBKNHZDKntNMSA=
+github.com/Microsoft/hcsshim v0.10.0-rc.7 h1:HBytQPxcv8Oy4244zbQbe6hnOnx544eL5QPUqhJldz8=
 github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
-github.com/PuerkitoBio/purell v1.1.1 h1:WEQqlqaGbrPkxLJWfBwQmfEAE1Z7ONdDLqrN38tNFfI=
-github.com/PuerkitoBio/purell v1.1.1/go.mod h1:c11w/QuzBsJSee3cPx9rAFu61PvFxuPbtSwDGJws/X0=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578 h1:d+Bc7a5rLufV/sSk/8dngufqelfh6jnri85riMAaF/M=
-github.com/PuerkitoBio/urlesc v0.0.0-20170810143723-de5bf2ad4578/go.mod h1:uGdkoq3SwY9Y+13GIhn11/XLaGBb4BfwItxLd5jeuXE=
 github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
 github.com/agext/levenshtein v1.2.1/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
@@ -125,8 +121,9 @@ github.com/cenkalti/backoff/v4 v4.2.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyY
 github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
 github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
 github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
 github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
+github.com/cespare/xxhash/v2 v2.2.0 h1:DC2CZ1Ep5Y4k3ZQ899DldepgrayRUGE6BBZ/cd9Cj44=
+github.com/cespare/xxhash/v2 v2.2.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
 github.com/checkpoint-restore/go-criu/v5 v5.3.0/go.mod h1:E/eQpaFtUKGOOSEBZgmKAcn+zUUwWxqcaKZlF54wK8E=
 github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
 github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
@@ -144,21 +141,21 @@ github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWH
 github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
 github.com/compose-spec/compose-go v1.9.0 h1:oaewhNhUP/AClVs6ytHzcjw1xwK+2EMWuvHXj6tYvRc=
 github.com/compose-spec/compose-go v1.9.0/go.mod h1:Tb5Ae2PsYN3GTqYqzl2IRbTPiJtPZZjMw8UKUvmehFk=
-github.com/containerd/cgroups v1.0.4 h1:jN/mbWBEaz+T1pi5OFtnkQ+8qnmEbAr1Oo1FRm5B0dA=
+github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaDFBM=
 github.com/containerd/console v1.0.3 h1:lIr7SlA5PxZyMV30bDW0MGbiOPXwc63yRuCP0ARubLw=
 github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
-github.com/containerd/containerd v1.7.0-beta.3 h1:PR1VOuSEQFHmxJlu0VDNxNpbfDnndrYHlreiXtEPcjk=
+github.com/containerd/containerd v1.7.0 h1:G/ZQr3gMZs6ZT0qPUZ15znx5QSdQdASW11nXTLTM2Pg=
-github.com/containerd/containerd v1.7.0-beta.3/go.mod h1:FpdL1A1z4j6AtyXJ1CKFc+Z2AfynurGumKkYjIYJGH4=
+github.com/containerd/containerd v1.7.0/go.mod h1:QfR7Efgb/6X2BDpTPJRvPTYDE9rsF0FsXX9J8sIs/sc=
 github.com/containerd/continuity v0.3.0 h1:nisirsYROK15TAMVukJOUyGJjz4BNQJBVsNvAXZJ/eg=
 github.com/containerd/continuity v0.3.0/go.mod h1:wJEAIwKOm/pBZuBd0JmeTvnLquTB1Ag8espWhkykbPM=
-github.com/containerd/fifo v1.0.0 h1:6PirWBr9/L7GDamKr+XM0IeUFXu5mf3M/BPpH9gaLBU=
+github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
 github.com/containerd/nydus-snapshotter v0.3.1 h1:b8WahTrPkt3XsabjG2o/leN4fw3HWZYr+qxo/Z8Mfzk=
-github.com/containerd/stargz-snapshotter v0.13.0 h1:3zr1/IkW1aEo6cMYTQeZ4L2jSuCN+F4kgGfjnuowe4U=
+github.com/containerd/stargz-snapshotter v0.14.1 h1:M58AiJ+Kj50cabqYP1TpBPgUczKgn8zipmteC5FyjVs=
-github.com/containerd/stargz-snapshotter/estargz v0.13.0 h1:fD7AwuVV+B40p0d9qVkH/Au1qhp8hn/HWJHIYjpEcfw=
+github.com/containerd/stargz-snapshotter/estargz v0.14.1 h1:n9M2GDSWM96pyipFTA0DaU+zdtzi3Iwsnj/rIHr1yFM=
-github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3 h1:BhCp66ofL8oYcdelc3CBXc2/Pfvvgx+s+mrp9TvNgn8=
+github.com/containerd/ttrpc v1.2.1 h1:VWv/Rzx023TBLv4WQ+9WPXlBG/s3rsRjY3i9AJ2BJdE=
-github.com/containerd/ttrpc v1.1.1-0.20220420014843-944ef4a40df3/go.mod h1:YYyNVhZrTMiaf51Vj6WhAJqJw+vl/nzABhj8pWrzle4=
+github.com/containerd/ttrpc v1.2.1/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
-github.com/containerd/typeurl v1.0.3-0.20220422153119-7f6e6d160d67 h1:rQvjv7gRi6Ki/NS/U9oLZFhqyk4dh/GH2M3o/4BRkMM=
+github.com/containerd/typeurl/v2 v2.1.0 h1:yNAhJvbNEANt7ck48IlEGOxP7YAp6LLpGn5jZACDNIE=
-github.com/containerd/typeurl v1.0.3-0.20220422153119-7f6e6d160d67/go.mod h1:HDkcKOXRnX6yKnXv3P0QrogFi0DoiauK/LpQi961f0A=
+github.com/containerd/typeurl/v2 v2.1.0/go.mod h1:IDp2JFvbwZ31H8dQbEIY7sDl2L3o3HZj1hsSQlywkQ0=
 github.com/coreos/go-systemd/v22 v22.3.2/go.mod h1:Y58oyj3AT4RCenI/lSvhwexgC+NSVTIJ3seZv2GcEnc=
 github.com/cpuguy83/go-md2man/v2 v2.0.0-20190314233015-f79a8a8ca69d/go.mod h1:maD7wRr/U5Z6m/iR4s+kqSMx2CaBsrgA7czyZG/E6dU=
 github.com/cpuguy83/go-md2man/v2 v2.0.2/go.mod h1:tgQtvFlXSQOSOSIRvRPT7W67SCa46tRHOmNcaadrF8o=
@@ -171,14 +168,14 @@ github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9 h1:doprs/RuXCuN864IfxC3h2qocrt158wGv3A5mcqSZQw=
 github.com/distribution/distribution/v3 v3.0.0-20221103125252-ebfa2a0ac0a9/go.mod h1:6rIc5NMSjXjjnwzWWy3HAm9gDBu+X7aCzL8VrHIKgxM=
-github.com/docker/cli v23.0.0+incompatible h1:bcM4syaQ+EM/iczJTimMOGzvnzJBFPFEf4acS7sZ+RM=
+github.com/docker/cli v23.0.1+incompatible h1:LRyWITpGzl2C9e9uGxzisptnxAn1zfZKXy13Ul2Q5oM=
-github.com/docker/cli v23.0.0+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v23.0.1+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli-docs-tool v0.5.1 h1:jIk/cCZurZERhALPVKhqlNxTQGxn2kcI+56gE57PQXg=
 github.com/docker/cli-docs-tool v0.5.1/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
 github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
 github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v23.0.0+incompatible h1:L6c28tNyqZ4/ub9AZC9d5QUuunoHHfEH4/Ue+h/E5nE=
+github.com/docker/docker v23.0.1+incompatible h1:vjgvJZxprTTE1A37nm+CLNAdwu6xZekyoiVlUZEINcY=
-github.com/docker/docker v23.0.0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v23.0.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.7.0 h1:xtCHsjxogADNZcdv1pKUHXryefjlVRqWqIhk/uXJp0A=
 github.com/docker/docker-credential-helpers v0.7.0/go.mod h1:rETQfLdHNT3foU5kuNkFR1R1V12OJRRO5lzt2D1b5X0=
 github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -197,8 +194,8 @@ github.com/docopt/docopt-go v0.0.0-20180111231733-ee0de3bc6815/go.mod h1:WwZ+bS3
 github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484 h1:pEtiCjIXx3RvGjlUJuCNxNOw0MNblyR9Wi+vJGBFh+8=
 github.com/elazarl/goproxy v0.0.0-20191011121108-aa519ddbe484/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
 github.com/elazarl/goproxy/ext v0.0.0-20190711103511-473e67f1d7d2/go.mod h1:gNh8nYJoAm43RfaxurUnxr+N1PwuFV3ZMl/efxlIlY8=
-github.com/emicklei/go-restful/v3 v3.8.0 h1:eCZ8ulSerjdAiaNpF7GxXIE7ZCMo1moN1qX+S609eVw=
+github.com/emicklei/go-restful/v3 v3.10.1 h1:rc42Y5YTp7Am7CS630D7JmhRjq4UlEUuEKfrDac4bSQ=
-github.com/emicklei/go-restful/v3 v3.8.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
+github.com/emicklei/go-restful/v3 v3.10.1/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc=
 github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
 github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
@@ -233,8 +230,8 @@ github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre
 github.com/go-openapi/jsonpointer v0.19.3/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
 github.com/go-openapi/jsonpointer v0.19.5 h1:gZr+CIYByUqjcgeLXnQu2gHYQC9o73G2XUeOFYEICuY=
 github.com/go-openapi/jsonpointer v0.19.5/go.mod h1:Pl9vOtqEWErmShwVjC8pYs9cog34VGT37dQOVbmoatg=
-github.com/go-openapi/jsonreference v0.19.5 h1:1WJP/wi4OjB4iV8KVbH73rQaoialJrqv8gitZLxGLtM=
+github.com/go-openapi/jsonreference v0.20.0 h1:MYlu0sBgChmCfJxxUKZ8g1cPWFOB37YSZqewK7OKeyA=
-github.com/go-openapi/jsonreference v0.19.5/go.mod h1:RdybgQwPxbL4UEjuAruzK1x3nE69AqPYEJeo/TWfEeg=
+github.com/go-openapi/jsonreference v0.20.0/go.mod h1:Ag74Ico3lPc+zR+qjn4XBUmXymS4zJbYVCZmcgkasdo=
 github.com/go-openapi/swag v0.19.5/go.mod h1:POnQmlKehdgb5mhVOsnJFsivZCEZ/vjK9gh66Z9tfKk=
 github.com/go-openapi/swag v0.19.14 h1:gm3vOOXfiuw5i9p5N9xJvfjvuofpyvLA9Wr6QfK5Fng=
 github.com/go-openapi/swag v0.19.14/go.mod h1:QYRuS/SOXUCsnplDa677K7+DxSOj6IPNl/eQntq43wQ=
@@ -335,8 +332,9 @@ github.com/gorilla/websocket v1.4.2/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/ad
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0 h1:+9834+KizmvFV7pXQGSXQTsaWhq2GjuNUt0aUU0YBYw=
 github.com/grpc-ecosystem/go-grpc-middleware v1.3.0/go.mod h1:z0ButlSOZa5vEBq9m2m2hlwIgKw+rp3sdCBRoJY+30Y=
 github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
-github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0 h1:BZHcxBETFHIdVyhyEfOvn/RdU/QGdLI4y34qQGjGWO0=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.7.0/go.mod h1:hgWBS7lorOAVIJEQMi4ZsPv9hVvWI6+ch50m39Pf2Ks=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3 h1:lLT7ZLSzGLI08vc9cpd+tYmNWjdKDqyr/2L+f6U12Fk=
+github.com/grpc-ecosystem/grpc-gateway/v2 v2.11.3/go.mod h1:o//XUCC/F+yRGJoPO/VU0GSB0f8Nhgmxx0VIRUvaC0w=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hashicorp/go-cty-funcs v0.0.0-20200930094925-2721b1e36840 h1:kgvybwEeu0SXktbB2y3uLHX9lklLo+nzUwh59A3jzQc=
@@ -375,8 +373,8 @@ github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 h1:iQTw/8FWTuc7uia
 github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0/go.mod h1:1NbS8ALrpOvjt0rHPNLyCIeMtbizbir8U//inJ+zuB8=
 github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
 github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
-github.com/klauspost/compress v1.15.12 h1:YClS/PImqYbn+UILDnqxQCZ3RehC9N318SU3kElDUEM=
+github.com/klauspost/compress v1.16.0 h1:iULayQNOReoYUe+1qtKOqw9CwJv3aNQu8ivo7lw1HU4=
-github.com/klauspost/compress v1.15.12/go.mod h1:QPwzmACJjUTFsnSHH934V6woptycfrDDJnH7hvFVbGM=
+github.com/klauspost/compress v1.16.0/go.mod h1:ntbaceVETuRiXiv4DpjP66DpAtAGkEQskQzEyD//IeE=
 github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
 github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@@ -408,8 +406,8 @@ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 h1:DpOJ2HYzC
 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZXFpozHsX6DPmq2I0TCekCxypsnAUbP2oI0UX1GXzOo=
 github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
 github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
-github.com/moby/buildkit v0.11.0-rc3.0.20230216100651-46e4e7e51b01 h1:RTdsd8sbZn5+1pG4SrUl3S1GNvEyzcvqm9hdok/4JAY=
+github.com/moby/buildkit v0.11.0-rc3.0.20230330090027-8b7bcb900d3c h1:JZvvWzulcnA2G4c/gJiSIqKDUoBjctYw2WMuS+XJexU=
-github.com/moby/buildkit v0.11.0-rc3.0.20230216100651-46e4e7e51b01/go.mod h1:bW7S/ewXItpf2ZfybG0sH8Lit4L3BxWFKv15eoAtw0U=
+github.com/moby/buildkit v0.11.0-rc3.0.20230330090027-8b7bcb900d3c/go.mod h1:NehrLo0nsnhS/+X+XyhU4LNucb1ndYXgPBOx/JNWVDA=
 github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
 github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
 github.com/moby/patternmatcher v0.5.0 h1:YCZgJOeULcxLw1Q+sVR636pmS7sPEn1Qo2iAN6M7DBo=
@@ -449,9 +447,9 @@ github.com/opencontainers/image-spec v1.1.0-rc2.0.20221005185240-3a7f492d3f1b/go
 github.com/opencontainers/runc v1.1.4 h1:nRCz/8sKg6K6jgYAFLDlXzPeITBZJyX28DBVhWD+5dg=
 github.com/opencontainers/runc v1.1.4/go.mod h1:1J5XiS+vdZ3wCyZybsuxXZWGrgSr8fFJHLXuG2PsnNg=
 github.com/opencontainers/runtime-spec v1.0.3-0.20210326190908-1c3f411f0417/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
-github.com/opencontainers/runtime-spec v1.0.3-0.20220825212826-86290f6a00fb h1:1xSVPOd7/UA+39/hXEGnBJ13p6JFB0E1EvQFlrRDOXI=
+github.com/opencontainers/runtime-spec v1.1.0-rc.1 h1:wHa9jroFfKGQqFHj0I1fMRKLl0pfj+ynAqBxo3v6u9w=
 github.com/opencontainers/selinux v1.10.0/go.mod h1:2i0OySw99QjzBBQByd1Gr9gSjvuho1lHsJxIJ3gGbJI=
-github.com/opencontainers/selinux v1.10.2 h1:NFy2xCsjn7+WspbfZkUd5zyVeisV7VFbPSP96+8/ha4=
+github.com/opencontainers/selinux v1.11.0 h1:+5Zbo97w3Lbmb3PeqQtpmTkMwsW5nRI3YaLpt7tQ7oU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/package-url/packageurl-go v0.1.1-0.20220428063043-89078438f170 h1:DiLBVp4DAcZlBVBEtJpNWZpZVq0AEeCY7Hqk8URVs4o=
 github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
@@ -541,8 +539,8 @@ github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
 github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
 github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
-github.com/stretchr/testify v1.8.1 h1:w7B6lhMri9wdJUVmEZPGGhZzrYTPvgJArz7wNPgYKsk=
+github.com/stretchr/testify v1.8.2 h1:+h33VjcLVPDHtOdpUCuF+7gSuG3yGIftsP1YvFihtJ8=
-github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
+github.com/stretchr/testify v1.8.2/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
 github.com/subosito/gotenv v1.4.1 h1:jyEFiXpy21Wm81FBN71l9VoMMV8H8jG+qIK3GCpY6Qs=
 github.com/syndtr/gocapability v0.0.0-20200815063812-42c35b437635/go.mod h1:hkRG7XYTFWNJGYcbNJQlaLq0fg1yr4J4t/NcTQtrfww=
 github.com/theupdateframework/notary v0.6.1 h1:7wshjstgS9x9F5LuB1L5mBI2xNMObWqjz+cjWoom6l0=
@@ -580,33 +578,33 @@ go.opencensus.io v0.22.2/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.3/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
 go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0 h1:+uFejS4DCfNH6d3xODVIGsdhzgzhh45p9gpbHQMbdZI=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0 h1:5jD3teb4Qh7mx/nfzq4jO2WFFpvXD0vYWFDrdvNWmXk=
-go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.37.0/go.mod h1:HSmzQvagH8pS2/xrK7ScWsk0vAMtRTGbMFgInXCi8Tc=
+go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.40.0/go.mod h1:UMklln0+MRhZC4e3PwmN3pCtq4DyIadWw4yikh6bNrw=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.37.0 h1:H0wsFGpY3uD/zB/5UubZgkgnd378/ogV9BH2itqEFbc=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.40.0 h1:ZjF6qLnAVNq6xUh0sK2mCEqwnRrpgr0mLALQXJL34NI=
-go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.37.0/go.mod h1:xXATK4LOREcHuSE4sWsK1VO7FUxa6L58rAORHFTdhAI=
+go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace v0.40.0/go.mod h1:SD34NWTW0VMH2VvFVfArHPoF+L1ddT4MOQCTb2l8T5I=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0 h1:yt2NKzK7Vyo6h0+X8BA4FpreZQTlVEIarnsBP/H5mzs=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0 h1:lE9EJyw3/JhrjWH/hEy9FptnalDQgj7vpbgC2KCCCxE=
-go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.37.0/go.mod h1:+ARmXlUlc51J7sZeCBkBJNdHGySrdOzgzxp6VWRWM1U=
+go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.40.0/go.mod h1:pcQ3MM3SWvrA71U4GDqv9UFDJ3HQsW7y5ZO3tDTlUdI=
-go.opentelemetry.io/otel v1.11.2 h1:YBZcQlsVekzFsFbjygXMOXSs6pialIZxcjfO/mBDmR0=
+go.opentelemetry.io/otel v1.14.0 h1:/79Huy8wbf5DnIPhemGB+zEPVwnN6fuQybr/SRXa6hM=
-go.opentelemetry.io/otel v1.11.2/go.mod h1:7p4EUV+AqgdlNV9gL97IgUZiVR3yrFXYo53f9BM3tRI=
+go.opentelemetry.io/otel v1.14.0/go.mod h1:o4buv+dJzx8rohcUeRmWUZhqupFvzWis188WlggnNeU=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2 h1:htgM8vZIF8oPSCxa341e3IZ4yr/sKxgu8KZYllByiVY=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0 h1:/fXHZHGvro6MVqV34fJzDhi7sHGpX3Ej/Qjmfn003ho=
-go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.11.2/go.mod h1:rqbht/LlhVBgn5+k3M5QK96K5Xb0DvXpMJ5SFQpY6uw=
+go.opentelemetry.io/otel/exporters/otlp/internal/retry v1.14.0/go.mod h1:UFG7EBMRdXyFstOwH028U0sVf+AvukSGhF0g8+dmNG8=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2 h1:fqR1kli93643au1RKo0Uma3d2aPQKT+WBKfTSBaKbOc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0 h1:TKf2uAs2ueguzLaxOCBXNpHxfO/aC7PAdDsSH0IbeRQ=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.11.2/go.mod h1:5Qn6qvgkMsLDX+sYK64rHb1FPhpn0UtxF+ouX1uhyJE=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace v1.14.0/go.mod h1:HrbCVv40OOLTABmOn1ZWty6CHXkU8DK/Urc43tHug70=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2 h1:ERwKPn9Aer7Gxsc0+ZlutlH1bEEAUXAUhqm3Y45ABbk=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0 h1:ap+y8RXX3Mu9apKVtOkM6WSFESLM8K3wNQyOU8sWHcc=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.11.2/go.mod h1:jWZUM2MWhWCJ9J9xVbRx7tzK1mXKpAlze4CeulycwVY=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracegrpc v1.14.0/go.mod h1:5w41DY6S9gZrbjuq6Y+753e96WfPha5IcsOSZTtullM=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2 h1:Us8tbCmuN16zAnK5TC69AtODLycKbwnskQzaB6DfFhc=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0 h1:3jAYbRHQAqzLjd9I4tzxwJ8Pk/N6AqBcF6m1ZHrxG94=
-go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.11.2/go.mod h1:GZWSQQky8AgdJj50r1KJm8oiQiIPaAX7uZCFQX9GzC8=
+go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.14.0/go.mod h1:+N7zNjIJv4K+DeX67XXET0P+eIciESgaFDBqh+ZJFS4=
-go.opentelemetry.io/otel/metric v0.34.0 h1:MCPoQxcg/26EuuJwpYN1mZTeCYAUGx8ABxfW07YkjP8=
+go.opentelemetry.io/otel/metric v0.37.0 h1:pHDQuLQOZwYD+Km0eb657A25NaRzy0a+eLyKfDXedEs=
-go.opentelemetry.io/otel/metric v0.34.0/go.mod h1:ZFuI4yQGNCupurTXCwkeD/zHBt+C2bR7bw5JqUm/AP8=
+go.opentelemetry.io/otel/metric v0.37.0/go.mod h1:DmdaHfGt54iV6UKxsV9slj2bBRJcKC1B1uvDLIioc1s=
-go.opentelemetry.io/otel/sdk v1.11.2 h1:GF4JoaEx7iihdMFu30sOyRx52HDHOkl9xQ8SMqNXUiU=
+go.opentelemetry.io/otel/sdk v1.14.0 h1:PDCppFRDq8A1jL9v6KMI6dYesaq+DFcDZvjsoGvxGzY=
-go.opentelemetry.io/otel/sdk v1.11.2/go.mod h1:wZ1WxImwpq+lVRo4vsmSOxdd+xwoUJ6rqyLc3SyX9aU=
+go.opentelemetry.io/otel/sdk v1.14.0/go.mod h1:bwIC5TjrNG6QDCHNWvW4HLHtUQ4I+VQDsnjhvyZCALM=
-go.opentelemetry.io/otel/trace v1.11.2 h1:Xf7hWSF2Glv0DE3MH7fBHvtpSBsjcBUe5MYAmZM/+y0=
+go.opentelemetry.io/otel/trace v1.14.0 h1:wp2Mmvj41tDsyAJXiWDWpfNsOiIyd38fy85pyKcFq/M=
-go.opentelemetry.io/otel/trace v1.11.2/go.mod h1:4N+yC7QEz7TTsG9BSRLNAa63eg5E06ObSbKPmxQ/pKA=
+go.opentelemetry.io/otel/trace v1.14.0/go.mod h1:8avnQLK+CG77yNLUae4ea2JDQ6iT+gozhnZjy/rw9G8=
 go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
 go.opentelemetry.io/proto/otlp v0.19.0 h1:IVN6GR+mhC4s5yfcTbmzHYODqvWAp3ZedA2SJPI1Nnw=
 go.opentelemetry.io/proto/otlp v0.19.0/go.mod h1:H7XAot3MsfNsj7EXtrA2q5xSNQ10UqI405h3+duxN4U=
 go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
-go.uber.org/goleak v1.2.0 h1:xqgm/S+aQvhWFTtR0XK3Jvg7z8kGV8P4X14IzwN3Eqk=
+go.uber.org/goleak v1.2.1 h1:NBol2c7O1ZokfZ0LEU9K6Whx/KnwvepVetCUhtKja4A=
 go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
 go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
 golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
@@ -651,8 +649,8 @@ golang.org/x/mod v0.1.1-0.20191105210325-c90efee705ee/go.mod h1:QqPTAvyqsEbceGzB
 golang.org/x/mod v0.1.1-0.20191107180719-034126e5016b/go.mod h1:QqPTAvyqsEbceGzBzNggFXnrqF1CaUcvgkdR5Ot7KZg=
 golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
 golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
-golang.org/x/mod v0.6.0 h1:b9gGHsz9/HhJ3HF5DHQytPpuwocVTChQJK3AvoLRD5I=
+golang.org/x/mod v0.7.0 h1:LapD9S96VoQRhi/GrNTqeBJFrUjs5UHCAtTlgwA5oZA=
-golang.org/x/mod v0.6.0/go.mod h1:4mET923SAdbXp2ki8ey+zGs1SLqsuM2Y0uvdZR/fUNI=
+golang.org/x/mod v0.7.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
 golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180811021610-c39426892332/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
 golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
@@ -668,7 +666,6 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
 golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
-golang.org/x/net v0.0.0-20190827160401-ba9fcec4b297/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20191209160850-c0dbc17a3553/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
 golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -701,8 +698,8 @@ golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d/go.mod h1:gOpvHmFTYa4Iltr
 golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
 golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
-golang.org/x/oauth2 v0.1.0 h1:isLCZuhj4v+tYv7eskaN4v/TM+A1begWWgyVJDdl1+Y=
+golang.org/x/oauth2 v0.4.0 h1:NF0gk8LVPg1Ml7SSbGyySuoxdsXitj7TvgvuRxIMc/M=
-golang.org/x/oauth2 v0.1.0/go.mod h1:G9FE4dLTsbXUu90h/Pf85g4w1D+SSAgR+q46nJZ8M4A=
+golang.org/x/oauth2 v0.4.0/go.mod h1:RznEsdpjGAINPTOF0UH/t+xJ75L18YO3Ho6Pyn+uRec=
 golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -767,8 +764,9 @@ golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0 h1:MUK/U/4lj1t1oPg0HfuXDN/Z1wv31ZJ/YcPiGccS4DU=
+golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
-golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
+golang.org/x/sys v0.6.0 h1:MVltZSvRTcU2ljQOhs94SXPftV6DCNnZViHeQps87pQ=
+golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
 golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
 golang.org/x/term v0.5.0 h1:n2a8QNdAb0sZNpU9R1ALUXBbY+w51fCQDN+7EdxNBsY=
@@ -830,8 +828,8 @@ golang.org/x/tools v0.0.0-20200729194436-6467de6f59a7/go.mod h1:njjCfa9FT2d7l9Bc
 golang.org/x/tools v0.0.0-20200804011535-6c149bb5ef0d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20200825202427-b303f430e36d/go.mod h1:njjCfa9FT2d7l9Bc6FUM5FLjQPp3cFF28FI3qnDFljA=
 golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
-golang.org/x/tools v0.2.0 h1:G6AHpWxTMGY1KyEYoAQ5WTtIekUUvDNjan3ugu60JvE=
+golang.org/x/tools v0.5.0 h1:+bSpV5HIeWkuvgaMfI3UmKRThoTA5ODJTUd8T17NO+4=
-golang.org/x/tools v0.2.0/go.mod h1:y4OqIKeOV/fWJetJ8bXPU1sEVniLMIyDAZWeHdV+NTA=
+golang.org/x/tools v0.5.0/go.mod h1:N+Kgy78s5I24c24dU8OfWNEotWjutIs8SnJvn5IDq+k=
 golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
 golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -893,8 +891,8 @@ google.golang.org/genproto v0.0.0-20200804131852-c06518451d9c/go.mod h1:FWY/as6D
 google.golang.org/genproto v0.0.0-20200825200019-8632dd797987/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
 google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
-google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6 h1:AGXp12e/9rItf6/4QymU7WsAUwCf+ICW75cuR91nJIc=
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4 h1:DdoeryqhaXp1LtT/emMP1BRJPHHKFi5akj/nbx/zNTA=
-google.golang.org/genproto v0.0.0-20221206210731-b1a01be3a5f6/go.mod h1:1dOng4TWOomJrDGhpXjfCD35wQC6jnC7HpRmOFRqEV0=
+google.golang.org/genproto v0.0.0-20230306155012-7f2fa6fef1f4/go.mod h1:NWraEVixdDnqcqQ30jipen1STv2r/n24Wb7twVTGR4s=
 google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
 google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
 google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -911,8 +909,8 @@ google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTp
 google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
 google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
|
||||||
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
|
||||||
google.golang.org/grpc v1.51.0 h1:E1eGv1FTqoLIdnBCZufiSHgKjlqG6fKFf6pPWtMTh8U=
|
google.golang.org/grpc v1.53.0 h1:LAv2ds7cmFV/XTS3XG1NneeENYrXGmorPxsBbptIjNc=
|
||||||
google.golang.org/grpc v1.51.0/go.mod h1:wgNDFcnuBGmxLKI/qn4T+m5BtEBYXJPvibbUPsAIPww=
|
google.golang.org/grpc v1.53.0/go.mod h1:OnIrk0ipVdj4N5d9IUoFUx72/VlD7+jUsHwZgwSMQpw=
|
||||||
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
|
||||||
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
|
||||||
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
|
||||||
@ -975,12 +973,12 @@ k8s.io/apiserver v0.25.4 h1:/3TwZcgLqX7wUxq7TtXOUqXeBTwXIblVMQdhR5XZ7yo=
|
|||||||
k8s.io/apiserver v0.25.4/go.mod h1:rPcm567XxjOnnd7jedDUnGJGmDGAo+cT6H7QHAN+xV0=
|
k8s.io/apiserver v0.25.4/go.mod h1:rPcm567XxjOnnd7jedDUnGJGmDGAo+cT6H7QHAN+xV0=
|
||||||
k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8=
|
k8s.io/client-go v0.25.4 h1:3RNRDffAkNU56M/a7gUfXaEzdhZlYhoW8dgViGy5fn8=
|
||||||
k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw=
|
k8s.io/client-go v0.25.4/go.mod h1:8trHCAC83XKY0wsBIpbirZU4NTUpbuhc2JnI7OruGZw=
|
||||||
k8s.io/klog/v2 v2.80.1 h1:atnLQ121W371wYYFawwYx1aEY2eUfs4l3J72wtgAwV4=
|
k8s.io/klog/v2 v2.90.1 h1:m4bYOKall2MmOiRaR1J+We67Do7vm9KiQVlT96lnHUw=
|
||||||
k8s.io/klog/v2 v2.80.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
k8s.io/klog/v2 v2.90.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0=
|
||||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1 h1:MQ8BAZPZlWk3S9K4a9NCkIFQtZShWqoha7snGixVgEA=
|
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280 h1:+70TFaan3hfJzs+7VK2o+OGxg8HsuBr/5f6tVAjDu6E=
|
||||||
k8s.io/kube-openapi v0.0.0-20220803162953-67bda5d908f1/go.mod h1:C/N6wCaBHeBHkHUesQOQy2/MZqGgMAFPqGsGQLdbZBU=
|
k8s.io/kube-openapi v0.0.0-20221012153701-172d655c2280/go.mod h1:+Axhij7bCpeqhklhUTe3xmOn6bWxolyZEeyaFpjGtl4=
|
||||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2 h1:GfD9OzL11kvZN5iArC6oTS7RTj7oJOIfnislxYlqTj8=
|
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5 h1:kmDqav+P+/5e1i9tFfHq1qcF3sOrDp+YEkVDAHu7Jwk=
|
||||||
k8s.io/utils v0.0.0-20221108210102-8e77b1f39fe2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
k8s.io/utils v0.0.0-20230220204549-a5ecb0141aa5/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
|
||||||
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
rsc.io/binaryregexp v0.2.0/go.mod h1:qTv7/COck+e2FymRvadv62gMdZztPaShugOCi3I+8D8=
|
||||||
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
rsc.io/quote/v3 v3.1.0/go.mod h1:yEA65RcK8LyAZtP9Kv3t0HmxON59tX3rD+tICJqUlj0=
|
||||||
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
rsc.io/sampler v1.3.0/go.mod h1:T1hPZKmBbMNahiBKFy5HrXp6adAjACjK9JXDnKaTXpA=
|
||||||
|
2 vendor/cloud.google.com/go/compute/internal/version.go generated vendored
@@ -15,4 +15,4 @@
 package internal
 
 // Version is the current tagged release of the library.
-const Version = "1.14.0"
+const Version = "1.18.0"
14 vendor/cloud.google.com/go/compute/metadata/CHANGES.md generated vendored
@@ -1,5 +1,19 @@
 # Changes
 
+## [0.2.3](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.2...compute/metadata/v0.2.3) (2022-12-15)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Switch DNS lookup to an absolute lookup ([119b410](https://github.com/googleapis/google-cloud-go/commit/119b41060c7895e45e48aee5621ad35607c4d021)), refs [#7165](https://github.com/googleapis/google-cloud-go/issues/7165)
+
+## [0.2.2](https://github.com/googleapis/google-cloud-go/compare/compute/metadata/v0.2.1...compute/metadata/v0.2.2) (2022-12-01)
+
+
+### Bug Fixes
+
+* **compute/metadata:** Set IdleConnTimeout for http.Client ([#7084](https://github.com/googleapis/google-cloud-go/issues/7084)) ([766516a](https://github.com/googleapis/google-cloud-go/commit/766516aaf3816bfb3159efeea65aa3d1d205a3e2)), refs [#5430](https://github.com/googleapis/google-cloud-go/issues/5430)
+
 ## [0.1.0] (2022-10-26)
 
 Initial release of metadata being it's own module.
3 vendor/cloud.google.com/go/compute/metadata/metadata.go generated vendored
@@ -70,6 +70,7 @@ func newDefaultHTTPClient() *http.Client {
 				Timeout:   2 * time.Second,
 				KeepAlive: 30 * time.Second,
 			}).Dial,
+			IdleConnTimeout: 60 * time.Second,
 		},
 		Timeout: 5 * time.Second,
 	}
@@ -146,7 +147,7 @@ func testOnGCE() bool {
 
 	go func() {
 		resolver := &net.Resolver{}
-		addrs, err := resolver.LookupHost(ctx, "metadata.google.internal")
+		addrs, err := resolver.LookupHost(ctx, "metadata.google.internal.")
 		if err != nil || len(addrs) == 0 {
 			resc <- false
 			return
5 vendor/github.com/PuerkitoBio/purell/.gitignore generated vendored
@@ -1,5 +0,0 @@
-*.sublime-*
-.DS_Store
-*.swp
-*.swo
-tags
12 vendor/github.com/PuerkitoBio/purell/.travis.yml generated vendored
@@ -1,12 +0,0 @@
-language: go
-
-go:
-- 1.4.x
-- 1.5.x
-- 1.6.x
-- 1.7.x
-- 1.8.x
-- 1.9.x
-- "1.10.x"
-- "1.11.x"
-- tip
12 vendor/github.com/PuerkitoBio/purell/LICENSE generated vendored
@@ -1,12 +0,0 @@
-Copyright (c) 2012, Martin Angers
-All rights reserved.
-
-Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
-
-* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
-
-* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
-
-* Neither the name of the author nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
-
-THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
188 vendor/github.com/PuerkitoBio/purell/README.md generated vendored
@@ -1,188 +0,0 @@
# Purell
|
|
||||||
|
|
||||||
Purell is a tiny Go library to normalize URLs. It returns a pure URL. Pure-ell. Sanitizer and all. Yeah, I know...
|
|
||||||
|
|
||||||
Based on the [wikipedia paper][wiki] and the [RFC 3986 document][rfc].
|
|
||||||
|
|
||||||
[](http://travis-ci.org/PuerkitoBio/purell)
|
|
||||||
|
|
||||||
## Install
|
|
||||||
|
|
||||||
`go get github.com/PuerkitoBio/purell`
|
|
||||||
|
|
||||||
## Changelog
|
|
||||||
|
|
||||||
* **v1.1.1** : Fix failing test due to Go1.12 changes (thanks to @ianlancetaylor).
|
|
||||||
* **2016-11-14 (v1.1.0)** : IDN: Conform to RFC 5895: Fold character width (thanks to @beeker1121).
|
|
||||||
* **2016-07-27 (v1.0.0)** : Normalize IDN to ASCII (thanks to @zenovich).
|
|
||||||
* **2015-02-08** : Add fix for relative paths issue ([PR #5][pr5]) and add fix for unnecessary encoding of reserved characters ([see issue #7][iss7]).
|
|
||||||
* **v0.2.0** : Add benchmarks, Attempt IDN support.
|
|
||||||
* **v0.1.0** : Initial release.
|
|
||||||
|
|
||||||
## Examples
|
|
||||||
|
|
||||||
From `example_test.go` (note that in your code, you would import "github.com/PuerkitoBio/purell", and would prefix references to its methods and constants with "purell."):
|
|
||||||
|
|
||||||
```go
|
|
||||||
package purell
|
|
||||||
|
|
||||||
import (
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
)
|
|
||||||
|
|
||||||
func ExampleNormalizeURLString() {
|
|
||||||
if normalized, err := NormalizeURLString("hTTp://someWEBsite.com:80/Amazing%3f/url/",
|
|
||||||
FlagLowercaseScheme|FlagLowercaseHost|FlagUppercaseEscapes); err != nil {
|
|
||||||
panic(err)
|
|
||||||
} else {
|
|
||||||
fmt.Print(normalized)
|
|
||||||
}
|
|
||||||
// Output: http://somewebsite.com:80/Amazing%3F/url/
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleMustNormalizeURLString() {
|
|
||||||
normalized := MustNormalizeURLString("hTTpS://someWEBsite.com:443/Amazing%fa/url/",
|
|
||||||
FlagsUnsafeGreedy)
|
|
||||||
fmt.Print(normalized)
|
|
||||||
|
|
||||||
// Output: http://somewebsite.com/Amazing%FA/url
|
|
||||||
}
|
|
||||||
|
|
||||||
func ExampleNormalizeURL() {
|
|
||||||
if u, err := url.Parse("Http://SomeUrl.com:8080/a/b/.././c///g?c=3&a=1&b=9&c=0#target"); err != nil {
|
|
||||||
panic(err)
|
|
||||||
} else {
|
|
||||||
normalized := NormalizeURL(u, FlagsUsuallySafeGreedy|FlagRemoveDuplicateSlashes|FlagRemoveFragment)
|
|
||||||
fmt.Print(normalized)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Output: http://someurl.com:8080/a/c/g?c=3&a=1&b=9&c=0
|
|
||||||
}
|
|
||||||
```
|
|
||||||
|
|
||||||
## API
|
|
||||||
|
|
||||||
As seen in the examples above, purell offers three methods, `NormalizeURLString(string, NormalizationFlags) (string, error)`, `MustNormalizeURLString(string, NormalizationFlags) (string)` and `NormalizeURL(*url.URL, NormalizationFlags) (string)`. They all normalize the provided URL based on the specified flags. Here are the available flags:
|
|
||||||
|
|
||||||
```go
|
|
||||||
const (
|
|
||||||
// Safe normalizations
|
|
||||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
|
||||||
FlagLowercaseHost // http://HOST -> http://host
|
|
||||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
|
||||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
|
||||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
|
||||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
|
||||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
|
||||||
|
|
||||||
// Usually safe normalizations
|
|
||||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
|
||||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
|
||||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
|
||||||
|
|
||||||
// Unsafe normalizations
|
|
||||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
|
||||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
|
||||||
FlagForceHTTP // https://host -> http://host
|
|
||||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
|
||||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
|
||||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
|
||||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
|
||||||
|
|
||||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
|
||||||
// submitted by jehiah
|
|
||||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
|
||||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
|
||||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
|
||||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
|
||||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
|
||||||
|
|
||||||
// Convenience set of safe normalizations
|
|
||||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
|
||||||
|
|
||||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
|
||||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
|
||||||
|
|
||||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
|
||||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
|
||||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
|
||||||
|
|
||||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
|
||||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
|
||||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
|
||||||
|
|
||||||
// Convenience set of all available flags
|
|
||||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
|
||||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
|
||||||
)
|
|
||||||
```
|
|
||||||
|
|
||||||
For convenience, the set of flags `FlagsSafe`, `FlagsUsuallySafe[Greedy|NonGreedy]`, `FlagsUnsafe[Greedy|NonGreedy]` and `FlagsAll[Greedy|NonGreedy]` are provided for the similarly grouped normalizations on [wikipedia's URL normalization page][wiki]. You can add (using the bitwise OR `|` operator) or remove (using the bitwise AND NOT `&^` operator) individual flags from the sets if required, to build your own custom set.
|
|
||||||
|
|
||||||
The [full godoc reference is available on gopkgdoc][godoc].
|
|
||||||
|
|
||||||
Some things to note:
|
|
||||||
|
|
||||||
* `FlagDecodeUnnecessaryEscapes`, `FlagEncodeNecessaryEscapes`, `FlagUppercaseEscapes` and `FlagRemoveEmptyQuerySeparator` are always implicitly set, because internally, the URL string is parsed as an URL object, which automatically decodes unnecessary escapes, uppercases and encodes necessary ones, and removes empty query separators (an unnecessary `?` at the end of the url). So this operation cannot **not** be done. For this reason, `FlagRemoveEmptyQuerySeparator` (as well as the other three) has been included in the `FlagsSafe` convenience set, instead of `FlagsUnsafe`, where Wikipedia puts it.
|
|
||||||
|
|
||||||
* The `FlagDecodeUnnecessaryEscapes` decodes the following escapes (*from -> to*):
|
|
||||||
- %24 -> $
|
|
||||||
- %26 -> &
|
|
||||||
- %2B-%3B -> +,-./0123456789:;
|
|
||||||
- %3D -> =
|
|
||||||
- %40-%5A -> @ABCDEFGHIJKLMNOPQRSTUVWXYZ
|
|
||||||
- %5F -> _
|
|
||||||
- %61-%7A -> abcdefghijklmnopqrstuvwxyz
|
|
||||||
- %7E -> ~
|
|
||||||
|
|
||||||
|
|
||||||
* When the `NormalizeURL` function is used (passing an URL object), this source URL object is modified (that is, after the call, the URL object will be modified to reflect the normalization).
|
|
||||||
|
|
||||||
* The *replace IP with domain name* normalization (`http://208.77.188.166/ → http://www.example.com/`) is obviously not possible for a library without making some network requests. This is not implemented in purell.
|
|
||||||
|
|
||||||
* The *remove unused query string parameters* and *remove default query parameters* are also not implemented, since this is a very case-specific normalization, and it is quite trivial to do with an URL object.
|
|
||||||
|
|
||||||
### Safe vs Usually Safe vs Unsafe
|
|
||||||
|
|
||||||
Purell allows you to control the level of risk you take while normalizing an URL. You can aggressively normalize, play it totally safe, or anything in between.
|
|
||||||
|
|
||||||
Consider the following URL:
|
|
||||||
|
|
||||||
`HTTPS://www.RooT.com/toto/t%45%1f///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
|
||||||
|
|
||||||
Normalizing with the `FlagsSafe` gives:
|
|
||||||
|
|
||||||
`https://www.root.com/toto/tE%1F///a/./b/../c/?z=3&w=2&a=4&w=1#invalid`
|
|
||||||
|
|
||||||
With the `FlagsUsuallySafeGreedy`:
|
|
||||||
|
|
||||||
`https://www.root.com/toto/tE%1F///a/c?z=3&w=2&a=4&w=1#invalid`
|
|
||||||
|
|
||||||
And with `FlagsUnsafeGreedy`:
|
|
||||||
|
|
||||||
`http://root.com/toto/tE%1F/a/c?a=4&w=1&w=2&z=3`
|
|
||||||
|
|
||||||
## TODOs
|
|
||||||
|
|
||||||
* Add a class/default instance to allow specifying custom directory index names? At the moment, removing directory index removes `(^|/)((?:default|index)\.\w{1,4})$`.
|
|
||||||
|
|
||||||
## Thanks / Contributions
|
|
||||||
|
|
||||||
@rogpeppe
|
|
||||||
@jehiah
|
|
||||||
@opennota
|
|
||||||
@pchristopher1275
|
|
||||||
@zenovich
|
|
||||||
@beeker1121
|
|
||||||
|
|
||||||
## License
|
|
||||||
|
|
||||||
The [BSD 3-Clause license][bsd].
|
|
||||||
|
|
||||||
[bsd]: http://opensource.org/licenses/BSD-3-Clause
|
|
||||||
[wiki]: http://en.wikipedia.org/wiki/URL_normalization
|
|
||||||
[rfc]: http://tools.ietf.org/html/rfc3986#section-6
|
|
||||||
[godoc]: http://go.pkgdoc.org/github.com/PuerkitoBio/purell
|
|
||||||
[pr5]: https://github.com/PuerkitoBio/purell/pull/5
|
|
||||||
[iss7]: https://github.com/PuerkitoBio/purell/issues/7
|
|
379 vendor/github.com/PuerkitoBio/purell/purell.go generated vendored
@@ -1,379 +0,0 @@
/*
|
|
||||||
Package purell offers URL normalization as described on the wikipedia page:
|
|
||||||
http://en.wikipedia.org/wiki/URL_normalization
|
|
||||||
*/
|
|
||||||
package purell
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"fmt"
|
|
||||||
"net/url"
|
|
||||||
"regexp"
|
|
||||||
"sort"
|
|
||||||
"strconv"
|
|
||||||
"strings"
|
|
||||||
|
|
||||||
"github.com/PuerkitoBio/urlesc"
|
|
||||||
"golang.org/x/net/idna"
|
|
||||||
"golang.org/x/text/unicode/norm"
|
|
||||||
"golang.org/x/text/width"
|
|
||||||
)
|
|
||||||
|
|
||||||
// A set of normalization flags determines how a URL will
|
|
||||||
// be normalized.
|
|
||||||
type NormalizationFlags uint
|
|
||||||
|
|
||||||
const (
|
|
||||||
// Safe normalizations
|
|
||||||
FlagLowercaseScheme NormalizationFlags = 1 << iota // HTTP://host -> http://host, applied by default in Go1.1
|
|
||||||
FlagLowercaseHost // http://HOST -> http://host
|
|
||||||
FlagUppercaseEscapes // http://host/t%ef -> http://host/t%EF
|
|
||||||
FlagDecodeUnnecessaryEscapes // http://host/t%41 -> http://host/tA
|
|
||||||
FlagEncodeNecessaryEscapes // http://host/!"#$ -> http://host/%21%22#$
|
|
||||||
FlagRemoveDefaultPort // http://host:80 -> http://host
|
|
||||||
FlagRemoveEmptyQuerySeparator // http://host/path? -> http://host/path
|
|
||||||
|
|
||||||
// Usually safe normalizations
|
|
||||||
FlagRemoveTrailingSlash // http://host/path/ -> http://host/path
|
|
||||||
FlagAddTrailingSlash // http://host/path -> http://host/path/ (should choose only one of these add/remove trailing slash flags)
|
|
||||||
FlagRemoveDotSegments // http://host/path/./a/b/../c -> http://host/path/a/c
|
|
||||||
|
|
||||||
// Unsafe normalizations
|
|
||||||
FlagRemoveDirectoryIndex // http://host/path/index.html -> http://host/path/
|
|
||||||
FlagRemoveFragment // http://host/path#fragment -> http://host/path
|
|
||||||
FlagForceHTTP // https://host -> http://host
|
|
||||||
FlagRemoveDuplicateSlashes // http://host/path//a///b -> http://host/path/a/b
|
|
||||||
FlagRemoveWWW // http://www.host/ -> http://host/
|
|
||||||
FlagAddWWW // http://host/ -> http://www.host/ (should choose only one of these add/remove WWW flags)
|
|
||||||
FlagSortQuery // http://host/path?c=3&b=2&a=1&b=1 -> http://host/path?a=1&b=1&b=2&c=3
|
|
||||||
|
|
||||||
// Normalizations not in the wikipedia article, required to cover tests cases
|
|
||||||
// submitted by jehiah
|
|
||||||
FlagDecodeDWORDHost // http://1113982867 -> http://66.102.7.147
|
|
||||||
FlagDecodeOctalHost // http://0102.0146.07.0223 -> http://66.102.7.147
|
|
||||||
FlagDecodeHexHost // http://0x42660793 -> http://66.102.7.147
|
|
||||||
FlagRemoveUnnecessaryHostDots // http://.host../path -> http://host/path
|
|
||||||
FlagRemoveEmptyPortSeparator // http://host:/path -> http://host/path
|
|
||||||
|
|
||||||
// Convenience set of safe normalizations
|
|
||||||
FlagsSafe NormalizationFlags = FlagLowercaseHost | FlagLowercaseScheme | FlagUppercaseEscapes | FlagDecodeUnnecessaryEscapes | FlagEncodeNecessaryEscapes | FlagRemoveDefaultPort | FlagRemoveEmptyQuerySeparator
|
|
||||||
|
|
||||||
// For convenience sets, "greedy" uses the "remove trailing slash" and "remove www. prefix" flags,
|
|
||||||
// while "non-greedy" uses the "add (or keep) the trailing slash" and "add www. prefix".
|
|
||||||
|
|
||||||
// Convenience set of usually safe normalizations (includes FlagsSafe)
|
|
||||||
FlagsUsuallySafeGreedy NormalizationFlags = FlagsSafe | FlagRemoveTrailingSlash | FlagRemoveDotSegments
|
|
||||||
FlagsUsuallySafeNonGreedy NormalizationFlags = FlagsSafe | FlagAddTrailingSlash | FlagRemoveDotSegments
|
|
||||||
|
|
||||||
// Convenience set of unsafe normalizations (includes FlagsUsuallySafe)
|
|
||||||
FlagsUnsafeGreedy NormalizationFlags = FlagsUsuallySafeGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagRemoveWWW | FlagSortQuery
|
|
||||||
FlagsUnsafeNonGreedy NormalizationFlags = FlagsUsuallySafeNonGreedy | FlagRemoveDirectoryIndex | FlagRemoveFragment | FlagForceHTTP | FlagRemoveDuplicateSlashes | FlagAddWWW | FlagSortQuery
|
|
||||||
|
|
||||||
// Convenience set of all available flags
|
|
||||||
FlagsAllGreedy = FlagsUnsafeGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
|
||||||
FlagsAllNonGreedy = FlagsUnsafeNonGreedy | FlagDecodeDWORDHost | FlagDecodeOctalHost | FlagDecodeHexHost | FlagRemoveUnnecessaryHostDots | FlagRemoveEmptyPortSeparator
|
|
||||||
)
|
|
||||||
|
|
||||||
const (
|
|
||||||
defaultHttpPort = ":80"
|
|
||||||
defaultHttpsPort = ":443"
|
|
||||||
)
|
|
||||||
|
|
||||||
// Regular expressions used by the normalizations
|
|
||||||
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
|
|
||||||
var rxDirIndex = regexp.MustCompile(`(^|/)((?:default|index)\.\w{1,4})$`)
|
|
||||||
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
|
|
||||||
var rxDWORDHost = regexp.MustCompile(`^(\d+)((?:\.+)?(?:\:\d*)?)$`)
|
|
||||||
var rxOctalHost = regexp.MustCompile(`^(0\d*)\.(0\d*)\.(0\d*)\.(0\d*)((?:\.+)?(?:\:\d*)?)$`)
|
|
||||||
var rxHexHost = regexp.MustCompile(`^0x([0-9A-Fa-f]+)((?:\.+)?(?:\:\d*)?)$`)
|
|
||||||
var rxHostDots = regexp.MustCompile(`^(.+?)(:\d+)?$`)
|
|
||||||
var rxEmptyPort = regexp.MustCompile(`:+$`)
|
|
||||||
|
|
||||||
// Map of flags to implementation function.
|
|
||||||
// FlagDecodeUnnecessaryEscapes has no action, since it is done automatically
|
|
||||||
// by parsing the string as an URL. Same for FlagUppercaseEscapes and FlagRemoveEmptyQuerySeparator.
|
|
||||||
|
|
||||||
// Since maps have undefined traversing order, make a slice of ordered keys
|
|
||||||
var flagsOrder = []NormalizationFlags{
|
|
||||||
FlagLowercaseScheme,
|
|
||||||
FlagLowercaseHost,
|
|
||||||
FlagRemoveDefaultPort,
|
|
||||||
FlagRemoveDirectoryIndex,
|
|
||||||
FlagRemoveDotSegments,
|
|
||||||
FlagRemoveFragment,
|
|
||||||
FlagForceHTTP, // Must be after remove default port (because https=443/http=80)
|
|
||||||
FlagRemoveDuplicateSlashes,
|
|
||||||
FlagRemoveWWW,
|
|
||||||
FlagAddWWW,
|
|
||||||
FlagSortQuery,
|
|
||||||
FlagDecodeDWORDHost,
|
|
||||||
FlagDecodeOctalHost,
|
|
||||||
FlagDecodeHexHost,
|
|
||||||
FlagRemoveUnnecessaryHostDots,
|
|
||||||
FlagRemoveEmptyPortSeparator,
|
|
||||||
FlagRemoveTrailingSlash, // These two (add/remove trailing slash) must be last
|
|
||||||
FlagAddTrailingSlash,
|
|
||||||
}
|
|
||||||
|
|
||||||
// ... and then the map, where order is unimportant
|
|
||||||
var flags = map[NormalizationFlags]func(*url.URL){
|
|
||||||
FlagLowercaseScheme: lowercaseScheme,
|
|
||||||
FlagLowercaseHost: lowercaseHost,
|
|
||||||
FlagRemoveDefaultPort: removeDefaultPort,
|
|
||||||
FlagRemoveDirectoryIndex: removeDirectoryIndex,
|
|
||||||
FlagRemoveDotSegments: removeDotSegments,
|
|
||||||
FlagRemoveFragment: removeFragment,
|
|
||||||
FlagForceHTTP: forceHTTP,
|
|
||||||
FlagRemoveDuplicateSlashes: removeDuplicateSlashes,
|
|
||||||
FlagRemoveWWW: removeWWW,
|
|
||||||
FlagAddWWW: addWWW,
|
|
||||||
FlagSortQuery: sortQuery,
|
|
||||||
FlagDecodeDWORDHost: decodeDWORDHost,
|
|
||||||
FlagDecodeOctalHost: decodeOctalHost,
|
|
||||||
FlagDecodeHexHost: decodeHexHost,
|
|
||||||
FlagRemoveUnnecessaryHostDots: removeUnncessaryHostDots,
|
|
||||||
FlagRemoveEmptyPortSeparator: removeEmptyPortSeparator,
|
|
||||||
FlagRemoveTrailingSlash: removeTrailingSlash,
|
|
||||||
FlagAddTrailingSlash: addTrailingSlash,
|
|
||||||
}
|
|
||||||
|
|
||||||
// MustNormalizeURLString returns the normalized string, and panics if an error occurs.
|
|
||||||
// It takes an URL string as input, as well as the normalization flags.
|
|
||||||
func MustNormalizeURLString(u string, f NormalizationFlags) string {
|
|
||||||
result, e := NormalizeURLString(u, f)
|
|
||||||
if e != nil {
|
|
||||||
panic(e)
|
|
||||||
}
|
|
||||||
return result
|
|
||||||
}
|
|
||||||
|
|
||||||
// NormalizeURLString returns the normalized string, or an error if it can't be parsed into an URL object.
|
|
||||||
// It takes an URL string as input, as well as the normalization flags.
|
|
||||||
func NormalizeURLString(u string, f NormalizationFlags) (string, error) {
|
|
||||||
parsed, err := url.Parse(u)
|
|
||||||
if err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
if f&FlagLowercaseHost == FlagLowercaseHost {
|
|
||||||
parsed.Host = strings.ToLower(parsed.Host)
|
|
||||||
}
|
|
||||||
|
|
||||||
// The idna package doesn't fully conform to RFC 5895
|
|
||||||
// (https://tools.ietf.org/html/rfc5895), so we do it here.
|
|
||||||
// Taken from Go 1.8 cycle source, courtesy of bradfitz.
|
|
||||||
// TODO: Remove when (if?) idna package conforms to RFC 5895.
|
|
||||||
parsed.Host = width.Fold.String(parsed.Host)
|
|
||||||
parsed.Host = norm.NFC.String(parsed.Host)
|
|
||||||
if parsed.Host, err = idna.ToASCII(parsed.Host); err != nil {
|
|
||||||
return "", err
|
|
||||||
}
|
|
||||||
|
|
||||||
return NormalizeURL(parsed, f), nil
|
|
||||||
}
|
|
||||||
|
|
||||||
// NormalizeURL returns the normalized string.
|
|
||||||
// It takes a parsed URL object as input, as well as the normalization flags.
|
|
||||||
func NormalizeURL(u *url.URL, f NormalizationFlags) string {
|
|
||||||
for _, k := range flagsOrder {
|
|
||||||
if f&k == k {
|
|
||||||
flags[k](u)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return urlesc.Escape(u)
|
|
||||||
}
|
|
||||||
|
|
||||||
func lowercaseScheme(u *url.URL) {
|
|
||||||
if len(u.Scheme) > 0 {
|
|
||||||
u.Scheme = strings.ToLower(u.Scheme)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func lowercaseHost(u *url.URL) {
|
|
||||||
if len(u.Host) > 0 {
|
|
||||||
u.Host = strings.ToLower(u.Host)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeDefaultPort(u *url.URL) {
|
|
||||||
if len(u.Host) > 0 {
|
|
||||||
scheme := strings.ToLower(u.Scheme)
|
|
||||||
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
|
|
||||||
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
|
|
||||||
return ""
|
|
||||||
}
|
|
||||||
return val
|
|
||||||
})
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeTrailingSlash(u *url.URL) {
|
|
||||||
if l := len(u.Path); l > 0 {
|
|
||||||
if strings.HasSuffix(u.Path, "/") {
|
|
||||||
u.Path = u.Path[:l-1]
|
|
||||||
}
|
|
||||||
} else if l = len(u.Host); l > 0 {
|
|
||||||
if strings.HasSuffix(u.Host, "/") {
|
|
||||||
u.Host = u.Host[:l-1]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func addTrailingSlash(u *url.URL) {
|
|
||||||
if l := len(u.Path); l > 0 {
|
|
||||||
if !strings.HasSuffix(u.Path, "/") {
|
|
||||||
u.Path += "/"
|
|
||||||
}
|
|
||||||
} else if l = len(u.Host); l > 0 {
|
|
||||||
if !strings.HasSuffix(u.Host, "/") {
|
|
||||||
u.Host += "/"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeDotSegments(u *url.URL) {
|
|
||||||
if len(u.Path) > 0 {
|
|
||||||
var dotFree []string
|
|
||||||
var lastIsDot bool
|
|
||||||
|
|
||||||
sections := strings.Split(u.Path, "/")
|
|
||||||
for _, s := range sections {
|
|
||||||
if s == ".." {
|
|
||||||
if len(dotFree) > 0 {
|
|
||||||
dotFree = dotFree[:len(dotFree)-1]
|
|
||||||
}
|
|
||||||
} else if s != "." {
|
|
||||||
dotFree = append(dotFree, s)
|
|
||||||
}
|
|
||||||
lastIsDot = (s == "." || s == "..")
|
|
||||||
}
|
|
||||||
// Special case if host does not end with / and new path does not begin with /
|
|
||||||
u.Path = strings.Join(dotFree, "/")
|
|
||||||
if u.Host != "" && !strings.HasSuffix(u.Host, "/") && !strings.HasPrefix(u.Path, "/") {
|
|
||||||
u.Path = "/" + u.Path
|
|
||||||
}
|
|
||||||
// Special case if the last segment was a dot, make sure the path ends with a slash
|
|
||||||
if lastIsDot && !strings.HasSuffix(u.Path, "/") {
|
|
||||||
u.Path += "/"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeDirectoryIndex(u *url.URL) {
|
|
||||||
if len(u.Path) > 0 {
|
|
||||||
u.Path = rxDirIndex.ReplaceAllString(u.Path, "$1")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeFragment(u *url.URL) {
|
|
||||||
u.Fragment = ""
|
|
||||||
}
|
|
||||||
|
|
||||||
func forceHTTP(u *url.URL) {
|
|
||||||
if strings.ToLower(u.Scheme) == "https" {
|
|
||||||
u.Scheme = "http"
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeDuplicateSlashes(u *url.URL) {
|
|
||||||
if len(u.Path) > 0 {
|
|
||||||
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeWWW(u *url.URL) {
|
|
||||||
if len(u.Host) > 0 && strings.HasPrefix(strings.ToLower(u.Host), "www.") {
|
|
||||||
u.Host = u.Host[4:]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func addWWW(u *url.URL) {
|
|
||||||
if len(u.Host) > 0 && !strings.HasPrefix(strings.ToLower(u.Host), "www.") {
|
|
||||||
u.Host = "www." + u.Host
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func sortQuery(u *url.URL) {
|
|
||||||
q := u.Query()
|
|
||||||
|
|
||||||
if len(q) > 0 {
|
|
||||||
arKeys := make([]string, len(q))
|
|
||||||
i := 0
|
|
||||||
for k := range q {
|
|
||||||
arKeys[i] = k
|
|
||||||
i++
|
|
||||||
}
|
|
||||||
sort.Strings(arKeys)
|
|
||||||
buf := new(bytes.Buffer)
|
|
||||||
for _, k := range arKeys {
|
|
||||||
sort.Strings(q[k])
|
|
||||||
for _, v := range q[k] {
|
|
||||||
if buf.Len() > 0 {
|
|
||||||
buf.WriteRune('&')
|
|
||||||
}
|
|
||||||
buf.WriteString(fmt.Sprintf("%s=%s", k, urlesc.QueryEscape(v)))
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Rebuild the raw query string
|
|
||||||
u.RawQuery = buf.String()
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeDWORDHost(u *url.URL) {
|
|
||||||
if len(u.Host) > 0 {
|
|
||||||
if matches := rxDWORDHost.FindStringSubmatch(u.Host); len(matches) > 2 {
|
|
||||||
var parts [4]int64
|
|
||||||
|
|
||||||
dword, _ := strconv.ParseInt(matches[1], 10, 0)
|
|
||||||
for i, shift := range []uint{24, 16, 8, 0} {
|
|
||||||
parts[i] = dword >> shift & 0xFF
|
|
||||||
}
|
|
||||||
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[2])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeOctalHost(u *url.URL) {
|
|
||||||
if len(u.Host) > 0 {
|
|
||||||
if matches := rxOctalHost.FindStringSubmatch(u.Host); len(matches) > 5 {
|
|
||||||
var parts [4]int64
|
|
||||||
|
|
||||||
for i := 1; i <= 4; i++ {
|
|
||||||
parts[i-1], _ = strconv.ParseInt(matches[i], 8, 0)
|
|
||||||
}
|
|
||||||
u.Host = fmt.Sprintf("%d.%d.%d.%d%s", parts[0], parts[1], parts[2], parts[3], matches[5])
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func decodeHexHost(u *url.URL) {
|
|
||||||
if len(u.Host) > 0 {
|
|
||||||
if matches := rxHexHost.FindStringSubmatch(u.Host); len(matches) > 2 {
|
|
||||||
// Conversion is safe because of regex validation
|
|
||||||
parsed, _ := strconv.ParseInt(matches[1], 16, 0)
|
|
||||||
// Set host as DWORD (base 10) encoded host
|
|
||||||
u.Host = fmt.Sprintf("%d%s", parsed, matches[2])
|
|
||||||
// The rest is the same as decoding a DWORD host
|
|
||||||
decodeDWORDHost(u)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeUnncessaryHostDots(u *url.URL) {
|
|
||||||
if len(u.Host) > 0 {
|
|
||||||
if matches := rxHostDots.FindStringSubmatch(u.Host); len(matches) > 1 {
|
|
||||||
// Trim the leading and trailing dots
|
|
||||||
u.Host = strings.Trim(matches[1], ".")
|
|
||||||
if len(matches) > 2 {
|
|
||||||
u.Host += matches[2]
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
func removeEmptyPortSeparator(u *url.URL) {
|
|
||||||
if len(u.Host) > 0 {
|
|
||||||
u.Host = rxEmptyPort.ReplaceAllString(u.Host, "")
|
|
||||||
}
|
|
||||||
}
|
|
15 vendor/github.com/PuerkitoBio/urlesc/.travis.yml generated vendored
@@ -1,15 +0,0 @@
-language: go
-
-go:
-- 1.4.x
-- 1.5.x
-- 1.6.x
-- 1.7.x
-- 1.8.x
-- tip
-
-install:
-- go build .
-
-script:
-- go test -v
16 vendor/github.com/PuerkitoBio/urlesc/README.md generated vendored
@@ -1,16 +0,0 @@
-urlesc [](https://travis-ci.org/PuerkitoBio/urlesc) [](http://godoc.org/github.com/PuerkitoBio/urlesc)
-======
-
-Package urlesc implements query escaping as per RFC 3986.
-
-It contains some parts of the net/url package, modified so as to allow
-some reserved characters incorrectly escaped by net/url (see [issue 5684](https://github.com/golang/go/issues/5684)).
-
-## Install
-
-go get github.com/PuerkitoBio/urlesc
-
-## License
-
-Go license (BSD-3-Clause)
-
180 vendor/github.com/PuerkitoBio/urlesc/urlesc.go generated vendored
@@ -1,180 +0,0 @@
// Copyright 2009 The Go Authors. All rights reserved.
|
|
||||||
// Use of this source code is governed by a BSD-style
|
|
||||||
// license that can be found in the LICENSE file.
|
|
||||||
|
|
||||||
// Package urlesc implements query escaping as per RFC 3986.
|
|
||||||
// It contains some parts of the net/url package, modified so as to allow
|
|
||||||
// some reserved characters incorrectly escaped by net/url.
|
|
||||||
// See https://github.com/golang/go/issues/5684
|
|
||||||
package urlesc
|
|
||||||
|
|
||||||
import (
|
|
||||||
"bytes"
|
|
||||||
"net/url"
|
|
||||||
"strings"
|
|
||||||
)
|
|
||||||
|
|
||||||
type encoding int
|
|
||||||
|
|
||||||
const (
|
|
||||||
encodePath encoding = 1 + iota
|
|
||||||
encodeUserPassword
|
|
||||||
encodeQueryComponent
|
|
||||||
encodeFragment
|
|
||||||
)
|
|
||||||
|
|
||||||
// Return true if the specified character should be escaped when
|
|
||||||
// appearing in a URL string, according to RFC 3986.
|
|
||||||
func shouldEscape(c byte, mode encoding) bool {
|
|
||||||
// §2.3 Unreserved characters (alphanum)
|
|
||||||
if 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || '0' <= c && c <= '9' {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
switch c {
|
|
||||||
case '-', '.', '_', '~': // §2.3 Unreserved characters (mark)
|
|
||||||
return false
|
|
||||||
|
|
||||||
// §2.2 Reserved characters (reserved)
|
|
||||||
case ':', '/', '?', '#', '[', ']', '@', // gen-delims
|
|
||||||
'!', '$', '&', '\'', '(', ')', '*', '+', ',', ';', '=': // sub-delims
|
|
||||||
// Different sections of the URL allow a few of
|
|
||||||
// the reserved characters to appear unescaped.
|
|
||||||
switch mode {
|
|
||||||
case encodePath: // §3.3
|
|
||||||
// The RFC allows sub-delims and : @.
|
|
||||||
// '/', '[' and ']' can be used to assign meaning to individual path
|
|
||||||
// segments. This package only manipulates the path as a whole,
|
|
||||||
// so we allow those as well. That leaves only ? and # to escape.
|
|
||||||
return c == '?' || c == '#'
|
|
||||||
|
|
||||||
case encodeUserPassword: // §3.2.1
|
|
||||||
// The RFC allows : and sub-delims in
|
|
||||||
// userinfo. The parsing of userinfo treats ':' as special so we must escape
|
|
||||||
// all the gen-delims.
|
|
||||||
return c == ':' || c == '/' || c == '?' || c == '#' || c == '[' || c == ']' || c == '@'
|
|
||||||
|
|
||||||
case encodeQueryComponent: // §3.4
|
|
||||||
// The RFC allows / and ?.
|
|
||||||
return c != '/' && c != '?'
|
|
||||||
|
|
||||||
case encodeFragment: // §4.1
|
|
||||||
// The RFC text is silent but the grammar allows
|
|
||||||
// everything, so escape nothing but #
|
|
||||||
return c == '#'
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Everything else must be escaped.
|
|
||||||
return true
|
|
||||||
}
|
|
||||||
|
|
||||||
// QueryEscape escapes the string so it can be safely placed
|
|
||||||
// inside a URL query.
|
|
||||||
func QueryEscape(s string) string {
|
|
||||||
return escape(s, encodeQueryComponent)
|
|
||||||
}
|
|
||||||
|
|
||||||
func escape(s string, mode encoding) string {
|
|
||||||
spaceCount, hexCount := 0, 0
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
c := s[i]
|
|
||||||
if shouldEscape(c, mode) {
|
|
||||||
if c == ' ' && mode == encodeQueryComponent {
|
|
||||||
spaceCount++
|
|
||||||
} else {
|
|
||||||
hexCount++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
if spaceCount == 0 && hexCount == 0 {
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
|
|
||||||
t := make([]byte, len(s)+2*hexCount)
|
|
||||||
j := 0
|
|
||||||
for i := 0; i < len(s); i++ {
|
|
||||||
switch c := s[i]; {
|
|
||||||
case c == ' ' && mode == encodeQueryComponent:
|
|
||||||
t[j] = '+'
|
|
||||||
j++
|
|
||||||
case shouldEscape(c, mode):
|
|
||||||
t[j] = '%'
|
|
||||||
t[j+1] = "0123456789ABCDEF"[c>>4]
|
|
||||||
t[j+2] = "0123456789ABCDEF"[c&15]
|
|
||||||
j += 3
|
|
||||||
default:
|
|
||||||
t[j] = s[i]
|
|
||||||
j++
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return string(t)
|
|
||||||
}
|
|
||||||
|
|
||||||
var uiReplacer = strings.NewReplacer(
|
|
||||||
"%21", "!",
|
|
||||||
"%27", "'",
|
|
||||||
"%28", "(",
|
|
||||||
"%29", ")",
|
|
||||||
"%2A", "*",
|
|
||||||
)
|
|
||||||
|
|
||||||
// unescapeUserinfo unescapes some characters that need not to be escaped as per RFC3986.
|
|
||||||
func unescapeUserinfo(s string) string {
|
|
||||||
return uiReplacer.Replace(s)
|
|
||||||
}
|
|
||||||
|
|
||||||
// Escape reassembles the URL into a valid URL string.
|
|
||||||
// The general form of the result is one of:
|
|
||||||
//
|
|
||||||
// scheme:opaque
|
|
||||||
// scheme://userinfo@host/path?query#fragment
|
|
||||||
//
|
|
||||||
// If u.Opaque is non-empty, String uses the first form;
|
|
||||||
// otherwise it uses the second form.
|
|
||||||
//
|
|
||||||
// In the second form, the following rules apply:
|
|
||||||
// - if u.Scheme is empty, scheme: is omitted.
|
|
||||||
// - if u.User is nil, userinfo@ is omitted.
|
|
||||||
// - if u.Host is empty, host/ is omitted.
|
|
||||||
// - if u.Scheme and u.Host are empty and u.User is nil,
|
|
||||||
// the entire scheme://userinfo@host/ is omitted.
|
|
||||||
// - if u.Host is non-empty and u.Path begins with a /,
|
|
||||||
// the form host/path does not add its own /.
|
|
||||||
// - if u.RawQuery is empty, ?query is omitted.
|
|
||||||
// - if u.Fragment is empty, #fragment is omitted.
|
|
||||||
func Escape(u *url.URL) string {
|
|
||||||
var buf bytes.Buffer
|
|
||||||
if u.Scheme != "" {
|
|
||||||
buf.WriteString(u.Scheme)
|
|
||||||
buf.WriteByte(':')
|
|
||||||
}
|
|
||||||
if u.Opaque != "" {
|
|
||||||
buf.WriteString(u.Opaque)
|
|
||||||
} else {
|
|
||||||
if u.Scheme != "" || u.Host != "" || u.User != nil {
|
|
||||||
buf.WriteString("//")
|
|
||||||
if ui := u.User; ui != nil {
|
|
||||||
buf.WriteString(unescapeUserinfo(ui.String()))
|
|
||||||
buf.WriteByte('@')
|
|
||||||
}
|
|
||||||
if h := u.Host; h != "" {
|
|
||||||
buf.WriteString(h)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if u.Path != "" && u.Path[0] != '/' && u.Host != "" {
|
|
||||||
buf.WriteByte('/')
|
|
||||||
}
|
|
||||||
buf.WriteString(escape(u.Path, encodePath))
|
|
||||||
}
|
|
||||||
if u.RawQuery != "" {
|
|
||||||
buf.WriteByte('?')
|
|
||||||
buf.WriteString(u.RawQuery)
|
|
||||||
}
|
|
||||||
if u.Fragment != "" {
|
|
||||||
buf.WriteByte('#')
|
|
||||||
buf.WriteString(escape(u.Fragment, encodeFragment))
|
|
||||||
}
|
|
||||||
return buf.String()
|
|
||||||
}
|
|
31 vendor/github.com/cespare/xxhash/v2/README.md generated vendored
@@ -3,8 +3,7 @@
 [](https://pkg.go.dev/github.com/cespare/xxhash/v2)
 [](https://github.com/cespare/xxhash/actions/workflows/test.yml)
 
-xxhash is a Go implementation of the 64-bit
-[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
+xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
 high-quality hashing algorithm that is much faster than anything in the Go
 standard library.
 
@@ -25,8 +24,11 @@ func (*Digest) WriteString(string) (int, error)
 func (*Digest) Sum64() uint64
 ```
 
-This implementation provides a fast pure-Go implementation and an even faster
-assembly implementation for amd64.
+The package is written with optimized pure Go and also contains even faster
+assembly implementations for amd64 and arm64. If desired, the `purego` build tag
+opts into using the Go code even on those architectures.
 
+[xxHash]: http://cyan4973.github.io/xxHash/
+
 ## Compatibility
 
@@ -45,19 +47,20 @@ I recommend using the latest release of Go.
 Here are some quick benchmarks comparing the pure-Go and assembly
 implementations of Sum64.
 
 | input size | purego | asm |
-| --- | --- | --- |
-| 5 B | 979.66 MB/s | 1291.17 MB/s |
-| 100 B | 7475.26 MB/s | 7973.40 MB/s |
-| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
-| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
+| ---------- | --------- | --------- |
+| 4 B | 1.3 GB/s | 1.2 GB/s |
+| 16 B | 2.9 GB/s | 3.5 GB/s |
+| 100 B | 6.9 GB/s | 8.1 GB/s |
+| 4 KB | 11.7 GB/s | 16.7 GB/s |
+| 10 MB | 12.0 GB/s | 17.3 GB/s |
 
-These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
-the following commands under Go 1.11.2:
+These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
+CPU using the following commands under Go 1.19.2:
 
 ```
-$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
-$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
+benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
+benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
 ```
 
 ## Projects using this package
10 vendor/github.com/cespare/xxhash/v2/testall.sh generated vendored Normal file
@@ -0,0 +1,10 @@
+#!/bin/bash
+set -eu -o pipefail
+
+# Small convenience script for running the tests with various combinations of
+# arch/tags. This assumes we're running on amd64 and have qemu available.
+
+go test ./...
+go test -tags purego ./...
+GOARCH=arm64 go test
+GOARCH=arm64 go test -tags purego
47 vendor/github.com/cespare/xxhash/v2/xxhash.go generated vendored
@@ -16,19 +16,11 @@ const (
 	prime5 uint64 = 2870177450012600261
 )
 
-// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
-// possible in the Go code is worth a small (but measurable) performance boost
-// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
-// convenience in the Go code in a few places where we need to intentionally
-// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
-// result overflows a uint64).
-var (
-	prime1v = prime1
-	prime2v = prime2
-	prime3v = prime3
-	prime4v = prime4
-	prime5v = prime5
-)
+// Store the primes in an array as well.
+//
+// The consts are used when possible in Go code to avoid MOVs but we need a
+// contiguous array of the assembly code.
+var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
 
 // Digest implements hash.Hash64.
 type Digest struct {
@@ -50,10 +42,10 @@ func New() *Digest {
 
 // Reset clears the Digest's state so that it can be reused.
 func (d *Digest) Reset() {
-	d.v1 = prime1v + prime2
+	d.v1 = primes[0] + prime2
 	d.v2 = prime2
 	d.v3 = 0
-	d.v4 = -prime1v
+	d.v4 = -primes[0]
 	d.total = 0
 	d.n = 0
 }
@@ -69,21 +61,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
 	n = len(b)
 	d.total += uint64(n)
 
+	memleft := d.mem[d.n&(len(d.mem)-1):]
+
 	if d.n+n < 32 {
 		// This new data doesn't even fill the current block.
-		copy(d.mem[d.n:], b)
+		copy(memleft, b)
 		d.n += n
 		return
 	}
 
 	if d.n > 0 {
 		// Finish off the partial block.
-		copy(d.mem[d.n:], b)
+		c := copy(memleft, b)
 		d.v1 = round(d.v1, u64(d.mem[0:8]))
 		d.v2 = round(d.v2, u64(d.mem[8:16]))
 		d.v3 = round(d.v3, u64(d.mem[16:24]))
 		d.v4 = round(d.v4, u64(d.mem[24:32]))
-		b = b[32-d.n:]
+		b = b[c:]
 		d.n = 0
 	}
 
@@ -133,21 +127,20 @@ func (d *Digest) Sum64() uint64 {
 
 	h += d.total
 
-	i, end := 0, d.n
-	for ; i+8 <= end; i += 8 {
-		k1 := round(0, u64(d.mem[i:i+8]))
+	b := d.mem[:d.n&(len(d.mem)-1)]
+	for ; len(b) >= 8; b = b[8:] {
+		k1 := round(0, u64(b[:8]))
 		h ^= k1
 		h = rol27(h)*prime1 + prime4
 	}
-	if i+4 <= end {
-		h ^= uint64(u32(d.mem[i:i+4])) * prime1
+	if len(b) >= 4 {
+		h ^= uint64(u32(b[:4])) * prime1
 		h = rol23(h)*prime2 + prime3
-		i += 4
+		b = b[4:]
 	}
-	for i < end {
-		h ^= uint64(d.mem[i]) * prime5
+	for ; len(b) > 0; b = b[1:] {
+		h ^= uint64(b[0]) * prime5
 		h = rol11(h) * prime1
-		i++
 	}
 
 	h ^= h >> 33
308 vendor/github.com/cespare/xxhash/v2/xxhash_amd64.s generated vendored
@@ -1,215 +1,209 @@
|
//go:build !appengine && gc && !purego
|
||||||
// +build !appengine
|
// +build !appengine
|
||||||
// +build gc
|
// +build gc
|
||||||
// +build !purego
|
// +build !purego
|
||||||
|
|
||||||
#include "textflag.h"
|
#include "textflag.h"
|
||||||
|
|
||||||
// Register allocation:
|
// Registers:
|
||||||
// AX h
|
#define h AX
|
||||||
// SI pointer to advance through b
|
#define d AX
|
||||||
// DX n
|
#define p SI // pointer to advance through b
|
||||||
// BX loop end
|
#define n DX
|
||||||
// R8 v1, k1
|
#define end BX // loop end
|
||||||
// R9 v2
|
#define v1 R8
|
||||||
// R10 v3
|
#define v2 R9
|
||||||
// R11 v4
|
#define v3 R10
|
||||||
// R12 tmp
|
#define v4 R11
|
||||||
// R13 prime1v
|
#define x R12
|
||||||
// R14 prime2v
|
#define prime1 R13
|
||||||
// DI prime4v
|
#define prime2 R14
|
||||||
|
#define prime4 DI
|
||||||
|
|
||||||
// round reads from and advances the buffer pointer in SI.
|
#define round(acc, x) \
|
||||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
IMULQ prime2, x \
|
||||||
#define round(r) \
|
ADDQ x, acc \
|
||||||
MOVQ (SI), R12 \
|
ROLQ $31, acc \
|
||||||
ADDQ $8, SI \
|
IMULQ prime1, acc
|
||||||
IMULQ R14, R12 \
|
|
||||||
ADDQ R12, r \
|
|
||||||
ROLQ $31, r \
|
|
||||||
IMULQ R13, r
|
|
||||||
|
|
||||||
// mergeRound applies a merge round on the two registers acc and val.
|
// round0 performs the operation x = round(0, x).
|
||||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
#define round0(x) \
|
||||||
#define mergeRound(acc, val) \
|
IMULQ prime2, x \
|
||||||
IMULQ R14, val \
|
ROLQ $31, x \
|
||||||
ROLQ $31, val \
|
IMULQ prime1, x
|
||||||
IMULQ R13, val \
|
|
||||||
XORQ val, acc \
|
// mergeRound applies a merge round on the two registers acc and x.
|
||||||
IMULQ R13, acc \
|
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||||
ADDQ DI, acc
|
#define mergeRound(acc, x) \
|
||||||
|
round0(x) \
|
||||||
|
XORQ x, acc \
|
||||||
|
IMULQ prime1, acc \
|
||||||
|
ADDQ prime4, acc
|
||||||
|
|
||||||
|
// blockLoop processes as many 32-byte blocks as possible,
|
||||||
|
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||||
|
// to process.
|
||||||
|
#define blockLoop() \
|
||||||
|
loop: \
|
||||||
|
MOVQ +0(p), x \
|
||||||
|
round(v1, x) \
|
||||||
|
MOVQ +8(p), x \
|
||||||
|
round(v2, x) \
|
||||||
|
MOVQ +16(p), x \
|
||||||
|
round(v3, x) \
|
||||||
|
MOVQ +24(p), x \
|
||||||
|
round(v4, x) \
|
||||||
|
ADDQ $32, p \
|
||||||
|
CMPQ p, end \
|
||||||
|
JLE loop
|
||||||
|
|
||||||
// func Sum64(b []byte) uint64
|
// func Sum64(b []byte) uint64
|
||||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||||
// Load fixed primes.
|
// Load fixed primes.
|
||||||
MOVQ ·prime1v(SB), R13
|
MOVQ ·primes+0(SB), prime1
|
||||||
MOVQ ·prime2v(SB), R14
|
MOVQ ·primes+8(SB), prime2
|
||||||
MOVQ ·prime4v(SB), DI
|
MOVQ ·primes+24(SB), prime4
|
||||||
|
|
||||||
// Load slice.
|
// Load slice.
|
||||||
MOVQ b_base+0(FP), SI
|
MOVQ b_base+0(FP), p
|
||||||
MOVQ b_len+8(FP), DX
|
MOVQ b_len+8(FP), n
|
||||||
LEAQ (SI)(DX*1), BX
|
LEAQ (p)(n*1), end
|
||||||
|
|
||||||
// The first loop limit will be len(b)-32.
|
// The first loop limit will be len(b)-32.
|
||||||
SUBQ $32, BX
|
SUBQ $32, end
|
||||||
|
|
||||||
// Check whether we have at least one block.
|
// Check whether we have at least one block.
|
||||||
CMPQ DX, $32
|
CMPQ n, $32
|
||||||
JLT noBlocks
|
JLT noBlocks
|
||||||
|
|
||||||
// Set up initial state (v1, v2, v3, v4).
|
// Set up initial state (v1, v2, v3, v4).
|
||||||
MOVQ R13, R8
|
MOVQ prime1, v1
|
||||||
ADDQ R14, R8
|
ADDQ prime2, v1
|
||||||
MOVQ R14, R9
|
MOVQ prime2, v2
|
||||||
XORQ R10, R10
|
XORQ v3, v3
|
||||||
XORQ R11, R11
|
XORQ v4, v4
|
||||||
SUBQ R13, R11
|
SUBQ prime1, v4
|
||||||
|
|
||||||
// Loop until SI > BX.
|
blockLoop()
|
||||||
blockLoop:
|
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
MOVQ v1, h
|
||||||
JLE blockLoop
|
ROLQ $1, h
|
||||||
|
MOVQ v2, x
|
||||||
|
ROLQ $7, x
|
||||||
|
ADDQ x, h
|
||||||
|
MOVQ v3, x
|
||||||
|
ROLQ $12, x
|
||||||
|
ADDQ x, h
|
||||||
|
MOVQ v4, x
|
||||||
|
ROLQ $18, x
|
||||||
|
ADDQ x, h
|
||||||
|
|
||||||
MOVQ R8, AX
|
mergeRound(h, v1)
|
||||||
ROLQ $1, AX
|
mergeRound(h, v2)
|
||||||
MOVQ R9, R12
|
mergeRound(h, v3)
|
||||||
ROLQ $7, R12
|
mergeRound(h, v4)
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R10, R12
|
|
||||||
ROLQ $12, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R11, R12
|
|
||||||
ROLQ $18, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
|
|
||||||
mergeRound(AX, R8)
|
|
||||||
mergeRound(AX, R9)
|
|
||||||
mergeRound(AX, R10)
|
|
||||||
mergeRound(AX, R11)
|
|
||||||
|
|
||||||
JMP afterBlocks
|
JMP afterBlocks
|
||||||
|
|
||||||
noBlocks:
|
noBlocks:
|
||||||
MOVQ ·prime5v(SB), AX
|
MOVQ ·primes+32(SB), h
|
||||||
|
|
||||||
afterBlocks:
|
afterBlocks:
|
||||||
ADDQ DX, AX
|
ADDQ n, h
|
||||||
|
|
||||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
ADDQ $24, end
|
||||||
ADDQ $24, BX
|
CMPQ p, end
|
||||||
|
JG try4
|
||||||
|
|
||||||
CMPQ SI, BX
|
loop8:
|
||||||
JG fourByte
|
MOVQ (p), x
|
||||||
|
ADDQ $8, p
|
||||||
|
round0(x)
|
||||||
|
XORQ x, h
|
||||||
|
ROLQ $27, h
|
||||||
|
IMULQ prime1, h
|
||||||
|
ADDQ prime4, h
|
||||||
|
|
||||||
wordLoop:
|
CMPQ p, end
|
||||||
// Calculate k1.
|
JLE loop8
|
||||||
MOVQ (SI), R8
|
|
||||||
ADDQ $8, SI
|
|
||||||
IMULQ R14, R8
|
|
||||||
ROLQ $31, R8
|
|
||||||
IMULQ R13, R8
|
|
||||||
|
|
||||||
XORQ R8, AX
|
try4:
|
||||||
ROLQ $27, AX
|
ADDQ $4, end
|
||||||
IMULQ R13, AX
|
CMPQ p, end
|
||||||
ADDQ DI, AX
|
JG try1
|
||||||
|
|
||||||
CMPQ SI, BX
|
MOVL (p), x
|
||||||
JLE wordLoop
|
ADDQ $4, p
|
||||||
|
IMULQ prime1, x
|
||||||
|
XORQ x, h
|
||||||
|
|
||||||
fourByte:
|
ROLQ $23, h
|
||||||
ADDQ $4, BX
|
IMULQ prime2, h
|
||||||
CMPQ SI, BX
|
ADDQ ·primes+16(SB), h
|
||||||
JG singles
|
|
||||||
|
|
||||||
MOVL (SI), R8
|
try1:
|
||||||
ADDQ $4, SI
|
ADDQ $4, end
|
||||||
IMULQ R13, R8
|
CMPQ p, end
|
||||||
XORQ R8, AX
|
|
||||||
|
|
||||||
ROLQ $23, AX
|
|
||||||
IMULQ R14, AX
|
|
||||||
ADDQ ·prime3v(SB), AX
|
|
||||||
|
|
||||||
singles:
|
|
||||||
ADDQ $4, BX
|
|
||||||
CMPQ SI, BX
|
|
||||||
JGE finalize
|
JGE finalize
|
||||||
|
|
||||||
singlesLoop:
|
loop1:
|
||||||
MOVBQZX (SI), R12
|
MOVBQZX (p), x
|
||||||
ADDQ $1, SI
|
ADDQ $1, p
|
||||||
IMULQ ·prime5v(SB), R12
|
IMULQ ·primes+32(SB), x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
|
ROLQ $11, h
|
||||||
|
IMULQ prime1, h
|
||||||
|
|
||||||
ROLQ $11, AX
|
CMPQ p, end
|
||||||
IMULQ R13, AX
|
JL loop1
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JL singlesLoop
|
|
||||||
|
|
||||||
finalize:
|
finalize:
|
||||||
MOVQ AX, R12
|
MOVQ h, x
|
||||||
SHRQ $33, R12
|
SHRQ $33, x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
IMULQ R14, AX
|
IMULQ prime2, h
|
||||||
MOVQ AX, R12
|
MOVQ h, x
|
||||||
SHRQ $29, R12
|
SHRQ $29, x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
IMULQ ·prime3v(SB), AX
|
IMULQ ·primes+16(SB), h
|
||||||
MOVQ AX, R12
|
MOVQ h, x
|
||||||
SHRQ $32, R12
|
SHRQ $32, x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
|
|
||||||
MOVQ AX, ret+24(FP)
|
MOVQ h, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
|
||||||
// the d pointer.
|
|
||||||
|
|
||||||
// func writeBlocks(d *Digest, b []byte) int
|
// func writeBlocks(d *Digest, b []byte) int
|
||||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||||
// Load fixed primes needed for round.
|
// Load fixed primes needed for round.
|
||||||
MOVQ ·prime1v(SB), R13
|
MOVQ ·primes+0(SB), prime1
|
||||||
MOVQ ·prime2v(SB), R14
|
MOVQ ·primes+8(SB), prime2
|
||||||
|
|
||||||
// Load slice.
|
// Load slice.
|
||||||
MOVQ b_base+8(FP), SI
|
MOVQ b_base+8(FP), p
|
||||||
MOVQ b_len+16(FP), DX
|
MOVQ b_len+16(FP), n
|
||||||
LEAQ (SI)(DX*1), BX
|
LEAQ (p)(n*1), end
|
||||||
SUBQ $32, BX
|
SUBQ $32, end
|
||||||
|
|
||||||
// Load vN from d.
|
// Load vN from d.
|
||||||
MOVQ d+0(FP), AX
|
MOVQ s+0(FP), d
|
||||||
MOVQ 0(AX), R8 // v1
|
MOVQ 0(d), v1
|
||||||
MOVQ 8(AX), R9 // v2
|
MOVQ 8(d), v2
|
||||||
MOVQ 16(AX), R10 // v3
|
MOVQ 16(d), v3
|
||||||
MOVQ 24(AX), R11 // v4
|
MOVQ 24(d), v4
|
||||||
|
|
||||||
// We don't need to check the loop condition here; this function is
|
// We don't need to check the loop condition here; this function is
|
||||||
// always called with at least one block of data to process.
|
// always called with at least one block of data to process.
|
||||||
blockLoop:
|
blockLoop()
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE blockLoop
|
|
||||||
|
|
||||||
// Copy vN back to d.
|
// Copy vN back to d.
|
||||||
MOVQ R8, 0(AX)
|
MOVQ v1, 0(d)
|
||||||
MOVQ R9, 8(AX)
|
MOVQ v2, 8(d)
|
||||||
MOVQ R10, 16(AX)
|
MOVQ v3, 16(d)
|
||||||
MOVQ R11, 24(AX)
|
MOVQ v4, 24(d)
|
||||||
|
|
||||||
// The number of bytes written is SI minus the old base pointer.
|
// The number of bytes written is p minus the old base pointer.
|
||||||
SUBQ b_base+8(FP), SI
|
SUBQ b_base+8(FP), p
|
||||||
MOVQ SI, ret+32(FP)
|
MOVQ p, ret+32(FP)
|
||||||
|
|
||||||
RET
|
RET
|
||||||
|
183 vendor/github.com/cespare/xxhash/v2/xxhash_arm64.s generated vendored Normal file
@ -0,0 +1,183 @@
|
|||||||
|
//go:build !appengine && gc && !purego
|
||||||
|
// +build !appengine
|
||||||
|
// +build gc
|
||||||
|
// +build !purego
|
||||||
|
|
||||||
|
#include "textflag.h"
|
||||||
|
|
||||||
|
// Registers:
|
||||||
|
#define digest R1
|
||||||
|
#define h R2 // return value
|
||||||
|
#define p R3 // input pointer
|
||||||
|
#define n R4 // input length
|
||||||
|
#define nblocks R5 // n / 32
|
||||||
|
#define prime1 R7
|
||||||
|
#define prime2 R8
|
||||||
|
#define prime3 R9
|
||||||
|
#define prime4 R10
|
||||||
|
#define prime5 R11
|
||||||
|
#define v1 R12
|
||||||
|
#define v2 R13
|
||||||
|
#define v3 R14
|
||||||
|
#define v4 R15
|
||||||
|
#define x1 R20
|
||||||
|
#define x2 R21
|
||||||
|
#define x3 R22
|
||||||
|
#define x4 R23
|
||||||
|
|
||||||
|
#define round(acc, x) \
|
||||||
|
MADD prime2, acc, x, acc \
|
||||||
|
ROR $64-31, acc \
|
||||||
|
MUL prime1, acc
|
||||||
|
|
||||||
|
// round0 performs the operation x = round(0, x).
|
||||||
|
#define round0(x) \
|
||||||
|
MUL prime2, x \
|
||||||
|
ROR $64-31, x \
|
||||||
|
MUL prime1, x
|
||||||
|
|
||||||
|
#define mergeRound(acc, x) \
|
||||||
|
round0(x) \
|
||||||
|
EOR x, acc \
|
||||||
|
MADD acc, prime4, prime1, acc
|
||||||
|
|
||||||
|
// blockLoop processes as many 32-byte blocks as possible,
|
||||||
|
// updating v1, v2, v3, and v4. It assumes that n >= 32.
|
||||||
|
#define blockLoop() \
|
||||||
|
LSR $5, n, nblocks \
|
||||||
|
PCALIGN $16 \
|
||||||
|
loop: \
|
||||||
|
LDP.P 16(p), (x1, x2) \
|
||||||
|
LDP.P 16(p), (x3, x4) \
|
||||||
|
round(v1, x1) \
|
||||||
|
round(v2, x2) \
|
||||||
|
round(v3, x3) \
|
||||||
|
round(v4, x4) \
|
||||||
|
SUB $1, nblocks \
|
||||||
|
CBNZ nblocks, loop
|
||||||
|
|
||||||
|
// func Sum64(b []byte) uint64
|
||||||
|
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||||
|
LDP b_base+0(FP), (p, n)
|
||||||
|
|
||||||
|
LDP ·primes+0(SB), (prime1, prime2)
|
||||||
|
LDP ·primes+16(SB), (prime3, prime4)
|
||||||
|
MOVD ·primes+32(SB), prime5
|
||||||
|
|
||||||
|
CMP $32, n
|
||||||
|
CSEL LT, prime5, ZR, h // if n < 32 { h = prime5 } else { h = 0 }
|
||||||
|
BLT afterLoop
|
||||||
|
|
||||||
|
ADD prime1, prime2, v1
|
||||||
|
MOVD prime2, v2
|
||||||
|
MOVD $0, v3
|
||||||
|
NEG prime1, v4
|
||||||
|
|
||||||
|
blockLoop()
|
||||||
|
|
||||||
|
ROR $64-1, v1, x1
|
||||||
|
ROR $64-7, v2, x2
|
||||||
|
ADD x1, x2
|
||||||
|
ROR $64-12, v3, x3
|
||||||
|
ROR $64-18, v4, x4
|
||||||
|
ADD x3, x4
|
||||||
|
ADD x2, x4, h
|
||||||
|
|
||||||
|
mergeRound(h, v1)
|
||||||
|
mergeRound(h, v2)
|
||||||
|
mergeRound(h, v3)
|
||||||
|
mergeRound(h, v4)
|
||||||
|
|
||||||
|
afterLoop:
|
||||||
|
ADD n, h
|
||||||
|
|
||||||
|
TBZ $4, n, try8
|
||||||
|
LDP.P 16(p), (x1, x2)
|
||||||
|
|
||||||
|
round0(x1)
|
||||||
|
|
||||||
|
// NOTE: here and below, sequencing the EOR after the ROR (using a
|
||||||
|
// rotated register) is worth a small but measurable speedup for small
|
||||||
|
// inputs.
|
||||||
|
ROR $64-27, h
|
||||||
|
EOR x1 @> 64-27, h, h
|
||||||
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
|
round0(x2)
|
||||||
|
ROR $64-27, h
|
||||||
|
EOR x2 @> 64-27, h, h
|
||||||
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
|
try8:
|
||||||
|
TBZ $3, n, try4
|
||||||
|
MOVD.P 8(p), x1
|
||||||
|
|
||||||
|
round0(x1)
|
||||||
|
ROR $64-27, h
|
||||||
|
EOR x1 @> 64-27, h, h
|
||||||
|
MADD h, prime4, prime1, h
|
||||||
|
|
||||||
|
try4:
|
||||||
|
TBZ $2, n, try2
|
||||||
|
MOVWU.P 4(p), x2
|
||||||
|
|
||||||
|
MUL prime1, x2
|
||||||
|
ROR $64-23, h
|
||||||
|
EOR x2 @> 64-23, h, h
|
||||||
|
MADD h, prime3, prime2, h
|
||||||
|
|
||||||
|
try2:
|
||||||
|
TBZ $1, n, try1
|
||||||
|
MOVHU.P 2(p), x3
|
||||||
|
AND $255, x3, x1
|
||||||
|
LSR $8, x3, x2
|
||||||
|
|
||||||
|
MUL prime5, x1
|
||||||
|
ROR $64-11, h
|
||||||
|
EOR x1 @> 64-11, h, h
|
||||||
|
MUL prime1, h
|
||||||
|
|
||||||
|
MUL prime5, x2
|
||||||
|
ROR $64-11, h
|
||||||
|
EOR x2 @> 64-11, h, h
|
||||||
|
MUL prime1, h
|
||||||
|
|
||||||
|
try1:
|
||||||
|
TBZ $0, n, finalize
|
||||||
|
MOVBU (p), x4
|
||||||
|
|
||||||
|
MUL prime5, x4
|
||||||
|
ROR $64-11, h
|
||||||
|
EOR x4 @> 64-11, h, h
|
||||||
|
MUL prime1, h
|
||||||
|
|
||||||
|
finalize:
|
||||||
|
EOR h >> 33, h
|
||||||
|
MUL prime2, h
|
||||||
|
EOR h >> 29, h
|
||||||
|
MUL prime3, h
|
||||||
|
EOR h >> 32, h
|
||||||
|
|
||||||
|
MOVD h, ret+24(FP)
|
||||||
|
RET
|
||||||
|
|
||||||
|
// func writeBlocks(d *Digest, b []byte) int
|
||||||
|
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||||
|
LDP ·primes+0(SB), (prime1, prime2)
|
||||||
|
|
||||||
|
// Load state. Assume v[1-4] are stored contiguously.
|
||||||
|
MOVD d+0(FP), digest
|
||||||
|
LDP 0(digest), (v1, v2)
|
||||||
|
LDP 16(digest), (v3, v4)
|
||||||
|
|
||||||
|
LDP b_base+8(FP), (p, n)
|
||||||
|
|
||||||
|
blockLoop()
|
||||||
|
|
||||||
|
// Store updated state.
|
||||||
|
STP (v1, v2), 0(digest)
|
||||||
|
STP (v3, v4), 16(digest)
|
||||||
|
|
||||||
|
BIC $31, n
|
||||||
|
MOVD n, ret+32(FP)
|
||||||
|
RET
|
@ -1,3 +1,5 @@
|
|||||||
|
//go:build (amd64 || arm64) && !appengine && gc && !purego
|
||||||
|
// +build amd64 arm64
|
||||||
// +build !appengine
|
// +build !appengine
|
||||||
// +build gc
|
// +build gc
|
||||||
// +build !purego
|
// +build !purego
|
22 vendor/github.com/cespare/xxhash/v2/xxhash_other.go generated vendored
@ -1,4 +1,5 @@
|
|||||||
// +build !amd64 appengine !gc purego
|
//go:build (!amd64 && !arm64) || appengine || !gc || purego
|
||||||
|
// +build !amd64,!arm64 appengine !gc purego
|
||||||
|
|
||||||
package xxhash
|
package xxhash
|
||||||
|
|
||||||
@ -14,10 +15,10 @@ func Sum64(b []byte) uint64 {
|
|||||||
var h uint64
|
var h uint64
|
||||||
|
|
||||||
if n >= 32 {
|
if n >= 32 {
|
||||||
v1 := prime1v + prime2
|
v1 := primes[0] + prime2
|
||||||
v2 := prime2
|
v2 := prime2
|
||||||
v3 := uint64(0)
|
v3 := uint64(0)
|
||||||
v4 := -prime1v
|
v4 := -primes[0]
|
||||||
for len(b) >= 32 {
|
for len(b) >= 32 {
|
||||||
v1 = round(v1, u64(b[0:8:len(b)]))
|
v1 = round(v1, u64(b[0:8:len(b)]))
|
||||||
v2 = round(v2, u64(b[8:16:len(b)]))
|
v2 = round(v2, u64(b[8:16:len(b)]))
|
||||||
@ -36,19 +37,18 @@ func Sum64(b []byte) uint64 {
|
|||||||
|
|
||||||
h += uint64(n)
|
h += uint64(n)
|
||||||
|
|
||||||
i, end := 0, len(b)
|
for ; len(b) >= 8; b = b[8:] {
|
||||||
for ; i+8 <= end; i += 8 {
|
k1 := round(0, u64(b[:8]))
|
||||||
k1 := round(0, u64(b[i:i+8:len(b)]))
|
|
||||||
h ^= k1
|
h ^= k1
|
||||||
h = rol27(h)*prime1 + prime4
|
h = rol27(h)*prime1 + prime4
|
||||||
}
|
}
|
||||||
if i+4 <= end {
|
if len(b) >= 4 {
|
||||||
h ^= uint64(u32(b[i:i+4:len(b)])) * prime1
|
h ^= uint64(u32(b[:4])) * prime1
|
||||||
h = rol23(h)*prime2 + prime3
|
h = rol23(h)*prime2 + prime3
|
||||||
i += 4
|
b = b[4:]
|
||||||
}
|
}
|
||||||
for ; i < end; i++ {
|
for ; len(b) > 0; b = b[1:] {
|
||||||
h ^= uint64(b[i]) * prime5
|
h ^= uint64(b[0]) * prime5
|
||||||
h = rol11(h) * prime1
|
h = rol11(h) * prime1
|
||||||
}
|
}
|
||||||
|
|
||||||
|
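The pure-Go fallback above now consumes the tail of the input by reslicing b rather than tracking an index, matching the Digest.Sum64 change earlier in this diff. The exported API is untouched; as a quick illustration (not part of this changeset), both entry points of github.com/cespare/xxhash/v2 can be used like this:

package main

import (
	"fmt"

	"github.com/cespare/xxhash/v2"
)

func main() {
	data := []byte("hello, xxhash")

	// One-shot hash over a byte slice.
	fmt.Printf("%016x\n", xxhash.Sum64(data))

	// Streaming hash: Write buffers partial blocks and Sum64 runs the
	// 8/4/1-byte tail loops shown in the hunks above.
	d := xxhash.New()
	d.Write(data[:5])
	d.Write(data[5:])
	fmt.Printf("%016x\n", d.Sum64()) // same value as the one-shot call
}
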
1 vendor/github.com/cespare/xxhash/v2/xxhash_safe.go generated vendored
@ -1,3 +1,4 @@
|
|||||||
|
//go:build appengine
|
||||||
// +build appengine
|
// +build appengine
|
||||||
|
|
||||||
// This file contains the safe implementations of otherwise unsafe-using code.
|
// This file contains the safe implementations of otherwise unsafe-using code.
|
||||||
|
3 vendor/github.com/cespare/xxhash/v2/xxhash_unsafe.go generated vendored
@ -1,3 +1,4 @@
|
|||||||
|
//go:build !appengine
|
||||||
// +build !appengine
|
// +build !appengine
|
||||||
|
|
||||||
// This file encapsulates usage of unsafe.
|
// This file encapsulates usage of unsafe.
|
||||||
@ -11,7 +12,7 @@ import (
|
|||||||
|
|
||||||
// In the future it's possible that compiler optimizations will make these
|
// In the future it's possible that compiler optimizations will make these
|
||||||
// XxxString functions unnecessary by realizing that calls such as
|
// XxxString functions unnecessary by realizing that calls such as
|
||||||
// Sum64([]byte(s)) don't need to copy s. See https://golang.org/issue/2205.
|
// Sum64([]byte(s)) don't need to copy s. See https://go.dev/issue/2205.
|
||||||
// If that happens, even if we keep these functions they can be replaced with
|
// If that happens, even if we keep these functions they can be replaced with
|
||||||
// the trivial safe code.
|
// the trivial safe code.
|
||||||
|
|
||||||
|
8 vendor/github.com/containerd/containerd/content/helpers.go generated vendored
@ -21,12 +21,12 @@ import (
|
|||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/rand"
|
|
||||||
"sync"
|
"sync"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/containerd/containerd/errdefs"
|
"github.com/containerd/containerd/errdefs"
|
||||||
"github.com/containerd/containerd/log"
|
"github.com/containerd/containerd/log"
|
||||||
|
"github.com/containerd/containerd/pkg/randutil"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
)
|
)
|
||||||
@ -59,6 +59,10 @@ func NewReader(ra ReaderAt) io.Reader {
|
|||||||
//
|
//
|
||||||
// Avoid using this for large blobs, such as layers.
|
// Avoid using this for large blobs, such as layers.
|
||||||
func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) {
|
func ReadBlob(ctx context.Context, provider Provider, desc ocispec.Descriptor) ([]byte, error) {
|
||||||
|
if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest {
|
||||||
|
return desc.Data, nil
|
||||||
|
}
|
||||||
|
|
||||||
ra, err := provider.ReaderAt(ctx, desc)
|
ra, err := provider.ReaderAt(ctx, desc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
@ -119,7 +123,7 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er
|
|||||||
// error or abort. Requires asserting for an ingest manager
|
// error or abort. Requires asserting for an ingest manager
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-time.After(time.Millisecond * time.Duration(rand.Intn(retry))):
|
case <-time.After(time.Millisecond * time.Duration(randutil.Intn(retry))):
|
||||||
if retry < 2048 {
|
if retry < 2048 {
|
||||||
retry = retry << 1
|
retry = retry << 1
|
||||||
}
|
}
|
||||||
|
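The ReadBlob hunk above adds a fast path: when the descriptor already embeds the blob and its length and digest match, the embedded bytes are returned without opening a ReaderAt. A minimal sketch of that check, using only go-digest and the OCI image-spec types (the helper below is illustrative, not containerd code):

package main

import (
	"fmt"

	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// inlineData returns the blob embedded in desc.Data if it matches the
// declared size and digest, mirroring the fast path added to ReadBlob.
func inlineData(desc ocispec.Descriptor) ([]byte, bool) {
	if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest {
		return desc.Data, true
	}
	return nil, false
}

func main() {
	blob := []byte(`{"schemaVersion":2}`)
	desc := ocispec.Descriptor{
		MediaType: "application/vnd.oci.image.manifest.v1+json",
		Digest:    digest.FromBytes(blob),
		Size:      int64(len(blob)),
		Data:      blob,
	}
	if data, ok := inlineData(desc); ok {
		fmt.Printf("got %d inline bytes\n", len(data))
	}
}
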
4 vendor/github.com/containerd/containerd/content/local/store.go generated vendored
@ -20,7 +20,6 @@ import (
|
|||||||
"context"
|
"context"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
"math/rand"
|
|
||||||
"os"
|
"os"
|
||||||
"path/filepath"
|
"path/filepath"
|
||||||
"strconv"
|
"strconv"
|
||||||
@ -32,6 +31,7 @@ import (
|
|||||||
"github.com/containerd/containerd/errdefs"
|
"github.com/containerd/containerd/errdefs"
|
||||||
"github.com/containerd/containerd/filters"
|
"github.com/containerd/containerd/filters"
|
||||||
"github.com/containerd/containerd/log"
|
"github.com/containerd/containerd/log"
|
||||||
|
"github.com/containerd/containerd/pkg/randutil"
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
|
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
@ -473,7 +473,7 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.
|
|||||||
lockErr = nil
|
lockErr = nil
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
time.Sleep(time.Millisecond * time.Duration(rand.Intn(1<<count)))
|
time.Sleep(time.Millisecond * time.Duration(randutil.Intn(1<<count)))
|
||||||
}
|
}
|
||||||
|
|
||||||
if lockErr != nil {
|
if lockErr != nil {
|
||||||
|
4 vendor/github.com/containerd/containerd/labels/labels.go generated vendored
@ -23,3 +23,7 @@ const LabelUncompressed = "containerd.io/uncompressed"
|
|||||||
// LabelSharedNamespace is added to a namespace to allow that namespaces
|
// LabelSharedNamespace is added to a namespace to allow that namespaces
|
||||||
// contents to be shared.
|
// contents to be shared.
|
||||||
const LabelSharedNamespace = "containerd.io/namespace.shareable"
|
const LabelSharedNamespace = "containerd.io/namespace.shareable"
|
||||||
|
|
||||||
|
// LabelDistributionSource is added to content to indicate its origin.
|
||||||
|
// e.g., "containerd.io/distribution.source.docker.io=library/redis"
|
||||||
|
const LabelDistributionSource = "containerd.io/distribution.source"
|
||||||
|
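The LabelDistributionSource constant added above replaces the private labelDistributionSource string that the remotes/docker/handler.go hunk further below removes; per-registry keys are built by appending the registry host. A hedged example (the registry host here is a placeholder):

package main

import (
	"fmt"

	"github.com/containerd/containerd/labels"
)

func main() {
	// Produces "containerd.io/distribution.source.docker.io", as in the
	// comment above; the value would be a repository such as "library/redis".
	key := fmt.Sprintf("%s.%s", labels.LabelDistributionSource, "docker.io")
	fmt.Println(key)
}
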
2 vendor/github.com/containerd/containerd/leases/id.go generated vendored
@ -17,9 +17,9 @@
|
|||||||
package leases
|
package leases
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"crypto/rand"
|
||||||
"encoding/base64"
|
"encoding/base64"
|
||||||
"fmt"
|
"fmt"
|
||||||
"math/rand"
|
|
||||||
"time"
|
"time"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
3 vendor/github.com/containerd/containerd/log/context.go generated vendored
@ -35,6 +35,9 @@ var (
|
|||||||
|
|
||||||
type (
|
type (
|
||||||
loggerKey struct{}
|
loggerKey struct{}
|
||||||
|
|
||||||
|
// Fields type to pass to `WithFields`, alias from `logrus`.
|
||||||
|
Fields = logrus.Fields
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
|
48 vendor/github.com/containerd/containerd/pkg/randutil/randutil.go generated vendored Normal file
@ -0,0 +1,48 @@
|
|||||||
|
/*
|
||||||
|
Copyright The containerd Authors.
|
||||||
|
|
||||||
|
Licensed under the Apache License, Version 2.0 (the "License");
|
||||||
|
you may not use this file except in compliance with the License.
|
||||||
|
You may obtain a copy of the License at
|
||||||
|
|
||||||
|
http://www.apache.org/licenses/LICENSE-2.0
|
||||||
|
|
||||||
|
Unless required by applicable law or agreed to in writing, software
|
||||||
|
distributed under the License is distributed on an "AS IS" BASIS,
|
||||||
|
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||||
|
See the License for the specific language governing permissions and
|
||||||
|
limitations under the License.
|
||||||
|
*/
|
||||||
|
|
||||||
|
// Package randutil provides utilities for [crypto/rand].
|
||||||
|
package randutil
|
||||||
|
|
||||||
|
import (
|
||||||
|
"crypto/rand"
|
||||||
|
"math"
|
||||||
|
"math/big"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Int63n is similar to [math/rand.Int63n] but uses [crypto/rand.Reader] under the hood.
|
||||||
|
func Int63n(n int64) int64 {
|
||||||
|
b, err := rand.Int(rand.Reader, big.NewInt(n))
|
||||||
|
if err != nil {
|
||||||
|
panic(err)
|
||||||
|
}
|
||||||
|
return b.Int64()
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int63 is similar to [math/rand.Int63] but uses [crypto/rand.Reader] under the hood.
|
||||||
|
func Int63() int64 {
|
||||||
|
return Int63n(math.MaxInt64)
|
||||||
|
}
|
||||||
|
|
||||||
|
// Intn is similar to [math/rand.Intn] but uses [crypto/rand.Reader] under the hood.
|
||||||
|
func Intn(n int) int {
|
||||||
|
return int(Int63n(int64(n)))
|
||||||
|
}
|
||||||
|
|
||||||
|
// Int is similar to [math/rand.Int] but uses [crypto/rand.Reader] under the hood.
|
||||||
|
func Int() int {
|
||||||
|
return int(Int63())
|
||||||
|
}
|
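The new randutil package is what the rand.Intn calls in content/helpers.go and content/local/store.go above were switched to. A small sketch of the same jittered-backoff pattern it supports; the retry bounds here are made up for illustration:

package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/pkg/randutil"
)

func main() {
	retry := 16
	for attempt := 0; attempt < 3; attempt++ {
		// Crypto-backed jitter, mirroring the vendored content store's retry loop.
		delay := time.Millisecond * time.Duration(randutil.Intn(retry))
		fmt.Println("attempt", attempt, "sleeping", delay)
		time.Sleep(delay)
		if retry < 2048 {
			retry <<= 1
		}
	}
}
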
5 vendor/github.com/containerd/containerd/pkg/seed/seed.go generated vendored
@ -14,6 +14,9 @@
|
|||||||
limitations under the License.
|
limitations under the License.
|
||||||
*/
|
*/
|
||||||
|
|
||||||
|
// Package seed provides an initializer for the global [math/rand] seed.
|
||||||
|
//
|
||||||
|
// Deprecated: Do not rely on the global seed.
|
||||||
package seed
|
package seed
|
||||||
|
|
||||||
import (
|
import (
|
||||||
@ -23,6 +26,8 @@ import (
|
|||||||
|
|
||||||
// WithTimeAndRand seeds the global math rand generator with nanoseconds
|
// WithTimeAndRand seeds the global math rand generator with nanoseconds
|
||||||
// XOR'ed with a crypto component if available for uniqueness.
|
// XOR'ed with a crypto component if available for uniqueness.
|
||||||
|
//
|
||||||
|
// Deprecated: Do not rely on the global seed.
|
||||||
func WithTimeAndRand() {
|
func WithTimeAndRand() {
|
||||||
var (
|
var (
|
||||||
b [4]byte
|
b [4]byte
|
||||||
|
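With pkg/seed now marked deprecated, callers are expected to stop seeding the global math/rand state (Go 1.20 and later seed it automatically) or to keep a locally seeded generator instead. A minimal sketch of the local-generator alternative; this is a suggestion, not code from this changeset:

package main

import (
	"fmt"
	"math/rand"
	"time"
)

func main() {
	// A private source avoids touching the deprecated global seeding entirely.
	r := rand.New(rand.NewSource(time.Now().UnixNano()))
	fmt.Println(r.Intn(100))
}
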
5 vendor/github.com/containerd/containerd/platforms/defaults_windows.go generated vendored
@ -50,7 +50,10 @@ func (m windowsmatcher) Match(p specs.Platform) bool {
|
|||||||
match := m.defaultMatcher.Match(p)
|
match := m.defaultMatcher.Match(p)
|
||||||
|
|
||||||
if match && m.OS == "windows" {
|
if match && m.OS == "windows" {
|
||||||
return strings.HasPrefix(p.OSVersion, m.osVersionPrefix) && m.defaultMatcher.Match(p)
|
if strings.HasPrefix(p.OSVersion, m.osVersionPrefix) {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
return p.OSVersion == ""
|
||||||
}
|
}
|
||||||
|
|
||||||
return match
|
return match
|
||||||
|
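The defaults_windows.go hunk above relaxes the Windows matcher: a platform with an empty OSVersion now matches, while a non-empty OSVersion still has to share the host's version prefix. A standalone sketch of just that predicate (not the containerd matcher type itself; the version strings are examples):

package main

import (
	"fmt"
	"strings"
)

// osVersionMatches mirrors the relaxed check: an empty OSVersion is accepted,
// otherwise it must share the matcher's version prefix.
func osVersionMatches(osVersionPrefix, osVersion string) bool {
	if osVersion == "" {
		return true
	}
	return strings.HasPrefix(osVersion, osVersionPrefix)
}

func main() {
	fmt.Println(osVersionMatches("10.0.20348", ""))              // true
	fmt.Println(osVersionMatches("10.0.20348", "10.0.20348.1"))  // true
	fmt.Println(osVersionMatches("10.0.20348", "10.0.17763.1"))  // false
}
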
2 vendor/github.com/containerd/containerd/protobuf/any.go generated vendored
@ -17,7 +17,7 @@
|
|||||||
package protobuf
|
package protobuf
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/containerd/typeurl"
|
"github.com/containerd/typeurl/v2"
|
||||||
"google.golang.org/protobuf/types/known/anypb"
|
"google.golang.org/protobuf/types/known/anypb"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
4 vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go generated vendored
@ -114,7 +114,7 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.
|
|||||||
form.Set("access_type", "offline")
|
form.Set("access_type", "offline")
|
||||||
}
|
}
|
||||||
|
|
||||||
req, err := http.NewRequestWithContext(ctx, "POST", to.Realm, strings.NewReader(form.Encode()))
|
req, err := http.NewRequestWithContext(ctx, http.MethodPost, to.Realm, strings.NewReader(form.Encode()))
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
@ -161,7 +161,7 @@ type FetchTokenResponse struct {
|
|||||||
|
|
||||||
// FetchToken fetches a token using a GET request
|
// FetchToken fetches a token using a GET request
|
||||||
func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) {
|
func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) {
|
||||||
req, err := http.NewRequestWithContext(ctx, "GET", to.Realm, nil)
|
req, err := http.NewRequestWithContext(ctx, http.MethodGet, to.Realm, nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
3 vendor/github.com/containerd/containerd/remotes/docker/authorizer.go generated vendored
@ -29,7 +29,6 @@ import (
|
|||||||
"github.com/containerd/containerd/log"
|
"github.com/containerd/containerd/log"
|
||||||
"github.com/containerd/containerd/remotes/docker/auth"
|
"github.com/containerd/containerd/remotes/docker/auth"
|
||||||
remoteerrors "github.com/containerd/containerd/remotes/errors"
|
remoteerrors "github.com/containerd/containerd/remotes/errors"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
type dockerAuthorizer struct {
|
type dockerAuthorizer struct {
|
||||||
@ -312,7 +311,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st
|
|||||||
}
|
}
|
||||||
return resp.Token, resp.RefreshToken, nil
|
return resp.Token, resp.RefreshToken, nil
|
||||||
}
|
}
|
||||||
log.G(ctx).WithFields(logrus.Fields{
|
log.G(ctx).WithFields(log.Fields{
|
||||||
"status": errStatus.Status,
|
"status": errStatus.Status,
|
||||||
"body": string(errStatus.Body),
|
"body": string(errStatus.Body),
|
||||||
}).Debugf("token request failed")
|
}).Debugf("token request failed")
|
||||||
|
7 vendor/github.com/containerd/containerd/remotes/docker/handler.go generated vendored
@ -30,11 +30,6 @@ import (
|
|||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
|
||||||
// labelDistributionSource describes the source blob comes from.
|
|
||||||
labelDistributionSource = "containerd.io/distribution.source"
|
|
||||||
)
|
|
||||||
|
|
||||||
// AppendDistributionSourceLabel updates the label of blob with distribution source.
|
// AppendDistributionSourceLabel updates the label of blob with distribution source.
|
||||||
func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) {
|
func AppendDistributionSourceLabel(manager content.Manager, ref string) (images.HandlerFunc, error) {
|
||||||
refspec, err := reference.Parse(ref)
|
refspec, err := reference.Parse(ref)
|
||||||
@ -108,7 +103,7 @@ func appendDistributionSourceLabel(originLabel, repo string) string {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func distributionSourceLabelKey(source string) string {
|
func distributionSourceLabelKey(source string) string {
|
||||||
return fmt.Sprintf("%s.%s", labelDistributionSource, source)
|
return fmt.Sprintf("%s.%s", labels.LabelDistributionSource, source)
|
||||||
}
|
}
|
||||||
|
|
||||||
// selectRepositoryMountCandidate will select the repo which has longest
|
// selectRepositoryMountCandidate will select the repo which has longest
|
||||||
|
9 vendor/github.com/containerd/containerd/remotes/docker/resolver.go generated vendored
@ -38,7 +38,6 @@ import (
|
|||||||
"github.com/containerd/containerd/version"
|
"github.com/containerd/containerd/version"
|
||||||
"github.com/opencontainers/go-digest"
|
"github.com/opencontainers/go-digest"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
var (
|
||||||
@ -647,7 +646,7 @@ func (r *request) String() string {
|
|||||||
return r.host.Scheme + "://" + r.host.Host + r.path
|
return r.host.Scheme + "://" + r.host.Host + r.path
|
||||||
}
|
}
|
||||||
|
|
||||||
func requestFields(req *http.Request) logrus.Fields {
|
func requestFields(req *http.Request) log.Fields {
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"request.method": req.Method,
|
"request.method": req.Method,
|
||||||
}
|
}
|
||||||
@ -665,10 +664,10 @@ func requestFields(req *http.Request) logrus.Fields {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return logrus.Fields(fields)
|
return log.Fields(fields)
|
||||||
}
|
}
|
||||||
|
|
||||||
func responseFields(resp *http.Response) logrus.Fields {
|
func responseFields(resp *http.Response) log.Fields {
|
||||||
fields := map[string]interface{}{
|
fields := map[string]interface{}{
|
||||||
"response.status": resp.Status,
|
"response.status": resp.Status,
|
||||||
}
|
}
|
||||||
@ -683,7 +682,7 @@ func responseFields(resp *http.Response) logrus.Fields {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return logrus.Fields(fields)
|
return log.Fields(fields)
|
||||||
}
|
}
|
||||||
|
|
||||||
// IsLocalhost checks if the registry host is local.
|
// IsLocalhost checks if the registry host is local.
|
||||||
|
13 vendor/github.com/containerd/containerd/remotes/handlers.go generated vendored
@ -17,6 +17,7 @@
|
|||||||
package remotes
|
package remotes
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"bytes"
|
||||||
"context"
|
"context"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@ -27,10 +28,10 @@ import (
|
|||||||
"github.com/containerd/containerd/content"
|
"github.com/containerd/containerd/content"
|
||||||
"github.com/containerd/containerd/errdefs"
|
"github.com/containerd/containerd/errdefs"
|
||||||
"github.com/containerd/containerd/images"
|
"github.com/containerd/containerd/images"
|
||||||
|
"github.com/containerd/containerd/labels"
|
||||||
"github.com/containerd/containerd/log"
|
"github.com/containerd/containerd/log"
|
||||||
"github.com/containerd/containerd/platforms"
|
"github.com/containerd/containerd/platforms"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"golang.org/x/sync/semaphore"
|
"golang.org/x/sync/semaphore"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -90,7 +91,7 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
|
|||||||
// recursive fetch.
|
// recursive fetch.
|
||||||
func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc {
|
func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc {
|
||||||
return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
|
return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
|
||||||
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
|
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{
|
||||||
"digest": desc.Digest,
|
"digest": desc.Digest,
|
||||||
"mediatype": desc.MediaType,
|
"mediatype": desc.MediaType,
|
||||||
"size": desc.Size,
|
"size": desc.Size,
|
||||||
@ -139,6 +140,10 @@ func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
|
if desc.Size == int64(len(desc.Data)) {
|
||||||
|
return content.Copy(ctx, cw, bytes.NewReader(desc.Data), desc.Size, desc.Digest)
|
||||||
|
}
|
||||||
|
|
||||||
rc, err := fetcher.Fetch(ctx, desc)
|
rc, err := fetcher.Fetch(ctx, desc)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
@ -152,7 +157,7 @@ func Fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc
|
|||||||
// using a writer from the pusher.
|
// using a writer from the pusher.
|
||||||
func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc {
|
func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc {
|
||||||
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||||
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
|
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{
|
||||||
"digest": desc.Digest,
|
"digest": desc.Digest,
|
||||||
"mediatype": desc.MediaType,
|
"mediatype": desc.MediaType,
|
||||||
"size": desc.Size,
|
"size": desc.Size,
|
||||||
@ -363,7 +368,7 @@ func annotateDistributionSourceHandler(f images.HandlerFunc, manager content.Man
|
|||||||
}
|
}
|
||||||
|
|
||||||
for k, v := range info.Labels {
|
for k, v := range info.Labels {
|
||||||
if !strings.HasPrefix(k, "containerd.io/distribution.source.") {
|
if !strings.HasPrefix(k, labels.LabelDistributionSource+".") {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
|
2 vendor/github.com/containerd/containerd/remotes/resolver.go generated vendored
@ -34,7 +34,7 @@ type Resolver interface {
|
|||||||
// reference a specific host or be matched against a specific handler.
|
// reference a specific host or be matched against a specific handler.
|
||||||
//
|
//
|
||||||
// The returned name should be used to identify the referenced entity.
|
// The returned name should be used to identify the referenced entity.
|
||||||
// Dependending on the remote namespace, this may be immutable or mutable.
|
// Depending on the remote namespace, this may be immutable or mutable.
|
||||||
// While the name may differ from ref, it should itself be a valid ref.
|
// While the name may differ from ref, it should itself be a valid ref.
|
||||||
//
|
//
|
||||||
// If the resolution fails, an error will be returned.
|
// If the resolution fails, an error will be returned.
|
||||||
|
@ -30,7 +30,6 @@ import (
|
|||||||
ptypes "github.com/containerd/containerd/protobuf/types"
|
ptypes "github.com/containerd/containerd/protobuf/types"
|
||||||
digest "github.com/opencontainers/go-digest"
|
digest "github.com/opencontainers/go-digest"
|
||||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||||
"github.com/sirupsen/logrus"
|
|
||||||
"google.golang.org/grpc"
|
"google.golang.org/grpc"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
@ -295,7 +294,7 @@ func (s *service) Write(session api.Content_WriteServer) (err error) {
|
|||||||
return status.Errorf(codes.InvalidArgument, "first message must have a reference")
|
return status.Errorf(codes.InvalidArgument, "first message must have a reference")
|
||||||
}
|
}
|
||||||
|
|
||||||
fields := logrus.Fields{
|
fields := log.Fields{
|
||||||
"ref": ref,
|
"ref": ref,
|
||||||
}
|
}
|
||||||
total = req.Total
|
total = req.Total
|
||||||
|
9 vendor/github.com/containerd/containerd/tracing/tracing.go generated vendored
@ -23,7 +23,8 @@ import (
|
|||||||
"go.opentelemetry.io/otel"
|
"go.opentelemetry.io/otel"
|
||||||
"go.opentelemetry.io/otel/attribute"
|
"go.opentelemetry.io/otel/attribute"
|
||||||
"go.opentelemetry.io/otel/codes"
|
"go.opentelemetry.io/otel/codes"
|
||||||
semconv "go.opentelemetry.io/otel/semconv/v1.12.0"
|
semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
|
||||||
|
httpconv "go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
|
||||||
"go.opentelemetry.io/otel/trace"
|
"go.opentelemetry.io/otel/trace"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -39,8 +40,8 @@ type SpanOpt func(config *StartConfig)
|
|||||||
func WithHTTPRequest(request *http.Request) SpanOpt {
|
func WithHTTPRequest(request *http.Request) SpanOpt {
|
||||||
return func(config *StartConfig) {
|
return func(config *StartConfig) {
|
||||||
config.spanOpts = append(config.spanOpts,
|
config.spanOpts = append(config.spanOpts,
|
||||||
trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server
|
trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server
|
||||||
trace.WithAttributes(semconv.HTTPClientAttributesFromHTTPRequest(request)...), // Add HTTP attributes
|
trace.WithAttributes(httpconv.ClientRequest(request)...), // Add HTTP attributes
|
||||||
)
|
)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -112,5 +113,5 @@ func Attribute(k string, v interface{}) attribute.KeyValue {
|
|||||||
// HTTPStatusCodeAttributes generates attributes of the HTTP namespace as specified by the OpenTelemetry
|
// HTTPStatusCodeAttributes generates attributes of the HTTP namespace as specified by the OpenTelemetry
|
||||||
// specification for a span.
|
// specification for a span.
|
||||||
func HTTPStatusCodeAttributes(code int) []attribute.KeyValue {
|
func HTTPStatusCodeAttributes(code int) []attribute.KeyValue {
|
||||||
return semconv.HTTPAttributesFromHTTPStatusCode(code)
|
return []attribute.KeyValue{semconv.HTTPStatusCodeKey.Int(code)}
|
||||||
}
|
}
|
||||||
|
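The tracing.go hunks above move from the semconv v1.12.0 helpers to v1.17.0, where client request attributes come from the httpconv package and the status-code attribute is built directly from the key. A hedged usage sketch of the two v1.17.0 calls referenced in the diff (the URL is a placeholder):

package main

import (
	"fmt"
	"net/http"

	semconv "go.opentelemetry.io/otel/semconv/v1.17.0"
	"go.opentelemetry.io/otel/semconv/v1.17.0/httpconv"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://registry.example.com/v2/", nil)

	// Client request attributes, as used by WithHTTPRequest in the diff.
	for _, kv := range httpconv.ClientRequest(req) {
		fmt.Println(kv.Key, kv.Value.Emit())
	}

	// Status-code attribute, as used by HTTPStatusCodeAttributes.
	kv := semconv.HTTPStatusCodeKey.Int(200)
	fmt.Println(kv.Key, kv.Value.Emit())
}
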
2 vendor/github.com/containerd/containerd/version/version.go generated vendored
@ -23,7 +23,7 @@ var (
|
|||||||
Package = "github.com/containerd/containerd"
|
Package = "github.com/containerd/containerd"
|
||||||
|
|
||||||
// Version holds the complete version number. Filled in at linking time.
|
// Version holds the complete version number. Filled in at linking time.
|
||||||
Version = "1.7.0-beta.3+unknown"
|
Version = "1.7.0+unknown"
|
||||||
|
|
||||||
// Revision is filled with the VCS (e.g. git) revision being used to build
|
// Revision is filled with the VCS (e.g. git) revision being used to build
|
||||||
// the program at linking time.
|
// the program at linking time.
|
||||||
|
8 vendor/github.com/containerd/ttrpc/Makefile generated vendored
@ -57,7 +57,7 @@ TESTFLAGS_PARALLEL ?= 8
|
|||||||
# Use this to replace `go test` with, for instance, `gotestsum`
|
# Use this to replace `go test` with, for instance, `gotestsum`
|
||||||
GOTEST ?= $(GO) test
|
GOTEST ?= $(GO) test
|
||||||
|
|
||||||
.PHONY: clean all AUTHORS build binaries test integration generate protos checkprotos coverage ci check help install vendor install-protobuf install-protobuild
|
.PHONY: clean all AUTHORS build binaries test integration generate protos check-protos coverage ci check help install vendor install-protobuf install-protobuild
|
||||||
.DEFAULT: default
|
.DEFAULT: default
|
||||||
|
|
||||||
# Forcibly set the default goal to all, in case an include above brought in a rule definition.
|
# Forcibly set the default goal to all, in case an include above brought in a rule definition.
|
||||||
@ -69,7 +69,7 @@ check: proto-fmt ## run all linters
|
|||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
GOGC=75 golangci-lint run
|
GOGC=75 golangci-lint run
|
||||||
|
|
||||||
ci: check binaries checkprotos coverage # coverage-integration ## to be used by the CI
|
ci: check binaries check-protos coverage # coverage-integration ## to be used by the CI
|
||||||
|
|
||||||
AUTHORS: .mailmap .git/HEAD
|
AUTHORS: .mailmap .git/HEAD
|
||||||
git log --format='%aN <%aE>' | sort -fu > $@
|
git log --format='%aN <%aE>' | sort -fu > $@
|
||||||
@ -145,8 +145,8 @@ install-protobuf:
|
|||||||
|
|
||||||
install-protobuild:
|
install-protobuild:
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
@$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@v1.27.1
|
@$(GO) install google.golang.org/protobuf/cmd/protoc-gen-go@v1.28.1
|
||||||
@$(GO) install github.com/containerd/protobuild@7e5ee24bc1f70e9e289fef15e2631eb3491320bf
|
@$(GO) install github.com/containerd/protobuild@14832ccc41429f5c4f81028e5af08aa233a219cf
|
||||||
|
|
||||||
coverage: ## generate coverprofiles from the unit tests, except tests that require root
|
coverage: ## generate coverprofiles from the unit tests, except tests that require root
|
||||||
@echo "$(WHALE) $@"
|
@echo "$(WHALE) $@"
|
||||||
|
3 vendor/github.com/containerd/ttrpc/Protobuild.toml generated vendored
@ -23,3 +23,6 @@ generators = ["go"]
|
|||||||
# enable ttrpc and disable fieldpath and grpc for the shim
|
# enable ttrpc and disable fieldpath and grpc for the shim
|
||||||
prefixes = ["github.com/containerd/ttrpc/integration/streaming"]
|
prefixes = ["github.com/containerd/ttrpc/integration/streaming"]
|
||||||
generators = ["go", "go-ttrpc"]
|
generators = ["go", "go-ttrpc"]
|
||||||
|
|
||||||
|
[overrides.parameters.go-ttrpc]
|
||||||
|
prefix = "TTRPC"
|
||||||
|
4 vendor/github.com/containerd/ttrpc/README.md generated vendored
@ -1,7 +1,6 @@
|
|||||||
# ttrpc
|
# ttrpc
|
||||||
|
|
||||||
[](https://github.com/containerd/ttrpc/actions?query=workflow%3ACI)
|
[](https://github.com/containerd/ttrpc/actions?query=workflow%3ACI)
|
||||||
[](https://codecov.io/gh/containerd/ttrpc)
|
|
||||||
|
|
||||||
GRPC for low-memory environments.
|
GRPC for low-memory environments.
|
||||||
|
|
||||||
@ -30,7 +29,7 @@ Create a gogo vanity binary (see
|
|||||||
[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
|
[`cmd/protoc-gen-gogottrpc/main.go`](cmd/protoc-gen-gogottrpc/main.go) for an
|
||||||
example with the ttrpc plugin enabled.
|
example with the ttrpc plugin enabled.
|
||||||
|
|
||||||
It's recommended to use [`protobuild`](https://github.com//stevvooe/protobuild)
|
It's recommended to use [`protobuild`](https://github.com/containerd/protobuild)
|
||||||
to build the protobufs for this project, but this will work with protoc
|
to build the protobufs for this project, but this will work with protoc
|
||||||
directly, if required.
|
directly, if required.
|
||||||
|
|
||||||
@ -41,7 +40,6 @@ directly, if required.
|
|||||||
- The client and server interface are identical whereas in GRPC there is a
|
- The client and server interface are identical whereas in GRPC there is a
|
||||||
client and server interface that are different.
|
client and server interface that are different.
|
||||||
- The Go stdlib context package is used instead.
|
- The Go stdlib context package is used instead.
|
||||||
- No support for streams yet.
|
|
||||||
|
|
||||||
# Status
|
# Status
|
||||||
|
|
||||||
|
4 vendor/github.com/containerd/ttrpc/request.pb.go generated vendored
@ -1,7 +1,7 @@
|
|||||||
// Code generated by protoc-gen-go. DO NOT EDIT.
|
// Code generated by protoc-gen-go. DO NOT EDIT.
|
||||||
// versions:
|
// versions:
|
||||||
// protoc-gen-go v1.27.1
|
// protoc-gen-go v1.28.1
|
||||||
// protoc v3.11.4
|
// protoc v3.20.1
|
||||||
// source: github.com/containerd/ttrpc/request.proto
|
// source: github.com/containerd/ttrpc/request.proto
|
||||||
|
|
||||||
package ttrpc
|
package ttrpc
|
||||||
|
62 vendor/github.com/containerd/ttrpc/server.go generated vendored
@ -18,11 +18,13 @@ package ttrpc
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"context"
|
"context"
|
||||||
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
"math/rand"
|
"math/rand"
|
||||||
"net"
|
"net"
|
||||||
"sync"
|
"sync"
|
||||||
"sync/atomic"
|
"sync/atomic"
|
||||||
|
"syscall"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
"github.com/sirupsen/logrus"
|
"github.com/sirupsen/logrus"
|
||||||
@ -119,12 +121,18 @@ func (s *Server) Serve(ctx context.Context, l net.Listener) error {
|
|||||||
|
|
||||||
approved, handshake, err := handshaker.Handshake(ctx, conn)
|
approved, handshake, err := handshaker.Handshake(ctx, conn)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
logrus.WithError(err).Errorf("ttrpc: refusing connection after handshake")
|
logrus.WithError(err).Error("ttrpc: refusing connection after handshake")
|
||||||
|
conn.Close()
|
||||||
|
continue
|
||||||
|
}
|
||||||
|
|
||||||
|
sc, err := s.newConn(approved, handshake)
|
||||||
|
if err != nil {
|
||||||
|
logrus.WithError(err).Error("ttrpc: create connection failed")
|
||||||
conn.Close()
|
conn.Close()
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
sc := s.newConn(approved, handshake)
|
|
||||||
go sc.run(ctx)
|
go sc.run(ctx)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -143,15 +151,20 @@ func (s *Server) Shutdown(ctx context.Context) error {
|
|||||||
ticker := time.NewTicker(200 * time.Millisecond)
|
ticker := time.NewTicker(200 * time.Millisecond)
|
||||||
defer ticker.Stop()
|
defer ticker.Stop()
|
||||||
for {
|
for {
|
||||||
if s.closeIdleConns() {
|
s.closeIdleConns()
|
||||||
return lnerr
|
|
||||||
|
if s.countConnection() == 0 {
|
||||||
|
break
|
||||||
}
|
}
|
||||||
|
|
||||||
select {
|
select {
|
||||||
case <-ctx.Done():
|
case <-ctx.Done():
|
||||||
return ctx.Err()
|
return ctx.Err()
|
||||||
case <-ticker.C:
|
case <-ticker.C:
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
return lnerr
|
||||||
}
|
}
|
||||||
|
|
||||||
// Close the server without waiting for active connections.
|
// Close the server without waiting for active connections.
|
||||||
@ -203,11 +216,18 @@ func (s *Server) closeListeners() error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) addConnection(c *serverConn) {
|
func (s *Server) addConnection(c *serverConn) error {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
|
|
||||||
|
select {
|
||||||
|
case <-s.done:
|
||||||
|
return ErrServerClosed
|
||||||
|
default:
|
||||||
|
}
|
||||||
|
|
||||||
s.connections[c] = struct{}{}
|
s.connections[c] = struct{}{}
|
||||||
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) delConnection(c *serverConn) {
|
func (s *Server) delConnection(c *serverConn) {
|
||||||
@ -224,20 +244,17 @@ func (s *Server) countConnection() int {
|
|||||||
return len(s.connections)
|
return len(s.connections)
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) closeIdleConns() bool {
|
func (s *Server) closeIdleConns() {
|
||||||
s.mu.Lock()
|
s.mu.Lock()
|
||||||
defer s.mu.Unlock()
|
defer s.mu.Unlock()
|
||||||
quiescent := true
|
|
||||||
for c := range s.connections {
|
for c := range s.connections {
|
||||||
st, ok := c.getState()
|
if st, ok := c.getState(); !ok || st == connStateActive {
|
||||||
if !ok || st != connStateIdle {
|
|
||||||
quiescent = false
|
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
c.close()
|
c.close()
|
||||||
delete(s.connections, c)
|
delete(s.connections, c)
|
||||||
}
|
}
|
||||||
return quiescent
|
|
||||||
}
|
}
|
||||||
|
|
||||||
type connState int
|
type connState int
|
||||||
@ -261,7 +278,7 @@ func (cs connState) String() string {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn {
|
func (s *Server) newConn(conn net.Conn, handshake interface{}) (*serverConn, error) {
|
||||||
c := &serverConn{
|
c := &serverConn{
|
||||||
server: s,
|
server: s,
|
||||||
conn: conn,
|
conn: conn,
|
||||||
@ -269,8 +286,11 @@ func (s *Server) newConn(conn net.Conn, handshake interface{}) *serverConn {
|
|||||||
shutdown: make(chan struct{}),
|
shutdown: make(chan struct{}),
|
||||||
}
|
}
|
||||||
c.setState(connStateIdle)
|
c.setState(connStateIdle)
|
||||||
s.addConnection(c)
|
if err := s.addConnection(c); err != nil {
|
||||||
return c
|
c.close()
|
||||||
|
return nil, err
|
||||||
|
}
|
||||||
|
return c, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
type serverConn struct {
|
type serverConn struct {
|
||||||
@ -318,6 +338,7 @@ func (c *serverConn) run(sctx context.Context) {
|
|||||||
responses = make(chan response)
|
responses = make(chan response)
|
||||||
recvErr = make(chan error, 1)
|
recvErr = make(chan error, 1)
|
||||||
done = make(chan struct{})
|
done = make(chan struct{})
|
||||||
|
streams = sync.Map{}
|
||||||
active int32
|
active int32
|
||||||
lastStreamID uint32
|
lastStreamID uint32
|
||||||
)
|
)
|
||||||
@ -347,7 +368,6 @@ func (c *serverConn) run(sctx context.Context) {
|
|||||||
|
|
||||||
go func(recvErr chan error) {
|
go func(recvErr chan error) {
|
||||||
defer close(recvErr)
|
defer close(recvErr)
|
||||||
streams := map[uint32]*streamHandler{}
|
|
||||||
for {
|
for {
|
||||||
select {
|
select {
|
||||||
case <-c.shutdown:
|
case <-c.shutdown:
|
||||||
@ -383,12 +403,13 @@ func (c *serverConn) run(sctx context.Context) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
if mh.Type == messageTypeData {
|
if mh.Type == messageTypeData {
|
||||||
sh, ok := streams[mh.StreamID]
|
i, ok := streams.Load(mh.StreamID)
|
||||||
if !ok {
|
if !ok {
|
||||||
if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID is no longer active")) {
|
if !sendStatus(mh.StreamID, status.Newf(codes.InvalidArgument, "StreamID is no longer active")) {
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
sh := i.(*streamHandler)
|
||||||
if mh.Flags&flagNoData != flagNoData {
|
if mh.Flags&flagNoData != flagNoData {
|
||||||
unmarshal := func(obj interface{}) error {
|
unmarshal := func(obj interface{}) error {
|
||||||
err := protoUnmarshal(p, obj)
|
err := protoUnmarshal(p, obj)
|
||||||
@ -458,7 +479,7 @@ func (c *serverConn) run(sctx context.Context) {
|
|||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
|
|
||||||
streams[id] = sh
|
streams.Store(id, sh)
|
||||||
atomic.AddInt32(&active, 1)
|
atomic.AddInt32(&active, 1)
|
||||||
}
|
}
|
||||||
// TODO: else we must ignore this for future compat. log this?
|
// TODO: else we must ignore this for future compat. log this?
|
||||||
@ -518,6 +539,7 @@ func (c *serverConn) run(sctx context.Context) {
|
|||||||
// The ttrpc protocol currently does not support the case where
|
// The ttrpc protocol currently does not support the case where
|
||||||
// the server is localClosed but not remoteClosed. Once the server
|
// the server is localClosed but not remoteClosed. Once the server
|
||||||
// is closing, the whole stream may be considered finished
|
// is closing, the whole stream may be considered finished
|
||||||
|
streams.Delete(response.id)
|
||||||
atomic.AddInt32(&active, -1)
|
atomic.AddInt32(&active, -1)
|
||||||
}
|
}
|
||||||
case err := <-recvErr:
|
case err := <-recvErr:
|
||||||
@ -525,14 +547,12 @@ func (c *serverConn) run(sctx context.Context) {
|
|||||||
// branch. Basically, it means that we are no longer receiving
|
// branch. Basically, it means that we are no longer receiving
|
||||||
// requests due to a terminal error.
|
// requests due to a terminal error.
|
||||||
recvErr = nil // connection is now "closing"
|
recvErr = nil // connection is now "closing"
|
||||||
if err == io.EOF || err == io.ErrUnexpectedEOF {
|
if err == io.EOF || err == io.ErrUnexpectedEOF || errors.Is(err, syscall.ECONNRESET) {
|
||||||
// The client went away and we should stop processing
|
// The client went away and we should stop processing
|
||||||
// requests, so that the client connection is closed
|
// requests, so that the client connection is closed
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err != nil {
|
logrus.WithError(err).Error("error receiving message")
|
||||||
logrus.WithError(err).Error("error receiving message")
|
|
||||||
}
|
|
||||||
// else, initiate shutdown
|
// else, initiate shutdown
|
||||||
case <-shutdown:
|
case <-shutdown:
|
||||||
return
|
return
|
||||||
|
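The server.go changes above rework Shutdown: closeIdleConns no longer reports quiescence, and the loop instead polls countConnection until no connections remain or the context expires. A hedged sketch of driving that path from caller code; the socket path is a placeholder:

package main

import (
	"context"
	"log"
	"net"
	"time"

	"github.com/containerd/ttrpc"
)

func main() {
	srv, err := ttrpc.NewServer()
	if err != nil {
		log.Fatal(err)
	}

	l, err := net.Listen("unix", "/tmp/example-ttrpc.sock") // placeholder path
	if err != nil {
		log.Fatal(err)
	}

	// Serve in the background; service registration is omitted in this sketch.
	go srv.Serve(context.Background(), l)

	// Shutdown returns once idle connections are reaped and none remain
	// active, or when the context expires.
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()
	if err := srv.Shutdown(ctx); err != nil {
		log.Fatal(err)
	}
}
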
@ -46,24 +46,36 @@ var (
|
|||||||
ErrNotFound = errors.New("not found")
|
ErrNotFound = errors.New("not found")
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// Any contains an arbitrary protocol buffer message along with its type.
|
||||||
|
//
|
||||||
|
// While there is google.golang.org/protobuf/types/known/anypb.Any,
|
||||||
|
// we'd like to have our own to hide the underlying protocol buffer
|
||||||
|
// implementations from containerd clients.
|
||||||
|
//
|
||||||
|
// https://developers.google.com/protocol-buffers/docs/proto3#any
|
||||||
type Any interface {
|
type Any interface {
|
||||||
|
// GetTypeUrl returns a URL/resource name that uniquely identifies
|
||||||
|
// the type of the serialized protocol buffer message.
|
||||||
GetTypeUrl() string
|
GetTypeUrl() string
|
||||||
|
|
||||||
|
// GetValue returns a valid serialized protocol buffer of the type that
|
||||||
|
// GetTypeUrl() indicates.
|
||||||
GetValue() []byte
|
GetValue() []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
type any struct {
|
type anyType struct {
|
||||||
typeURL string
|
typeURL string
|
||||||
value []byte
|
value []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *any) GetTypeUrl() string {
|
func (a *anyType) GetTypeUrl() string {
|
||||||
if a == nil {
|
if a == nil {
|
||||||
return ""
|
return ""
|
||||||
}
|
}
|
||||||
return a.typeURL
|
return a.typeURL
|
||||||
}
|
}
|
||||||
|
|
||||||
func (a *any) GetValue() []byte {
|
func (a *anyType) GetValue() []byte {
|
||||||
if a == nil {
|
if a == nil {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
@ -150,7 +162,7 @@ func MarshalAny(v interface{}) (Any, error) {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return &any{
|
return &anyType{
|
||||||
typeURL: url,
|
typeURL: url,
|
||||||
value: data,
|
value: data,
|
||||||
}, nil
|
}, nil
|
2 vendor/github.com/docker/cli/opts/throttledevice.go generated vendored
@ -31,7 +31,7 @@ func ValidateThrottleBpsDevice(val string) (*blkiodev.ThrottleDevice, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
return &blkiodev.ThrottleDevice{
|
return &blkiodev.ThrottleDevice{
|
||||||
Path: v,
|
Path: k,
|
||||||
Rate: uint64(rate),
|
Rate: uint64(rate),
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
17 vendor/github.com/emicklei/go-restful/v3/CHANGES.md generated vendored
@ -1,6 +1,21 @@
|
|||||||
# Change history of go-restful
|
# Change history of go-restful
|
||||||
|
|
||||||
## [v3.8.0] - 20221-06-06
|
## [v3.10.1] - 2022-11-19
|
||||||
|
|
||||||
|
- fix broken 3.10.0 by using path package for joining paths
|
||||||
|
|
||||||
|
## [v3.10.0] - 2022-10-11 - BROKEN
|
||||||
|
|
||||||
|
- changed tokenizer to match std route match behavior; do not trimright the path (#511)
|
||||||
|
- Add MIME_ZIP (#512)
|
||||||
|
- Add MIME_ZIP and HEADER_ContentDisposition (#513)
|
||||||
|
- Changed how to get query parameter issue #510
|
||||||
|
|
||||||
|
## [v3.9.0] - 2022-07-21
|
||||||
|
|
||||||
|
- add support for http.Handler implementations to work as FilterFunction, issue #504 (thanks to https://github.com/ggicci)
|
||||||
|
|
||||||
|
## [v3.8.0] - 2022-06-06
|
||||||
|
|
||||||
- use exact matching of allowed domain entries, issue #489 (#493)
|
- use exact matching of allowed domain entries, issue #489 (#493)
|
||||||
- this changes fixes [security] Authorization Bypass Through User-Controlled Key
|
- this changes fixes [security] Authorization Bypass Through User-Controlled Key
|
||||||
|
3
vendor/github.com/emicklei/go-restful/v3/README.md
generated
vendored
3
vendor/github.com/emicklei/go-restful/v3/README.md
generated
vendored
@ -84,6 +84,7 @@ func (u UserResource) findUser(request *restful.Request, response *restful.Respo
|
|||||||
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
|
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
|
||||||
- Configurable (trace) logging
|
- Configurable (trace) logging
|
||||||
- Customizable gzip/deflate readers and writers using CompressorProvider registration
|
- Customizable gzip/deflate readers and writers using CompressorProvider registration
|
||||||
|
- Inject your own http.Handler using the `HttpMiddlewareHandlerToFilter` function
|
||||||
|
|
||||||
## How to customize
|
## How to customize
|
||||||
There are several hooks to customize the behavior of the go-restful package.
|
There are several hooks to customize the behavior of the go-restful package.
|
||||||
@ -94,7 +95,7 @@ There are several hooks to customize the behavior of the go-restful package.
|
|||||||
- Trace logging
|
- Trace logging
|
||||||
- Compression
|
- Compression
|
||||||
- Encoders for other serializers
|
- Encoders for other serializers
|
||||||
- Use [jsoniter](https://github.com/json-iterator/go) by build this package using a tag, e.g. `go build -tags=jsoniter .`
|
- Use [jsoniter](https://github.com/json-iterator/go) by building this package using a build tag, e.g. `go build -tags=jsoniter .`
|
||||||
|
|
||||||
## Resources
|
## Resources
|
||||||
|
|
||||||
|
2
vendor/github.com/emicklei/go-restful/v3/constants.go
generated
vendored
2
vendor/github.com/emicklei/go-restful/v3/constants.go
generated
vendored
@ -7,12 +7,14 @@ package restful
|
|||||||
const (
|
const (
|
||||||
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
|
MIME_XML = "application/xml" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||||
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
|
MIME_JSON = "application/json" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||||
|
MIME_ZIP = "application/zip" // Accept or Content-Type used in Consumes() and/or Produces()
|
||||||
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
|
MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default
|
||||||
|
|
||||||
HEADER_Allow = "Allow"
|
HEADER_Allow = "Allow"
|
||||||
HEADER_Accept = "Accept"
|
HEADER_Accept = "Accept"
|
||||||
HEADER_Origin = "Origin"
|
HEADER_Origin = "Origin"
|
||||||
HEADER_ContentType = "Content-Type"
|
HEADER_ContentType = "Content-Type"
|
||||||
|
HEADER_ContentDisposition = "Content-Disposition"
|
||||||
HEADER_LastModified = "Last-Modified"
|
HEADER_LastModified = "Last-Modified"
|
||||||
HEADER_AcceptEncoding = "Accept-Encoding"
|
HEADER_AcceptEncoding = "Accept-Encoding"
|
||||||
HEADER_ContentEncoding = "Content-Encoding"
|
HEADER_ContentEncoding = "Content-Encoding"
|
||||||
|
vendor/github.com/emicklei/go-restful/v3/filter_adapter.go (generated, vendored; new file, 21 lines)
@@ -0,0 +1,21 @@
+package restful
+
+import (
+	"net/http"
+)
+
+// HttpMiddlewareHandler is a function that takes a http.Handler and returns a http.Handler
+type HttpMiddlewareHandler func(http.Handler) http.Handler
+
+// HttpMiddlewareHandlerToFilter converts a HttpMiddlewareHandler to a FilterFunction.
+func HttpMiddlewareHandlerToFilter(middleware HttpMiddlewareHandler) FilterFunction {
+	return func(req *Request, resp *Response, chain *FilterChain) {
+		next := http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
+			req.Request = r
+			resp.ResponseWriter = rw
+			chain.ProcessFilter(req, resp)
+		})
+
+		middleware(next).ServeHTTP(resp.ResponseWriter, req.Request)
+	}
+}
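For illustration (not part of the vendored change): the new adapter lets an ordinary net/http middleware run as a go-restful filter. A rough usage sketch, assuming the standard go-restful v3 API; the logging middleware, route, and port are invented for the example:

	package main

	import (
		"log"
		"net/http"

		restful "github.com/emicklei/go-restful/v3"
	)

	// logMiddleware is a plain net/http middleware, unaware of go-restful.
	func logMiddleware(next http.Handler) http.Handler {
		return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
			log.Printf("%s %s", r.Method, r.URL.Path)
			next.ServeHTTP(w, r)
		})
	}

	func main() {
		ws := new(restful.WebService)
		// Reuse the http.Handler middleware as a go-restful FilterFunction.
		ws.Filter(restful.HttpMiddlewareHandlerToFilter(logMiddleware))
		ws.Route(ws.GET("/hello").To(func(req *restful.Request, resp *restful.Response) {
			_, _ = resp.Write([]byte("hello"))
		}))
		restful.Add(ws)
		log.Fatal(http.ListenAndServe(":8080", nil))
	}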
8
vendor/github.com/emicklei/go-restful/v3/parameter.go
generated
vendored
8
vendor/github.com/emicklei/go-restful/v3/parameter.go
generated
vendored
@ -22,6 +22,9 @@ const (
|
|||||||
// FormParameterKind = indicator of Request parameter type "form"
|
// FormParameterKind = indicator of Request parameter type "form"
|
||||||
FormParameterKind
|
FormParameterKind
|
||||||
|
|
||||||
|
// MultiPartFormParameterKind = indicator of Request parameter type "multipart/form-data"
|
||||||
|
MultiPartFormParameterKind
|
||||||
|
|
||||||
// CollectionFormatCSV comma separated values `foo,bar`
|
// CollectionFormatCSV comma separated values `foo,bar`
|
||||||
CollectionFormatCSV = CollectionFormat("csv")
|
CollectionFormatCSV = CollectionFormat("csv")
|
||||||
|
|
||||||
@ -108,6 +111,11 @@ func (p *Parameter) beForm() *Parameter {
|
|||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (p *Parameter) beMultiPartForm() *Parameter {
|
||||||
|
p.data.Kind = MultiPartFormParameterKind
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
// Required sets the required field and returns the receiver
|
// Required sets the required field and returns the receiver
|
||||||
func (p *Parameter) Required(required bool) *Parameter {
|
func (p *Parameter) Required(required bool) *Parameter {
|
||||||
p.data.Required = required
|
p.data.Required = required
|
||||||
|
5
vendor/github.com/emicklei/go-restful/v3/request.go
generated
vendored
5
vendor/github.com/emicklei/go-restful/v3/request.go
generated
vendored
@ -31,7 +31,8 @@ func NewRequest(httpRequest *http.Request) *Request {
|
|||||||
// a "Unable to unmarshal content of type:" response is returned.
|
// a "Unable to unmarshal content of type:" response is returned.
|
||||||
// Valid values are restful.MIME_JSON and restful.MIME_XML
|
// Valid values are restful.MIME_JSON and restful.MIME_XML
|
||||||
// Example:
|
// Example:
|
||||||
// restful.DefaultRequestContentType(restful.MIME_JSON)
|
//
|
||||||
|
// restful.DefaultRequestContentType(restful.MIME_JSON)
|
||||||
func DefaultRequestContentType(mime string) {
|
func DefaultRequestContentType(mime string) {
|
||||||
defaultRequestContentType = mime
|
defaultRequestContentType = mime
|
||||||
}
|
}
|
||||||
@ -48,7 +49,7 @@ func (r *Request) PathParameters() map[string]string {
|
|||||||
|
|
||||||
// QueryParameter returns the (first) Query parameter value by its name
|
// QueryParameter returns the (first) Query parameter value by its name
|
||||||
func (r *Request) QueryParameter(name string) string {
|
func (r *Request) QueryParameter(name string) string {
|
||||||
return r.Request.FormValue(name)
|
return r.Request.URL.Query().Get(name)
|
||||||
}
|
}
|
||||||
|
|
||||||
// QueryParameters returns the all the query parameters values by name
|
// QueryParameters returns the all the query parameters values by name
|
||||||
|
3
vendor/github.com/emicklei/go-restful/v3/response.go
generated
vendored
3
vendor/github.com/emicklei/go-restful/v3/response.go
generated
vendored
@ -109,6 +109,9 @@ func (r *Response) EntityWriter() (EntityReaderWriter, bool) {
|
|||||||
if DefaultResponseMimeType == MIME_XML {
|
if DefaultResponseMimeType == MIME_XML {
|
||||||
return entityAccessRegistry.accessorAt(MIME_XML)
|
return entityAccessRegistry.accessorAt(MIME_XML)
|
||||||
}
|
}
|
||||||
|
if DefaultResponseMimeType == MIME_ZIP {
|
||||||
|
return entityAccessRegistry.accessorAt(MIME_ZIP)
|
||||||
|
}
|
||||||
// Fallback to whatever the route says it can produce.
|
// Fallback to whatever the route says it can produce.
|
||||||
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
// https://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html
|
||||||
for _, each := range r.routeProduces {
|
for _, each := range r.routeProduces {
|
||||||
|
4
vendor/github.com/emicklei/go-restful/v3/route.go
generated
vendored
4
vendor/github.com/emicklei/go-restful/v3/route.go
generated
vendored
@ -164,7 +164,7 @@ func tokenizePath(path string) []string {
|
|||||||
if "/" == path {
|
if "/" == path {
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
return strings.Split(strings.Trim(path, "/"), "/")
|
return strings.Split(strings.TrimLeft(path, "/"), "/")
|
||||||
}
|
}
|
||||||
|
|
||||||
// for debugging
|
// for debugging
|
||||||
@ -176,3 +176,5 @@ func (r *Route) String() string {
|
|||||||
func (r *Route) EnableContentEncoding(enabled bool) {
|
func (r *Route) EnableContentEncoding(enabled bool) {
|
||||||
r.contentEncodingEnabled = &enabled
|
r.contentEncodingEnabled = &enabled
|
||||||
}
|
}
|
||||||
|
|
||||||
|
var TrimRightSlashEnabled = false
|
||||||
|
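For illustration (not part of the vendored change): switching tokenizePath from strings.Trim to strings.TrimLeft preserves a trailing slash as an empty final token, which is what the new TrimRightSlashEnabled switch relates to. A standard-library-only sketch of the difference:

	package main

	import (
		"fmt"
		"strings"
	)

	func main() {
		path := "/users/42/"
		// Old behavior: the trailing slash is dropped entirely.
		fmt.Println(strings.Split(strings.Trim(path, "/"), "/")) // [users 42]
		// New behavior: the trailing slash becomes an empty last token,
		// so "/users/42" and "/users/42/" can be told apart.
		fmt.Println(strings.Split(strings.TrimLeft(path, "/"), "/")) // [users 42 ]
	}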
12
vendor/github.com/emicklei/go-restful/v3/route_builder.go
generated
vendored
12
vendor/github.com/emicklei/go-restful/v3/route_builder.go
generated
vendored
@ -7,6 +7,7 @@ package restful
|
|||||||
import (
|
import (
|
||||||
"fmt"
|
"fmt"
|
||||||
"os"
|
"os"
|
||||||
|
"path"
|
||||||
"reflect"
|
"reflect"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
@ -46,11 +47,12 @@ type RouteBuilder struct {
|
|||||||
// Do evaluates each argument with the RouteBuilder itself.
|
// Do evaluates each argument with the RouteBuilder itself.
|
||||||
// This allows you to follow DRY principles without breaking the fluent programming style.
|
// This allows you to follow DRY principles without breaking the fluent programming style.
|
||||||
// Example:
|
// Example:
|
||||||
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
|
|
||||||
//
|
//
|
||||||
// func Returns500(b *RouteBuilder) {
|
// ws.Route(ws.DELETE("/{name}").To(t.deletePerson).Do(Returns200, Returns500))
|
||||||
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
|
//
|
||||||
// }
|
// func Returns500(b *RouteBuilder) {
|
||||||
|
// b.Returns(500, "Internal Server Error", restful.ServiceError{})
|
||||||
|
// }
|
||||||
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
|
func (b *RouteBuilder) Do(oneArgBlocks ...func(*RouteBuilder)) *RouteBuilder {
|
||||||
for _, each := range oneArgBlocks {
|
for _, each := range oneArgBlocks {
|
||||||
each(b)
|
each(b)
|
||||||
@ -352,7 +354,7 @@ func (b *RouteBuilder) Build() Route {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func concatPath(path1, path2 string) string {
|
func concatPath(path1, path2 string) string {
|
||||||
return strings.TrimRight(path1, "/") + "/" + strings.TrimLeft(path2, "/")
|
return path.Join(path1, path2)
|
||||||
}
|
}
|
||||||
|
|
||||||
var anonymousFuncCount int32
|
var anonymousFuncCount int32
|
||||||
|
12
vendor/github.com/emicklei/go-restful/v3/web_service.go
generated
vendored
12
vendor/github.com/emicklei/go-restful/v3/web_service.go
generated
vendored
@ -165,6 +165,18 @@ func FormParameter(name, description string) *Parameter {
|
|||||||
return p
|
return p
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MultiPartFormParameter creates a new Parameter of kind Form (using multipart/form-data) for documentation purposes.
|
||||||
|
// It is initialized as required with string as its DataType.
|
||||||
|
func (w *WebService) MultiPartFormParameter(name, description string) *Parameter {
|
||||||
|
return MultiPartFormParameter(name, description)
|
||||||
|
}
|
||||||
|
|
||||||
|
func MultiPartFormParameter(name, description string) *Parameter {
|
||||||
|
p := &Parameter{&ParameterData{Name: name, Description: description, Required: false, DataType: "string"}}
|
||||||
|
p.beMultiPartForm()
|
||||||
|
return p
|
||||||
|
}
|
||||||
|
|
||||||
// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes.
|
// Route creates a new Route using the RouteBuilder and add to the ordered list of Routes.
|
||||||
func (w *WebService) Route(builder *RouteBuilder) *WebService {
|
func (w *WebService) Route(builder *RouteBuilder) *WebService {
|
||||||
w.routesLock.Lock()
|
w.routesLock.Lock()
|
||||||
|
63
vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
generated
vendored
Normal file
63
vendor/github.com/go-openapi/jsonreference/internal/normalize_url.go
generated
vendored
Normal file
@ -0,0 +1,63 @@
|
|||||||
|
package internal
|
||||||
|
|
||||||
|
import (
|
||||||
|
"net/url"
|
||||||
|
"regexp"
|
||||||
|
"strings"
|
||||||
|
)
|
||||||
|
|
||||||
|
const (
|
||||||
|
defaultHttpPort = ":80"
|
||||||
|
defaultHttpsPort = ":443"
|
||||||
|
)
|
||||||
|
|
||||||
|
// Regular expressions used by the normalizations
|
||||||
|
var rxPort = regexp.MustCompile(`(:\d+)/?$`)
|
||||||
|
var rxDupSlashes = regexp.MustCompile(`/{2,}`)
|
||||||
|
|
||||||
|
// NormalizeURL will normalize the specified URL
|
||||||
|
// This was added to replace a previous call to the no longer maintained purell library:
|
||||||
|
// The call that was used looked like the following:
|
||||||
|
// url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
|
||||||
|
//
|
||||||
|
// To explain all that was included in the call above, purell.FlagsSafe was really just the following:
|
||||||
|
// - FlagLowercaseScheme
|
||||||
|
// - FlagLowercaseHost
|
||||||
|
// - FlagRemoveDefaultPort
|
||||||
|
// - FlagRemoveDuplicateSlashes (and this was mixed in with the |)
|
||||||
|
func NormalizeURL(u *url.URL) {
|
||||||
|
lowercaseScheme(u)
|
||||||
|
lowercaseHost(u)
|
||||||
|
removeDefaultPort(u)
|
||||||
|
removeDuplicateSlashes(u)
|
||||||
|
}
|
||||||
|
|
||||||
|
func lowercaseScheme(u *url.URL) {
|
||||||
|
if len(u.Scheme) > 0 {
|
||||||
|
u.Scheme = strings.ToLower(u.Scheme)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func lowercaseHost(u *url.URL) {
|
||||||
|
if len(u.Host) > 0 {
|
||||||
|
u.Host = strings.ToLower(u.Host)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func removeDefaultPort(u *url.URL) {
|
||||||
|
if len(u.Host) > 0 {
|
||||||
|
scheme := strings.ToLower(u.Scheme)
|
||||||
|
u.Host = rxPort.ReplaceAllStringFunc(u.Host, func(val string) string {
|
||||||
|
if (scheme == "http" && val == defaultHttpPort) || (scheme == "https" && val == defaultHttpsPort) {
|
||||||
|
return ""
|
||||||
|
}
|
||||||
|
return val
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func removeDuplicateSlashes(u *url.URL) {
|
||||||
|
if len(u.Path) > 0 {
|
||||||
|
u.Path = rxDupSlashes.ReplaceAllString(u.Path, "/")
|
||||||
|
}
|
||||||
|
}
|
6
vendor/github.com/go-openapi/jsonreference/reference.go
generated
vendored
6
vendor/github.com/go-openapi/jsonreference/reference.go
generated
vendored
@ -30,8 +30,8 @@ import (
|
|||||||
"net/url"
|
"net/url"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/PuerkitoBio/purell"
|
|
||||||
"github.com/go-openapi/jsonpointer"
|
"github.com/go-openapi/jsonpointer"
|
||||||
|
"github.com/go-openapi/jsonreference/internal"
|
||||||
)
|
)
|
||||||
|
|
||||||
const (
|
const (
|
||||||
@ -114,7 +114,9 @@ func (r *Ref) parse(jsonReferenceString string) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
||||||
r.referenceURL, _ = url.Parse(purell.NormalizeURL(parsed, purell.FlagsSafe|purell.FlagRemoveDuplicateSlashes))
|
internal.NormalizeURL(parsed)
|
||||||
|
|
||||||
|
r.referenceURL = parsed
|
||||||
refURL := r.referenceURL
|
refURL := r.referenceURL
|
||||||
|
|
||||||
if refURL.Scheme != "" && refURL.Host != "" {
|
if refURL.Scheme != "" && refURL.Host != "" {
|
||||||
|
11
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
generated
vendored
11
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule/parse.go
generated
vendored
@ -275,11 +275,12 @@ func (p *parser) accept(term termType) (string, error) {
|
|||||||
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
|
// expectPChars determines if "t" consists of only pchars defined in RFC3986.
|
||||||
//
|
//
|
||||||
// https://www.ietf.org/rfc/rfc3986.txt, P.49
|
// https://www.ietf.org/rfc/rfc3986.txt, P.49
|
||||||
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
//
|
||||||
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
// pchar = unreserved / pct-encoded / sub-delims / ":" / "@"
|
||||||
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
// unreserved = ALPHA / DIGIT / "-" / "." / "_" / "~"
|
||||||
// / "*" / "+" / "," / ";" / "="
|
// sub-delims = "!" / "$" / "&" / "'" / "(" / ")"
|
||||||
// pct-encoded = "%" HEXDIG HEXDIG
|
// / "*" / "+" / "," / ";" / "="
|
||||||
|
// pct-encoded = "%" HEXDIG HEXDIG
|
||||||
func expectPChars(t string) error {
|
func expectPChars(t string) error {
|
||||||
const (
|
const (
|
||||||
init = iota
|
init = iota
|
||||||
|
6
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
generated
vendored
6
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/BUILD.bazel
generated
vendored
@ -30,6 +30,7 @@ go_library(
|
|||||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||||
"@org_golang_google_grpc//codes",
|
"@org_golang_google_grpc//codes",
|
||||||
"@org_golang_google_grpc//grpclog",
|
"@org_golang_google_grpc//grpclog",
|
||||||
|
"@org_golang_google_grpc//health/grpc_health_v1",
|
||||||
"@org_golang_google_grpc//metadata",
|
"@org_golang_google_grpc//metadata",
|
||||||
"@org_golang_google_grpc//status",
|
"@org_golang_google_grpc//status",
|
||||||
"@org_golang_google_protobuf//encoding/protojson",
|
"@org_golang_google_protobuf//encoding/protojson",
|
||||||
@ -37,6 +38,7 @@ go_library(
|
|||||||
"@org_golang_google_protobuf//reflect/protoreflect",
|
"@org_golang_google_protobuf//reflect/protoreflect",
|
||||||
"@org_golang_google_protobuf//reflect/protoregistry",
|
"@org_golang_google_protobuf//reflect/protoregistry",
|
||||||
"@org_golang_google_protobuf//types/known/durationpb",
|
"@org_golang_google_protobuf//types/known/durationpb",
|
||||||
|
"@org_golang_google_protobuf//types/known/structpb",
|
||||||
"@org_golang_google_protobuf//types/known/timestamppb",
|
"@org_golang_google_protobuf//types/known/timestamppb",
|
||||||
"@org_golang_google_protobuf//types/known/wrapperspb",
|
"@org_golang_google_protobuf//types/known/wrapperspb",
|
||||||
],
|
],
|
||||||
@ -56,8 +58,10 @@ go_test(
|
|||||||
"marshal_jsonpb_test.go",
|
"marshal_jsonpb_test.go",
|
||||||
"marshal_proto_test.go",
|
"marshal_proto_test.go",
|
||||||
"marshaler_registry_test.go",
|
"marshaler_registry_test.go",
|
||||||
|
"mux_internal_test.go",
|
||||||
"mux_test.go",
|
"mux_test.go",
|
||||||
"pattern_test.go",
|
"pattern_test.go",
|
||||||
|
"query_fuzz_test.go",
|
||||||
"query_test.go",
|
"query_test.go",
|
||||||
],
|
],
|
||||||
embed = [":runtime"],
|
embed = [":runtime"],
|
||||||
@ -70,7 +74,9 @@ go_test(
|
|||||||
"@go_googleapis//google/rpc:errdetails_go_proto",
|
"@go_googleapis//google/rpc:errdetails_go_proto",
|
||||||
"@go_googleapis//google/rpc:status_go_proto",
|
"@go_googleapis//google/rpc:status_go_proto",
|
||||||
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
"@io_bazel_rules_go//proto/wkt:field_mask_go_proto",
|
||||||
|
"@org_golang_google_grpc//:go_default_library",
|
||||||
"@org_golang_google_grpc//codes",
|
"@org_golang_google_grpc//codes",
|
||||||
|
"@org_golang_google_grpc//health/grpc_health_v1",
|
||||||
"@org_golang_google_grpc//metadata",
|
"@org_golang_google_grpc//metadata",
|
||||||
"@org_golang_google_grpc//status",
|
"@org_golang_google_grpc//status",
|
||||||
"@org_golang_google_protobuf//encoding/protojson",
|
"@org_golang_google_protobuf//encoding/protojson",
|
||||||
|
19
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
generated
vendored
19
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/context.go
generated
vendored
@ -41,6 +41,12 @@ var (
|
|||||||
DefaultContextTimeout = 0 * time.Second
|
DefaultContextTimeout = 0 * time.Second
|
||||||
)
|
)
|
||||||
|
|
||||||
|
// malformedHTTPHeaders lists the headers that the gRPC server may reject outright as malformed.
|
||||||
|
// See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more context.
|
||||||
|
var malformedHTTPHeaders = map[string]struct{}{
|
||||||
|
"connection": {},
|
||||||
|
}
|
||||||
|
|
||||||
type (
|
type (
|
||||||
rpcMethodKey struct{}
|
rpcMethodKey struct{}
|
||||||
httpPathPatternKey struct{}
|
httpPathPatternKey struct{}
|
||||||
@ -172,11 +178,17 @@ type serverMetadataKey struct{}
|
|||||||
|
|
||||||
// NewServerMetadataContext creates a new context with ServerMetadata
|
// NewServerMetadataContext creates a new context with ServerMetadata
|
||||||
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
|
func NewServerMetadataContext(ctx context.Context, md ServerMetadata) context.Context {
|
||||||
|
if ctx == nil {
|
||||||
|
ctx = context.Background()
|
||||||
|
}
|
||||||
return context.WithValue(ctx, serverMetadataKey{}, md)
|
return context.WithValue(ctx, serverMetadataKey{}, md)
|
||||||
}
|
}
|
||||||
|
|
||||||
// ServerMetadataFromContext returns the ServerMetadata in ctx
|
// ServerMetadataFromContext returns the ServerMetadata in ctx
|
||||||
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
|
func ServerMetadataFromContext(ctx context.Context) (md ServerMetadata, ok bool) {
|
||||||
|
if ctx == nil {
|
||||||
|
return md, false
|
||||||
|
}
|
||||||
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
|
md, ok = ctx.Value(serverMetadataKey{}).(ServerMetadata)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
@ -308,6 +320,13 @@ func isPermanentHTTPHeader(hdr string) bool {
|
|||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// isMalformedHTTPHeader checks whether header belongs to the list of
|
||||||
|
// "malformed headers" and would be rejected by the gRPC server.
|
||||||
|
func isMalformedHTTPHeader(header string) bool {
|
||||||
|
_, isMalformed := malformedHTTPHeaders[strings.ToLower(header)]
|
||||||
|
return isMalformed
|
||||||
|
}
|
||||||
|
|
||||||
// RPCMethod returns the method string for the server context. The returned
|
// RPCMethod returns the method string for the server context. The returned
|
||||||
// string is in the format of "/package.service/method".
|
// string is in the format of "/package.service/method".
|
||||||
func RPCMethod(ctx context.Context) (string, bool) {
|
func RPCMethod(ctx context.Context) (string, bool) {
|
||||||
|
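For illustration (not part of the vendored change): the malformed-header list above exists because grpc-go rejects requests carrying hop-by-hop headers such as Connection. A sketch of a custom incoming-header matcher that never forwards them, assuming the grpc-gateway v2 runtime API; the extra x-request-id header is invented for the example:

	package main

	import (
		"strings"

		"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
	)

	// customMatcher forwards one extra application header and falls back to the
	// default rules, but never forwards the hop-by-hop "connection" header.
	func customMatcher(key string) (string, bool) {
		switch strings.ToLower(key) {
		case "connection":
			return "", false
		case "x-request-id":
			return key, true
		default:
			return runtime.DefaultHeaderMatcher(key)
		}
	}

	func main() {
		_ = runtime.NewServeMux(runtime.WithIncomingHeaderMatcher(customMatcher))
	}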
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/convert.go (generated, vendored; 2 lines changed)
@@ -265,7 +265,7 @@ func EnumSlice(val, sep string, enumValMap map[string]int32) ([]int32, error) {
 }
 
 /*
-Support fot google.protobuf.wrappers on top of primitive types
+Support for google.protobuf.wrappers on top of primitive types
 */
 
 // StringValue well-known type support as wrapper around string type
9
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
9
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/errors.go
generated
vendored
@ -162,10 +162,11 @@ func DefaultStreamErrorHandler(_ context.Context, err error) *status.Status {
|
|||||||
|
|
||||||
// DefaultRoutingErrorHandler is our default handler for routing errors.
|
// DefaultRoutingErrorHandler is our default handler for routing errors.
|
||||||
// By default http error codes mapped on the following error codes:
|
// By default http error codes mapped on the following error codes:
|
||||||
// NotFound -> grpc.NotFound
|
//
|
||||||
// StatusBadRequest -> grpc.InvalidArgument
|
// NotFound -> grpc.NotFound
|
||||||
// MethodNotAllowed -> grpc.Unimplemented
|
// StatusBadRequest -> grpc.InvalidArgument
|
||||||
// Other -> grpc.Internal, method is not expecting to be called for anything else
|
// MethodNotAllowed -> grpc.Unimplemented
|
||||||
|
// Other -> grpc.Internal, method is not expecting to be called for anything else
|
||||||
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
|
func DefaultRoutingErrorHandler(ctx context.Context, mux *ServeMux, marshaler Marshaler, w http.ResponseWriter, r *http.Request, httpStatus int) {
|
||||||
sterr := status.Error(codes.Internal, "Unexpected routing error")
|
sterr := status.Error(codes.Internal, "Unexpected routing error")
|
||||||
switch httpStatus {
|
switch httpStatus {
|
||||||
|
2
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
generated
vendored
2
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/fieldmask.go
generated
vendored
@ -53,7 +53,7 @@ func FieldMaskFromRequestBody(r io.Reader, msg proto.Message) (*field_mask.Field
|
|||||||
}
|
}
|
||||||
|
|
||||||
if isDynamicProtoMessage(fd.Message()) {
|
if isDynamicProtoMessage(fd.Message()) {
|
||||||
for _, p := range buildPathsBlindly(k, v) {
|
for _, p := range buildPathsBlindly(string(fd.FullName().Name()), v) {
|
||||||
newPath := p
|
newPath := p
|
||||||
if item.path != "" {
|
if item.path != "" {
|
||||||
newPath = item.path + "." + newPath
|
newPath = item.path + "." + newPath
|
||||||
|
12
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
generated
vendored
12
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/handler.go
generated
vendored
@ -52,11 +52,11 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err != nil {
|
if err != nil {
|
||||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
if err := handleForwardResponseOptions(ctx, w, resp, opts); err != nil {
|
||||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -82,7 +82,7 @@ func ForwardResponseStream(ctx context.Context, mux *ServeMux, marshaler Marshal
|
|||||||
|
|
||||||
if err != nil {
|
if err != nil {
|
||||||
grpclog.Infof("Failed to marshal response chunk: %v", err)
|
grpclog.Infof("Failed to marshal response chunk: %v", err)
|
||||||
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err)
|
handleForwardResponseStreamError(ctx, wroteHeader, marshaler, w, req, mux, err, delimiter)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
if _, err = w.Write(buf); err != nil {
|
if _, err = w.Write(buf); err != nil {
|
||||||
@ -200,7 +200,7 @@ func handleForwardResponseOptions(ctx context.Context, w http.ResponseWriter, re
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error) {
|
func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, marshaler Marshaler, w http.ResponseWriter, req *http.Request, mux *ServeMux, err error, delimiter []byte) {
|
||||||
st := mux.streamErrorHandler(ctx, err)
|
st := mux.streamErrorHandler(ctx, err)
|
||||||
msg := errorChunk(st)
|
msg := errorChunk(st)
|
||||||
if !wroteHeader {
|
if !wroteHeader {
|
||||||
@ -216,6 +216,10 @@ func handleForwardResponseStreamError(ctx context.Context, wroteHeader bool, mar
|
|||||||
grpclog.Infof("Failed to notify error to client: %v", werr)
|
grpclog.Infof("Failed to notify error to client: %v", werr)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
if _, derr := w.Write(delimiter); derr != nil {
|
||||||
|
grpclog.Infof("Failed to send delimiter chunk: %v", err)
|
||||||
|
return
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
func errorChunk(st *status.Status) map[string]proto.Message {
|
func errorChunk(st *status.Status) map[string]proto.Message {
|
||||||
|
11
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
generated
vendored
11
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/marshal_jsonpb.go
generated
vendored
@ -280,6 +280,17 @@ func decodeNonProtoField(d *json.Decoder, unmarshaler protojson.UnmarshalOptions
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
if rv.Kind() == reflect.Slice {
|
if rv.Kind() == reflect.Slice {
|
||||||
|
if rv.Type().Elem().Kind() == reflect.Uint8 {
|
||||||
|
var sl []byte
|
||||||
|
if err := d.Decode(&sl); err != nil {
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
if sl != nil {
|
||||||
|
rv.SetBytes(sl)
|
||||||
|
}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
|
||||||
var sl []json.RawMessage
|
var sl []json.RawMessage
|
||||||
if err := d.Decode(&sl); err != nil {
|
if err := d.Decode(&sl); err != nil {
|
||||||
return err
|
return err
|
||||||
|
98
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
generated
vendored
98
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/mux.go
generated
vendored
@ -6,10 +6,13 @@ import (
|
|||||||
"fmt"
|
"fmt"
|
||||||
"net/http"
|
"net/http"
|
||||||
"net/textproto"
|
"net/textproto"
|
||||||
|
"regexp"
|
||||||
"strings"
|
"strings"
|
||||||
|
|
||||||
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
|
"github.com/grpc-ecosystem/grpc-gateway/v2/internal/httprule"
|
||||||
"google.golang.org/grpc/codes"
|
"google.golang.org/grpc/codes"
|
||||||
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/grpc/health/grpc_health_v1"
|
||||||
"google.golang.org/grpc/metadata"
|
"google.golang.org/grpc/metadata"
|
||||||
"google.golang.org/grpc/status"
|
"google.golang.org/grpc/status"
|
||||||
"google.golang.org/protobuf/proto"
|
"google.golang.org/protobuf/proto"
|
||||||
@ -23,15 +26,15 @@ const (
|
|||||||
// path string before doing any routing.
|
// path string before doing any routing.
|
||||||
UnescapingModeLegacy UnescapingMode = iota
|
UnescapingModeLegacy UnescapingMode = iota
|
||||||
|
|
||||||
// EscapingTypeExceptReserved unescapes all path parameters except RFC 6570
|
// UnescapingModeAllExceptReserved unescapes all path parameters except RFC 6570
|
||||||
// reserved characters.
|
// reserved characters.
|
||||||
UnescapingModeAllExceptReserved
|
UnescapingModeAllExceptReserved
|
||||||
|
|
||||||
// EscapingTypeExceptSlash unescapes URL path parameters except path
|
// UnescapingModeAllExceptSlash unescapes URL path parameters except path
|
||||||
// seperators, which will be left as "%2F".
|
// separators, which will be left as "%2F".
|
||||||
UnescapingModeAllExceptSlash
|
UnescapingModeAllExceptSlash
|
||||||
|
|
||||||
// URL path parameters will be fully decoded.
|
// UnescapingModeAllCharacters unescapes all URL path parameters.
|
||||||
UnescapingModeAllCharacters
|
UnescapingModeAllCharacters
|
||||||
|
|
||||||
// UnescapingModeDefault is the default escaping type.
|
// UnescapingModeDefault is the default escaping type.
|
||||||
@ -40,6 +43,10 @@ const (
|
|||||||
UnescapingModeDefault = UnescapingModeLegacy
|
UnescapingModeDefault = UnescapingModeLegacy
|
||||||
)
|
)
|
||||||
|
|
||||||
|
var (
|
||||||
|
encodedPathSplitter = regexp.MustCompile("(/|%2F)")
|
||||||
|
)
|
||||||
|
|
||||||
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
|
// A HandlerFunc handles a specific pair of path pattern and HTTP method.
|
||||||
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
|
type HandlerFunc func(w http.ResponseWriter, r *http.Request, pathParams map[string]string)
|
||||||
|
|
||||||
@ -113,11 +120,30 @@ func DefaultHeaderMatcher(key string) (string, bool) {
|
|||||||
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
|
// This matcher will be called with each header in http.Request. If matcher returns true, that header will be
|
||||||
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
|
// passed to gRPC context. To transform the header before passing to gRPC context, matcher should return modified header.
|
||||||
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
func WithIncomingHeaderMatcher(fn HeaderMatcherFunc) ServeMuxOption {
|
||||||
|
for _, header := range fn.matchedMalformedHeaders() {
|
||||||
|
grpclog.Warningf("The configured forwarding filter would allow %q to be sent to the gRPC server, which will likely cause errors. See https://github.com/grpc/grpc-go/pull/4803#issuecomment-986093310 for more information.", header)
|
||||||
|
}
|
||||||
|
|
||||||
return func(mux *ServeMux) {
|
return func(mux *ServeMux) {
|
||||||
mux.incomingHeaderMatcher = fn
|
mux.incomingHeaderMatcher = fn
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// matchedMalformedHeaders returns the malformed headers that would be forwarded to gRPC server.
|
||||||
|
func (fn HeaderMatcherFunc) matchedMalformedHeaders() []string {
|
||||||
|
if fn == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
headers := make([]string, 0)
|
||||||
|
for header := range malformedHTTPHeaders {
|
||||||
|
out, accept := fn(header)
|
||||||
|
if accept && isMalformedHTTPHeader(out) {
|
||||||
|
headers = append(headers, out)
|
||||||
|
}
|
||||||
|
}
|
||||||
|
return headers
|
||||||
|
}
|
||||||
|
|
||||||
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
|
// WithOutgoingHeaderMatcher returns a ServeMuxOption representing a headerMatcher for outgoing response from gateway.
|
||||||
//
|
//
|
||||||
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
|
// This matcher will be called with each header in response header metadata. If matcher returns true, that header will be
|
||||||
@ -179,6 +205,57 @@ func WithDisablePathLengthFallback() ServeMuxOption {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithHealthEndpointAt returns a ServeMuxOption that will add an endpoint to the created ServeMux at the path specified by endpointPath.
|
||||||
|
// When called the handler will forward the request to the upstream grpc service health check (defined in the
|
||||||
|
// gRPC Health Checking Protocol).
|
||||||
|
//
|
||||||
|
// See here https://grpc-ecosystem.github.io/grpc-gateway/docs/operations/health_check/ for more information on how
|
||||||
|
// to setup the protocol in the grpc server.
|
||||||
|
//
|
||||||
|
// If you define a service as query parameter, this will also be forwarded as service in the HealthCheckRequest.
|
||||||
|
func WithHealthEndpointAt(healthCheckClient grpc_health_v1.HealthClient, endpointPath string) ServeMuxOption {
|
||||||
|
return func(s *ServeMux) {
|
||||||
|
// error can be ignored since pattern is definitely valid
|
||||||
|
_ = s.HandlePath(
|
||||||
|
http.MethodGet, endpointPath, func(w http.ResponseWriter, r *http.Request, _ map[string]string,
|
||||||
|
) {
|
||||||
|
_, outboundMarshaler := MarshalerForRequest(s, r)
|
||||||
|
|
||||||
|
resp, err := healthCheckClient.Check(r.Context(), &grpc_health_v1.HealthCheckRequest{
|
||||||
|
Service: r.URL.Query().Get("service"),
|
||||||
|
})
|
||||||
|
if err != nil {
|
||||||
|
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
w.Header().Set("Content-Type", "application/json")
|
||||||
|
|
||||||
|
if resp.GetStatus() != grpc_health_v1.HealthCheckResponse_SERVING {
|
||||||
|
var err error
|
||||||
|
switch resp.GetStatus() {
|
||||||
|
case grpc_health_v1.HealthCheckResponse_NOT_SERVING, grpc_health_v1.HealthCheckResponse_UNKNOWN:
|
||||||
|
err = status.Error(codes.Unavailable, resp.String())
|
||||||
|
case grpc_health_v1.HealthCheckResponse_SERVICE_UNKNOWN:
|
||||||
|
err = status.Error(codes.NotFound, resp.String())
|
||||||
|
}
|
||||||
|
|
||||||
|
s.errorHandler(r.Context(), s, outboundMarshaler, w, r, err)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
_ = outboundMarshaler.NewEncoder(w).Encode(resp)
|
||||||
|
})
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
// WithHealthzEndpoint returns a ServeMuxOption that will add a /healthz endpoint to the created ServeMux.
|
||||||
|
//
|
||||||
|
// See WithHealthEndpointAt for the general implementation.
|
||||||
|
func WithHealthzEndpoint(healthCheckClient grpc_health_v1.HealthClient) ServeMuxOption {
|
||||||
|
return WithHealthEndpointAt(healthCheckClient, "/healthz")
|
||||||
|
}
|
||||||
|
|
||||||
// NewServeMux returns a new ServeMux whose internal mapping is empty.
|
// NewServeMux returns a new ServeMux whose internal mapping is empty.
|
||||||
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
|
func NewServeMux(opts ...ServeMuxOption) *ServeMux {
|
||||||
serveMux := &ServeMux{
|
serveMux := &ServeMux{
|
||||||
@ -229,7 +306,7 @@ func (s *ServeMux) HandlePath(meth string, pathPattern string, h HandlerFunc) er
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.Path.
|
// ServeHTTP dispatches the request to the first handler whose pattern matches to r.Method and r.URL.Path.
|
||||||
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
||||||
ctx := r.Context()
|
ctx := r.Context()
|
||||||
|
|
||||||
@ -245,7 +322,16 @@ func (s *ServeMux) ServeHTTP(w http.ResponseWriter, r *http.Request) {
|
|||||||
path = r.URL.RawPath
|
path = r.URL.RawPath
|
||||||
}
|
}
|
||||||
|
|
||||||
components := strings.Split(path[1:], "/")
|
var components []string
|
||||||
|
// since in UnescapeModeLegacy, the URL will already have been fully unescaped, if we also split on "%2F"
|
||||||
|
// in this escaping mode we would be double unescaping but in UnescapingModeAllCharacters, we still do as the
|
||||||
|
// path is the RawPath (i.e. unescaped). That does mean that the behavior of this function will change its default
|
||||||
|
// behavior when the UnescapingModeDefault gets changed from UnescapingModeLegacy to UnescapingModeAllExceptReserved
|
||||||
|
if s.unescapingMode == UnescapingModeAllCharacters {
|
||||||
|
components = encodedPathSplitter.Split(path[1:], -1)
|
||||||
|
} else {
|
||||||
|
components = strings.Split(path[1:], "/")
|
||||||
|
}
|
||||||
|
|
||||||
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
|
if override := r.Header.Get("X-HTTP-Method-Override"); override != "" && s.isPathLengthFallback(r) {
|
||||||
r.Method = strings.ToUpper(override)
|
r.Method = strings.ToUpper(override)
|
||||||
|
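For illustration (not part of the vendored change): the new WithHealthzEndpoint option wires a /healthz HTTP route to the upstream gRPC health service. A rough wiring sketch, assuming the grpc-gateway v2 runtime and grpc-go APIs; the addresses are placeholders:

	package main

	import (
		"log"
		"net/http"

		"github.com/grpc-ecosystem/grpc-gateway/v2/runtime"
		"google.golang.org/grpc"
		"google.golang.org/grpc/credentials/insecure"
		"google.golang.org/grpc/health/grpc_health_v1"
	)

	func main() {
		// Dial the upstream gRPC server that implements the health checking protocol.
		conn, err := grpc.Dial("localhost:9090", grpc.WithTransportCredentials(insecure.NewCredentials()))
		if err != nil {
			log.Fatal(err)
		}
		healthClient := grpc_health_v1.NewHealthClient(conn)

		// GET /healthz (optionally ?service=<name>) now proxies the upstream health check.
		mux := runtime.NewServeMux(runtime.WithHealthzEndpoint(healthClient))
		log.Fatal(http.ListenAndServe(":8081", mux))
	}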
37
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
generated
vendored
37
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/runtime/query.go
generated
vendored
@ -1,7 +1,6 @@
|
|||||||
package runtime
|
package runtime
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"encoding/base64"
|
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
"net/url"
|
"net/url"
|
||||||
@ -13,17 +12,19 @@ import (
|
|||||||
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
|
||||||
"google.golang.org/genproto/protobuf/field_mask"
|
"google.golang.org/genproto/protobuf/field_mask"
|
||||||
"google.golang.org/grpc/grpclog"
|
"google.golang.org/grpc/grpclog"
|
||||||
|
"google.golang.org/protobuf/encoding/protojson"
|
||||||
"google.golang.org/protobuf/proto"
|
"google.golang.org/protobuf/proto"
|
||||||
"google.golang.org/protobuf/reflect/protoreflect"
|
"google.golang.org/protobuf/reflect/protoreflect"
|
||||||
"google.golang.org/protobuf/reflect/protoregistry"
|
"google.golang.org/protobuf/reflect/protoregistry"
|
||||||
"google.golang.org/protobuf/types/known/durationpb"
|
"google.golang.org/protobuf/types/known/durationpb"
|
||||||
|
"google.golang.org/protobuf/types/known/structpb"
|
||||||
"google.golang.org/protobuf/types/known/timestamppb"
|
"google.golang.org/protobuf/types/known/timestamppb"
|
||||||
"google.golang.org/protobuf/types/known/wrapperspb"
|
"google.golang.org/protobuf/types/known/wrapperspb"
|
||||||
)
|
)
|
||||||
|
|
||||||
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
|
var valuesKeyRegexp = regexp.MustCompile(`^(.*)\[(.*)\]$`)
|
||||||
|
|
||||||
var currentQueryParser QueryParameterParser = &defaultQueryParser{}
|
var currentQueryParser QueryParameterParser = &DefaultQueryParser{}
|
||||||
|
|
||||||
// QueryParameterParser defines interface for all query parameter parsers
|
// QueryParameterParser defines interface for all query parameter parsers
|
||||||
type QueryParameterParser interface {
|
type QueryParameterParser interface {
|
||||||
@ -36,11 +37,15 @@ func PopulateQueryParameters(msg proto.Message, values url.Values, filter *utili
|
|||||||
return currentQueryParser.Parse(msg, values, filter)
|
return currentQueryParser.Parse(msg, values, filter)
|
||||||
}
|
}
|
||||||
|
|
||||||
type defaultQueryParser struct{}
|
// DefaultQueryParser is a QueryParameterParser which implements the default
|
||||||
|
// query parameters parsing behavior.
|
||||||
|
//
|
||||||
|
// See https://github.com/grpc-ecosystem/grpc-gateway/issues/2632 for more context.
|
||||||
|
type DefaultQueryParser struct{}
|
||||||
|
|
||||||
// Parse populates "values" into "msg".
|
// Parse populates "values" into "msg".
|
||||||
// A value is ignored if its key starts with one of the elements in "filter".
|
// A value is ignored if its key starts with one of the elements in "filter".
|
||||||
func (*defaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
func (*DefaultQueryParser) Parse(msg proto.Message, values url.Values, filter *utilities.DoubleArray) error {
|
||||||
for key, values := range values {
|
for key, values := range values {
|
||||||
match := valuesKeyRegexp.FindStringSubmatch(key)
|
match := valuesKeyRegexp.FindStringSubmatch(key)
|
||||||
if len(match) == 3 {
|
if len(match) == 3 {
|
||||||
@ -234,7 +239,7 @@ func parseField(fieldDescriptor protoreflect.FieldDescriptor, value string) (pro
|
|||||||
case protoreflect.StringKind:
|
case protoreflect.StringKind:
|
||||||
return protoreflect.ValueOfString(value), nil
|
return protoreflect.ValueOfString(value), nil
|
||||||
case protoreflect.BytesKind:
|
case protoreflect.BytesKind:
|
||||||
v, err := base64.URLEncoding.DecodeString(value)
|
v, err := Bytes(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return protoreflect.Value{}, err
|
return protoreflect.Value{}, err
|
||||||
}
|
}
|
||||||
@ -250,18 +255,12 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
|
|||||||
var msg proto.Message
|
var msg proto.Message
|
||||||
switch msgDescriptor.FullName() {
|
switch msgDescriptor.FullName() {
|
||||||
case "google.protobuf.Timestamp":
|
case "google.protobuf.Timestamp":
|
||||||
if value == "null" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
t, err := time.Parse(time.RFC3339Nano, value)
|
t, err := time.Parse(time.RFC3339Nano, value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return protoreflect.Value{}, err
|
return protoreflect.Value{}, err
|
||||||
}
|
}
|
||||||
msg = timestamppb.New(t)
|
msg = timestamppb.New(t)
|
||||||
case "google.protobuf.Duration":
|
case "google.protobuf.Duration":
|
||||||
if value == "null" {
|
|
||||||
break
|
|
||||||
}
|
|
||||||
d, err := time.ParseDuration(value)
|
d, err := time.ParseDuration(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return protoreflect.Value{}, err
|
return protoreflect.Value{}, err
|
||||||
@ -312,7 +311,7 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
|
|||||||
case "google.protobuf.StringValue":
|
case "google.protobuf.StringValue":
|
||||||
msg = &wrapperspb.StringValue{Value: value}
|
msg = &wrapperspb.StringValue{Value: value}
|
||||||
case "google.protobuf.BytesValue":
|
case "google.protobuf.BytesValue":
|
||||||
v, err := base64.URLEncoding.DecodeString(value)
|
v, err := Bytes(value)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return protoreflect.Value{}, err
|
return protoreflect.Value{}, err
|
||||||
}
|
}
|
||||||
@ -321,6 +320,20 @@ func parseMessage(msgDescriptor protoreflect.MessageDescriptor, value string) (p
|
|||||||
fm := &field_mask.FieldMask{}
|
fm := &field_mask.FieldMask{}
|
||||||
fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
|
fm.Paths = append(fm.Paths, strings.Split(value, ",")...)
|
||||||
msg = fm
|
msg = fm
|
||||||
|
case "google.protobuf.Value":
|
||||||
|
var v structpb.Value
|
||||||
|
err := protojson.Unmarshal([]byte(value), &v)
|
||||||
|
if err != nil {
|
||||||
|
return protoreflect.Value{}, err
|
||||||
|
}
|
||||||
|
msg = &v
|
||||||
|
case "google.protobuf.Struct":
|
||||||
|
var v structpb.Struct
|
||||||
|
err := protojson.Unmarshal([]byte(value), &v)
|
||||||
|
if err != nil {
|
||||||
|
return protoreflect.Value{}, err
|
||||||
|
}
|
||||||
|
msg = &v
|
||||||
default:
|
default:
|
||||||
return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
|
return protoreflect.Value{}, fmt.Errorf("unsupported message type: %q", string(msgDescriptor.FullName()))
|
||||||
}
|
}
|
||||||
|
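For illustration (not part of the vendored change): the query parser above now accepts google.protobuf.Value and google.protobuf.Struct parameters by decoding them with protojson, as the hunk shows. A standalone sketch of that decoding step; the JSON payload is invented:

	package main

	import (
		"fmt"
		"log"

		"google.golang.org/protobuf/encoding/protojson"
		"google.golang.org/protobuf/types/known/structpb"
	)

	func main() {
		// A query-string value such as ?payload={"a":1,"b":["x","y"]} arrives as raw JSON text.
		raw := "{\"a\":1,\"b\":[\"x\",\"y\"]}"
		var v structpb.Struct
		if err := protojson.Unmarshal([]byte(raw), &v); err != nil {
			log.Fatal(err)
		}
		fmt.Println(v.Fields["a"].GetNumberValue()) // 1
	}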
6
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
generated
vendored
6
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/BUILD.bazel
generated
vendored
@ -8,6 +8,7 @@ go_library(
|
|||||||
"doc.go",
|
"doc.go",
|
||||||
"pattern.go",
|
"pattern.go",
|
||||||
"readerfactory.go",
|
"readerfactory.go",
|
||||||
|
"string_array_flag.go",
|
||||||
"trie.go",
|
"trie.go",
|
||||||
],
|
],
|
||||||
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
|
importpath = "github.com/grpc-ecosystem/grpc-gateway/v2/utilities",
|
||||||
@ -16,7 +17,10 @@ go_library(
|
|||||||
go_test(
|
go_test(
|
||||||
name = "utilities_test",
|
name = "utilities_test",
|
||||||
size = "small",
|
size = "small",
|
||||||
srcs = ["trie_test.go"],
|
srcs = [
|
||||||
|
"string_array_flag_test.go",
|
||||||
|
"trie_test.go",
|
||||||
|
],
|
||||||
deps = [":utilities"],
|
deps = [":utilities"],
|
||||||
)
|
)
|
||||||
|
|
||||||
|
vendor/github.com/grpc-ecosystem/grpc-gateway/v2/utilities/string_array_flag.go (generated, vendored; new file, 33 lines)
@@ -0,0 +1,33 @@
+package utilities
+
+import (
+	"flag"
+	"strings"
+)
+
+// flagInterface is an cut down interface to `flag`
+type flagInterface interface {
+	Var(value flag.Value, name string, usage string)
+}
+
+// StringArrayFlag defines a flag with the specified name and usage string.
+// The return value is the address of a `StringArrayFlags` variable that stores the repeated values of the flag.
+func StringArrayFlag(f flagInterface, name string, usage string) *StringArrayFlags {
+	value := &StringArrayFlags{}
+	f.Var(value, name, usage)
+	return value
+}
+
+// StringArrayFlags is a wrapper of `[]string` to provider an interface for `flag.Var`
+type StringArrayFlags []string
+
+// String returns a string representation of `StringArrayFlags`
+func (i *StringArrayFlags) String() string {
+	return strings.Join(*i, ",")
+}
+
+// Set appends a value to `StringArrayFlags`
+func (i *StringArrayFlags) Set(value string) error {
+	*i = append(*i, value)
+	return nil
+}
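For illustration (not part of the vendored change): StringArrayFlags implements flag.Value, so the same flag can be passed repeatedly on the command line. A short usage sketch; the flag name is invented:

	package main

	import (
		"flag"
		"fmt"

		"github.com/grpc-ecosystem/grpc-gateway/v2/utilities"
	)

	func main() {
		// "-header Foo -header Bar" collects both values into one slice.
		headers := utilities.StringArrayFlag(flag.CommandLine, "header", "header to forward (repeatable)")
		flag.Parse()
		fmt.Println(headers.String()) // e.g. "Foo,Bar"
	}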
vendor/github.com/klauspost/compress/.goreleaser.yml (generated, vendored; 2 lines changed)
@@ -3,7 +3,7 @@
 before:
   hooks:
     - ./gen.sh
-    - go install mvdan.cc/garble@latest
+    - go install mvdan.cc/garble@v0.9.3
 
 builds:
   -
23
vendor/github.com/klauspost/compress/README.md
generated
vendored
23
vendor/github.com/klauspost/compress/README.md
generated
vendored
@ -9,7 +9,6 @@ This package provides various compression algorithms.
|
|||||||
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
|
* [huff0](https://github.com/klauspost/compress/tree/master/huff0) and [FSE](https://github.com/klauspost/compress/tree/master/fse) implementations for raw entropy encoding.
|
||||||
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
|
* [gzhttp](https://github.com/klauspost/compress/tree/master/gzhttp) Provides client and server wrappers for handling gzipped requests efficiently.
|
||||||
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
|
* [pgzip](https://github.com/klauspost/pgzip) is a separate package that provides a very fast parallel gzip implementation.
|
||||||
* [fuzz package](https://github.com/klauspost/compress-fuzz) for fuzz testing all compressors/decompressors here.
|
|
||||||
|
|
||||||
[](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
|
[](https://pkg.go.dev/github.com/klauspost/compress?tab=subdirectories)
|
||||||
[](https://github.com/klauspost/compress/actions/workflows/go.yml)
|
[](https://github.com/klauspost/compress/actions/workflows/go.yml)
|
||||||
@ -17,6 +16,28 @@ This package provides various compression algorithms.
|
|||||||
|
|
||||||
# changelog
|
# changelog
|
||||||
|
|
||||||
|
* Jan 21st, 2023 (v1.15.15)
|
||||||
|
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739
|
||||||
|
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
|
||||||
|
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
|
||||||
|
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
|
||||||
|
|
||||||
|
* Jan 3rd, 2023 (v1.15.14)
|
||||||
|
|
||||||
|
* flate: Improve speed in big stateless blocks https://github.com/klauspost/compress/pull/718
|
||||||
|
* zstd: Minor speed tweaks by @greatroar in https://github.com/klauspost/compress/pull/716 https://github.com/klauspost/compress/pull/720
|
||||||
|
* export NoGzipResponseWriter for custom ResponseWriter wrappers by @harshavardhana in https://github.com/klauspost/compress/pull/722
|
||||||
|
* s2: Add example for indexing and existing stream https://github.com/klauspost/compress/pull/723
|
||||||
|
|
||||||
|
* Dec 11, 2022 (v1.15.13)
|
||||||
|
* zstd: Add [MaxEncodedSize](https://pkg.go.dev/github.com/klauspost/compress@v1.15.13/zstd#Encoder.MaxEncodedSize) to encoder https://github.com/klauspost/compress/pull/691
|
||||||
|
* zstd: Various tweaks and improvements https://github.com/klauspost/compress/pull/693 https://github.com/klauspost/compress/pull/695 https://github.com/klauspost/compress/pull/696 https://github.com/klauspost/compress/pull/701 https://github.com/klauspost/compress/pull/702 https://github.com/klauspost/compress/pull/703 https://github.com/klauspost/compress/pull/704 https://github.com/klauspost/compress/pull/705 https://github.com/klauspost/compress/pull/706 https://github.com/klauspost/compress/pull/707 https://github.com/klauspost/compress/pull/708
|
||||||
|
|
||||||
|
* Oct 26, 2022 (v1.15.12)
|
||||||
|
|
||||||
|
* zstd: Tweak decoder allocs. https://github.com/klauspost/compress/pull/680
|
||||||
|
* gzhttp: Always delete `HeaderNoCompression` https://github.com/klauspost/compress/pull/683
|
||||||
|
|
||||||
* Sept 26, 2022 (v1.15.11)
|
* Sept 26, 2022 (v1.15.11)
|
||||||
|
|
||||||
* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
|
* flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
|
||||||
|
31
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
31
vendor/github.com/klauspost/compress/fse/compress.go
generated
vendored
@@ -146,54 +146,51 @@ func (s *Scratch) compress(src []byte) error {
 		c1.encodeZero(tt[src[ip-2]])
 		ip -= 2
 	}
+	src = src[:ip]
 
 	// Main compression loop.
 	switch {
 	case !s.zeroBits && s.actualTableLog <= 8:
 		// We can encode 4 symbols without requiring a flush.
 		// We do not need to check if any output is 0 bits.
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encode(tt[v0])
 			c1.encode(tt[v1])
 			c2.encode(tt[v2])
 			c1.encode(tt[v3])
-			ip -= 4
 		}
 	case !s.zeroBits:
 		// We do not need to check if any output is 0 bits.
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encode(tt[v0])
 			c1.encode(tt[v1])
 			s.bw.flush32()
 			c2.encode(tt[v2])
 			c1.encode(tt[v3])
-			ip -= 4
 		}
 	case s.actualTableLog <= 8:
 		// We can encode 4 symbols without requiring a flush
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encodeZero(tt[v0])
 			c1.encodeZero(tt[v1])
 			c2.encodeZero(tt[v2])
 			c1.encodeZero(tt[v3])
-			ip -= 4
 		}
 	default:
-		for ip >= 4 {
+		for ; len(src) >= 4; src = src[:len(src)-4] {
 			s.bw.flush32()
-			v3, v2, v1, v0 := src[ip-4], src[ip-3], src[ip-2], src[ip-1]
+			v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
 			c2.encodeZero(tt[v0])
 			c1.encodeZero(tt[v1])
 			s.bw.flush32()
 			c2.encodeZero(tt[v2])
 			c1.encodeZero(tt[v3])
-			ip -= 4
 		}
 	}
 
@@ -459,15 +456,17 @@ func (s *Scratch) countSimple(in []byte) (max int) {
 	for _, v := range in {
 		s.count[v]++
 	}
-	m := uint32(0)
+	m, symlen := uint32(0), s.symbolLen
 	for i, v := range s.count[:] {
+		if v == 0 {
+			continue
+		}
 		if v > m {
 			m = v
 		}
-		if v > 0 {
-			s.symbolLen = uint16(i) + 1
-		}
+		symlen = uint16(i) + 1
 	}
+	s.symbolLen = symlen
 	return int(m)
 }
 
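The rewrite above drops the manual `ip` index and instead re-slices `src` from the tail on each pass, which is friendlier to the Go compiler's bounds-check elimination because the loop condition already guarantees `len(src) >= 4`. A minimal standalone sketch of the same loop shape (hypothetical names, not the vendored code):

package main

import "fmt"

// sumTail4 consumes the input four bytes at a time from the end,
// mirroring the loop shape fse.compress switches to in the diff above.
// Indexing at len(src)-1 ... len(src)-4 inside a loop whose condition is
// len(src) >= 4 lets the compiler see the accesses are in range.
func sumTail4(src []byte) (sum int) {
	for ; len(src) >= 4; src = src[:len(src)-4] {
		v3, v2, v1, v0 := src[len(src)-4], src[len(src)-3], src[len(src)-2], src[len(src)-1]
		sum += int(v0) + int(v1) + int(v2) + int(v3)
	}
	// Any remaining 0-3 bytes would be handled separately, as the real
	// encoder does before entering its main loop.
	return sum
}

func main() {
	fmt.Println(sumTail4([]byte{1, 2, 3, 4, 5, 6, 7, 8})) // 36
}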
8 vendor/github.com/klauspost/compress/huff0/bitreader.go generated vendored

@@ -67,7 +67,6 @@ func (b *bitReaderBytes) fillFast() {
 
 	// 2 bounds checks.
 	v := b.in[b.off-4 : b.off]
-	v = v[:4]
 	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 	b.value |= uint64(low) << (b.bitsRead - 32)
 	b.bitsRead -= 32
@@ -88,8 +87,7 @@ func (b *bitReaderBytes) fill() {
 		return
 	}
 	if b.off > 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
+		v := b.in[b.off-4 : b.off]
 		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 		b.value |= uint64(low) << (b.bitsRead - 32)
 		b.bitsRead -= 32
@@ -179,7 +177,6 @@ func (b *bitReaderShifted) fillFast() {
 
 	// 2 bounds checks.
 	v := b.in[b.off-4 : b.off]
-	v = v[:4]
 	low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 	b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
 	b.bitsRead -= 32
@@ -200,8 +197,7 @@ func (b *bitReaderShifted) fill() {
 		return
 	}
 	if b.off > 4 {
-		v := b.in[b.off-4:]
-		v = v[:4]
+		v := b.in[b.off-4 : b.off]
 		low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
 		b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
 		b.bitsRead -= 32
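The bit reader change above keeps the upper bound in the slice expression (`b.in[b.off-4 : b.off]`) instead of re-slicing with `v = v[:4]`, giving the compiler a fixed 4-byte window in one step. A small hedged sketch of the same idea, with illustrative names only:

package main

import "fmt"

// readLE32At returns the little-endian uint32 ending at offset off.
// Slicing with an explicit upper bound hands the compiler a 4-byte
// window up front, so the four index expressions below can be checked
// once at the slice operation rather than individually.
func readLE32At(in []byte, off int) uint32 {
	v := in[off-4 : off]
	return uint32(v[0]) | uint32(v[1])<<8 | uint32(v[2])<<16 | uint32(v[3])<<24
}

func main() {
	buf := []byte{0x00, 0x78, 0x56, 0x34, 0x12}
	fmt.Printf("%#x\n", readLE32At(buf, 5)) // 0x12345678
}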
114 vendor/github.com/klauspost/compress/huff0/compress.go generated vendored

@@ -365,29 +365,29 @@ func (s *Scratch) countSimple(in []byte) (max int, reuse bool) {
 	m := uint32(0)
 	if len(s.prevTable) > 0 {
 		for i, v := range s.count[:] {
+			if v == 0 {
+				continue
+			}
 			if v > m {
 				m = v
 			}
-			if v > 0 {
-				s.symbolLen = uint16(i) + 1
-				if i >= len(s.prevTable) {
-					reuse = false
-				} else {
-					if s.prevTable[i].nBits == 0 {
-						reuse = false
-					}
-				}
+			s.symbolLen = uint16(i) + 1
+			if i >= len(s.prevTable) {
+				reuse = false
+			} else if s.prevTable[i].nBits == 0 {
+				reuse = false
 			}
 		}
 		return int(m), reuse
 	}
 	for i, v := range s.count[:] {
+		if v == 0 {
+			continue
+		}
 		if v > m {
 			m = v
 		}
-		if v > 0 {
-			s.symbolLen = uint16(i) + 1
-		}
+		s.symbolLen = uint16(i) + 1
 	}
 	return int(m), false
 }
@@ -484,34 +484,35 @@ func (s *Scratch) buildCTable() error {
 	// Different from reference implementation.
 	huffNode0 := s.nodes[0 : huffNodesLen+1]
 
-	for huffNode[nonNullRank].count == 0 {
+	for huffNode[nonNullRank].count() == 0 {
 		nonNullRank--
 	}
 
 	lowS := int16(nonNullRank)
 	nodeRoot := nodeNb + lowS - 1
 	lowN := nodeNb
-	huffNode[nodeNb].count = huffNode[lowS].count + huffNode[lowS-1].count
-	huffNode[lowS].parent, huffNode[lowS-1].parent = uint16(nodeNb), uint16(nodeNb)
+	huffNode[nodeNb].setCount(huffNode[lowS].count() + huffNode[lowS-1].count())
+	huffNode[lowS].setParent(nodeNb)
+	huffNode[lowS-1].setParent(nodeNb)
 	nodeNb++
 	lowS -= 2
 	for n := nodeNb; n <= nodeRoot; n++ {
-		huffNode[n].count = 1 << 30
+		huffNode[n].setCount(1 << 30)
 	}
 	// fake entry, strong barrier
-	huffNode0[0].count = 1 << 31
+	huffNode0[0].setCount(1 << 31)
 
 	// create parents
 	for nodeNb <= nodeRoot {
 		var n1, n2 int16
-		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
 			n1 = lowS
 			lowS--
 		} else {
 			n1 = lowN
 			lowN++
 		}
-		if huffNode0[lowS+1].count < huffNode0[lowN+1].count {
+		if huffNode0[lowS+1].count() < huffNode0[lowN+1].count() {
 			n2 = lowS
 			lowS--
 		} else {
@@ -519,18 +520,19 @@ func (s *Scratch) buildCTable() error {
 			lowN++
 		}
 
-		huffNode[nodeNb].count = huffNode0[n1+1].count + huffNode0[n2+1].count
-		huffNode0[n1+1].parent, huffNode0[n2+1].parent = uint16(nodeNb), uint16(nodeNb)
+		huffNode[nodeNb].setCount(huffNode0[n1+1].count() + huffNode0[n2+1].count())
+		huffNode0[n1+1].setParent(nodeNb)
+		huffNode0[n2+1].setParent(nodeNb)
 		nodeNb++
 	}
 
 	// distribute weights (unlimited tree height)
-	huffNode[nodeRoot].nbBits = 0
+	huffNode[nodeRoot].setNbBits(0)
 	for n := nodeRoot - 1; n >= startNode; n-- {
-		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
 	}
 	for n := uint16(0); n <= nonNullRank; n++ {
-		huffNode[n].nbBits = huffNode[huffNode[n].parent].nbBits + 1
+		huffNode[n].setNbBits(huffNode[huffNode[n].parent()].nbBits() + 1)
 	}
 	s.actualTableLog = s.setMaxHeight(int(nonNullRank))
 	maxNbBits := s.actualTableLog
@@ -542,7 +544,7 @@ func (s *Scratch) buildCTable() error {
 	var nbPerRank [tableLogMax + 1]uint16
 	var valPerRank [16]uint16
 	for _, v := range huffNode[:nonNullRank+1] {
-		nbPerRank[v.nbBits]++
+		nbPerRank[v.nbBits()]++
 	}
 	// determine stating value per rank
 	{
@@ -557,7 +559,7 @@ func (s *Scratch) buildCTable() error {
 
 	// push nbBits per symbol, symbol order
 	for _, v := range huffNode[:nonNullRank+1] {
-		s.cTable[v.symbol].nBits = v.nbBits
+		s.cTable[v.symbol()].nBits = v.nbBits()
 	}
 
 	// assign value within rank, symbol order
@@ -603,12 +605,12 @@ func (s *Scratch) huffSort() {
 			pos := rank[r].current
 			rank[r].current++
 			prev := nodes[(pos-1)&huffNodesMask]
-			for pos > rank[r].base && c > prev.count {
+			for pos > rank[r].base && c > prev.count() {
 				nodes[pos&huffNodesMask] = prev
 				pos--
 				prev = nodes[(pos-1)&huffNodesMask]
 			}
-			nodes[pos&huffNodesMask] = nodeElt{count: c, symbol: byte(n)}
+			nodes[pos&huffNodesMask] = makeNodeElt(c, byte(n))
 		}
 	}
 
@@ -617,7 +619,7 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
 	huffNode := s.nodes[1 : huffNodesLen+1]
 	//huffNode = huffNode[: huffNodesLen]
 
-	largestBits := huffNode[lastNonNull].nbBits
+	largestBits := huffNode[lastNonNull].nbBits()
 
 	// early exit : no elt > maxNbBits
 	if largestBits <= maxNbBits {
@@ -627,14 +629,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
 	baseCost := int(1) << (largestBits - maxNbBits)
 	n := uint32(lastNonNull)
 
-	for huffNode[n].nbBits > maxNbBits {
-		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits))
-		huffNode[n].nbBits = maxNbBits
+	for huffNode[n].nbBits() > maxNbBits {
+		totalCost += baseCost - (1 << (largestBits - huffNode[n].nbBits()))
+		huffNode[n].setNbBits(maxNbBits)
 		n--
 	}
 	// n stops at huffNode[n].nbBits <= maxNbBits
 
-	for huffNode[n].nbBits == maxNbBits {
+	for huffNode[n].nbBits() == maxNbBits {
 		n--
 	}
 	// n end at index of smallest symbol using < maxNbBits
@@ -655,10 +657,10 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
 	{
 		currentNbBits := maxNbBits
 		for pos := int(n); pos >= 0; pos-- {
-			if huffNode[pos].nbBits >= currentNbBits {
+			if huffNode[pos].nbBits() >= currentNbBits {
 				continue
 			}
-			currentNbBits = huffNode[pos].nbBits // < maxNbBits
+			currentNbBits = huffNode[pos].nbBits() // < maxNbBits
 			rankLast[maxNbBits-currentNbBits] = uint32(pos)
 		}
 	}
@@ -675,8 +677,8 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
 			if lowPos == noSymbol {
 				break
 			}
-			highTotal := huffNode[highPos].count
-			lowTotal := 2 * huffNode[lowPos].count
+			highTotal := huffNode[highPos].count()
+			lowTotal := 2 * huffNode[lowPos].count()
 			if highTotal <= lowTotal {
 				break
 			}
@@ -692,13 +694,14 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
 				// this rank is no longer empty
 				rankLast[nBitsToDecrease-1] = rankLast[nBitsToDecrease]
 			}
-			huffNode[rankLast[nBitsToDecrease]].nbBits++
+			huffNode[rankLast[nBitsToDecrease]].setNbBits(1 +
+				huffNode[rankLast[nBitsToDecrease]].nbBits())
 			if rankLast[nBitsToDecrease] == 0 {
 				/* special case, reached largest symbol */
 				rankLast[nBitsToDecrease] = noSymbol
 			} else {
 				rankLast[nBitsToDecrease]--
-				if huffNode[rankLast[nBitsToDecrease]].nbBits != maxNbBits-nBitsToDecrease {
+				if huffNode[rankLast[nBitsToDecrease]].nbBits() != maxNbBits-nBitsToDecrease {
 					rankLast[nBitsToDecrease] = noSymbol /* this rank is now empty */
 				}
 			}
@@ -706,15 +709,15 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
 
 		for totalCost < 0 { /* Sometimes, cost correction overshoot */
 			if rankLast[1] == noSymbol { /* special case : no rank 1 symbol (using maxNbBits-1); let's create one from largest rank 0 (using maxNbBits) */
-				for huffNode[n].nbBits == maxNbBits {
+				for huffNode[n].nbBits() == maxNbBits {
 					n--
 				}
-				huffNode[n+1].nbBits--
+				huffNode[n+1].setNbBits(huffNode[n+1].nbBits() - 1)
 				rankLast[1] = n + 1
 				totalCost++
 				continue
 			}
-			huffNode[rankLast[1]+1].nbBits--
+			huffNode[rankLast[1]+1].setNbBits(huffNode[rankLast[1]+1].nbBits() - 1)
 			rankLast[1]++
 			totalCost++
 		}
@@ -722,9 +725,26 @@ func (s *Scratch) setMaxHeight(lastNonNull int) uint8 {
 	return maxNbBits
 }
 
-type nodeElt struct {
-	count  uint32
-	parent uint16
-	symbol byte
-	nbBits uint8
+// A nodeElt is the fields
+//
+//	count  uint32
+//	parent uint16
+//	symbol byte
+//	nbBits uint8
+//
+// in some order, all squashed into an integer so that the compiler
+// always loads and stores entire nodeElts instead of separate fields.
+type nodeElt uint64
+
+func makeNodeElt(count uint32, symbol byte) nodeElt {
+	return nodeElt(count) | nodeElt(symbol)<<48
 }
+
+func (e *nodeElt) count() uint32  { return uint32(*e) }
+func (e *nodeElt) parent() uint16 { return uint16(*e >> 32) }
+func (e *nodeElt) symbol() byte   { return byte(*e >> 48) }
+func (e *nodeElt) nbBits() uint8  { return uint8(*e >> 56) }
+
+func (e *nodeElt) setCount(c uint32) { *e = (*e)&0xffffffff00000000 | nodeElt(c) }
+func (e *nodeElt) setParent(p int16) { *e = (*e)&0xffff0000ffffffff | nodeElt(uint16(p))<<32 }
+func (e *nodeElt) setNbBits(n uint8) { *e = (*e)&0x00ffffffffffffff | nodeElt(n)<<56 }
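The nodeElt change above packs count (bits 0-31), parent (32-47), symbol (48-55) and nbBits (56-63) into one uint64 so a node moves with a single load/store instead of several field accesses. A self-contained sketch of the same packing scheme, written outside the vendored package with illustrative names:

package main

import "fmt"

// packedNode mirrors the layout huff0 adopts for nodeElt:
// bits 0-31 count, 32-47 parent, 48-55 symbol, 56-63 nbBits.
type packedNode uint64

func makeNode(count uint32, symbol byte) packedNode {
	return packedNode(count) | packedNode(symbol)<<48
}

func (n packedNode) count() uint32  { return uint32(n) }
func (n packedNode) parent() uint16 { return uint16(n >> 32) }
func (n packedNode) symbol() byte   { return byte(n >> 48) }
func (n packedNode) nbBits() uint8  { return uint8(n >> 56) }

// Setters clear the relevant bit range, then OR in the new value.
func (n *packedNode) setParent(p uint16) { *n = *n&^(0xffff<<32) | packedNode(p)<<32 }
func (n *packedNode) setNbBits(b uint8)  { *n = *n&^(0xff<<56) | packedNode(b)<<56 }

func main() {
	n := makeNode(1000, 'A')
	n.setParent(7)
	n.setNbBits(5)
	fmt.Println(n.count(), n.parent(), n.symbol(), n.nbBits()) // 1000 7 65 5
}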
2 vendor/github.com/klauspost/compress/huff0/decompress.go generated vendored

@@ -61,7 +61,7 @@ func ReadTable(in []byte, s *Scratch) (s2 *Scratch, remain []byte, err error) {
 	b, err := fse.Decompress(in[:iSize], s.fse)
 	s.fse.Out = nil
 	if err != nil {
-		return s, nil, err
+		return s, nil, fmt.Errorf("fse decompress returned: %w", err)
 	}
 	if len(b) > 255 {
 		return s, nil, errors.New("corrupt input: output table too large")
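The one-line change above wraps the inner FSE error with %w instead of returning it bare, so callers get context while the original error stays matchable. A tiny hedged illustration of that wrapping pattern:

package main

import (
	"errors"
	"fmt"
)

var errCorrupt = errors.New("corrupt input")

func inner() error { return errCorrupt }

func outer() error {
	if err := inner(); err != nil {
		// %w keeps the original error in the chain while adding context.
		return fmt.Errorf("fse decompress returned: %w", err)
	}
	return nil
}

func main() {
	err := outer()
	fmt.Println(err)                        // fse decompress returned: corrupt input
	fmt.Println(errors.Is(err, errCorrupt)) // true
}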
584
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
generated
vendored
584
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
generated
vendored
@ -4,360 +4,349 @@
|
|||||||
|
|
||||||
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
|
// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
|
||||||
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
|
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
|
||||||
XORQ DX, DX
|
|
||||||
|
|
||||||
// Preload values
|
// Preload values
|
||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVBQZX 8(AX), DI
|
MOVBQZX 8(AX), DI
|
||||||
MOVQ 16(AX), SI
|
MOVQ 16(AX), BX
|
||||||
MOVQ 48(AX), BX
|
MOVQ 48(AX), SI
|
||||||
MOVQ 24(AX), R9
|
MOVQ 24(AX), R8
|
||||||
MOVQ 32(AX), R10
|
MOVQ 32(AX), R9
|
||||||
MOVQ (AX), R11
|
MOVQ (AX), R10
|
||||||
|
|
||||||
// Main loop
|
// Main loop
|
||||||
main_loop:
|
main_loop:
|
||||||
MOVQ SI, R8
|
XORL DX, DX
|
||||||
CMPQ R8, BX
|
CMPQ BX, SI
|
||||||
SETGE DL
|
SETGE DL
|
||||||
|
|
||||||
// br0.fillFast32()
|
// br0.fillFast32()
|
||||||
MOVQ 32(R11), R12
|
MOVQ 32(R10), R11
|
||||||
MOVBQZX 40(R11), R13
|
MOVBQZX 40(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill0
|
JBE skip_fill0
|
||||||
MOVQ 24(R11), AX
|
MOVQ 24(R10), AX
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, AX
|
SUBQ $0x04, AX
|
||||||
MOVQ (R11), R14
|
MOVQ (R10), R13
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (AX)(R14*1), R14
|
MOVL (AX)(R13*1), R13
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R14
|
SHLQ CL, R13
|
||||||
MOVQ AX, 24(R11)
|
MOVQ AX, 24(R10)
|
||||||
ORQ R14, R12
|
ORQ R13, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br0.off < 4)
|
// exhausted += (br0.off < 4)
|
||||||
CMPQ AX, $0x04
|
CMPQ AX, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill0:
|
skip_fill0:
|
||||||
// val0 := br0.peekTopBits(peekBits)
|
// val0 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v0.entry)
|
// br0.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br0.peekTopBits(peekBits)
|
// val1 := br0.peekTopBits(peekBits)
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val1&mask]
|
// v1 := table[val1&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v1.entry))
|
// br0.advance(uint8(v1.entry))
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// these two writes get coalesced
|
// these two writes get coalesced
|
||||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
MOVW AX, (R8)
|
MOVW AX, (BX)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 32(R11)
|
MOVQ R11, 32(R10)
|
||||||
MOVB R13, 40(R11)
|
MOVB R12, 40(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br1.fillFast32()
|
// br1.fillFast32()
|
||||||
MOVQ 80(R11), R12
|
MOVQ 80(R10), R11
|
||||||
MOVBQZX 88(R11), R13
|
MOVBQZX 88(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill1
|
JBE skip_fill1
|
||||||
MOVQ 72(R11), AX
|
MOVQ 72(R10), AX
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, AX
|
SUBQ $0x04, AX
|
||||||
MOVQ 48(R11), R14
|
MOVQ 48(R10), R13
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (AX)(R14*1), R14
|
MOVL (AX)(R13*1), R13
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R14
|
SHLQ CL, R13
|
||||||
MOVQ AX, 72(R11)
|
MOVQ AX, 72(R10)
|
||||||
ORQ R14, R12
|
ORQ R13, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br1.off < 4)
|
// exhausted += (br1.off < 4)
|
||||||
CMPQ AX, $0x04
|
CMPQ AX, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill1:
|
skip_fill1:
|
||||||
// val0 := br1.peekTopBits(peekBits)
|
// val0 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v0.entry)
|
// br1.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br1.peekTopBits(peekBits)
|
// val1 := br1.peekTopBits(peekBits)
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val1&mask]
|
// v1 := table[val1&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v1.entry))
|
// br1.advance(uint8(v1.entry))
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// these two writes get coalesced
|
// these two writes get coalesced
|
||||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
MOVW AX, (R8)
|
MOVW AX, (BX)(R8*1)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 80(R11)
|
MOVQ R11, 80(R10)
|
||||||
MOVB R13, 88(R11)
|
MOVB R12, 88(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br2.fillFast32()
|
// br2.fillFast32()
|
||||||
MOVQ 128(R11), R12
|
MOVQ 128(R10), R11
|
||||||
MOVBQZX 136(R11), R13
|
MOVBQZX 136(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill2
|
JBE skip_fill2
|
||||||
MOVQ 120(R11), AX
|
MOVQ 120(R10), AX
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, AX
|
SUBQ $0x04, AX
|
||||||
MOVQ 96(R11), R14
|
MOVQ 96(R10), R13
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (AX)(R14*1), R14
|
MOVL (AX)(R13*1), R13
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R14
|
SHLQ CL, R13
|
||||||
MOVQ AX, 120(R11)
|
MOVQ AX, 120(R10)
|
||||||
ORQ R14, R12
|
ORQ R13, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br2.off < 4)
|
// exhausted += (br2.off < 4)
|
||||||
CMPQ AX, $0x04
|
CMPQ AX, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill2:
|
skip_fill2:
|
||||||
// val0 := br2.peekTopBits(peekBits)
|
// val0 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v0.entry)
|
// br2.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br2.peekTopBits(peekBits)
|
// val1 := br2.peekTopBits(peekBits)
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val1&mask]
|
// v1 := table[val1&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v1.entry))
|
// br2.advance(uint8(v1.entry))
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// these two writes get coalesced
|
// these two writes get coalesced
|
||||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
MOVW AX, (R8)
|
MOVW AX, (BX)(R8*2)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 128(R11)
|
MOVQ R11, 128(R10)
|
||||||
MOVB R13, 136(R11)
|
MOVB R12, 136(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br3.fillFast32()
|
// br3.fillFast32()
|
||||||
MOVQ 176(R11), R12
|
MOVQ 176(R10), R11
|
||||||
MOVBQZX 184(R11), R13
|
MOVBQZX 184(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill3
|
JBE skip_fill3
|
||||||
MOVQ 168(R11), AX
|
MOVQ 168(R10), AX
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, AX
|
SUBQ $0x04, AX
|
||||||
MOVQ 144(R11), R14
|
MOVQ 144(R10), R13
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (AX)(R14*1), R14
|
MOVL (AX)(R13*1), R13
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R14
|
SHLQ CL, R13
|
||||||
MOVQ AX, 168(R11)
|
MOVQ AX, 168(R10)
|
||||||
ORQ R14, R12
|
ORQ R13, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br3.off < 4)
|
// exhausted += (br3.off < 4)
|
||||||
CMPQ AX, $0x04
|
CMPQ AX, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill3:
|
skip_fill3:
|
||||||
// val0 := br3.peekTopBits(peekBits)
|
// val0 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v0.entry)
|
// br3.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br3.peekTopBits(peekBits)
|
// val1 := br3.peekTopBits(peekBits)
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val1&mask]
|
// v1 := table[val1&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v1.entry))
|
// br3.advance(uint8(v1.entry))
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// these two writes get coalesced
|
// these two writes get coalesced
|
||||||
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
|
||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
MOVW AX, (R8)
|
LEAQ (R8)(R8*2), CX
|
||||||
|
MOVW AX, (BX)(CX*1)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 176(R11)
|
MOVQ R11, 176(R10)
|
||||||
MOVB R13, 184(R11)
|
MOVB R12, 184(R10)
|
||||||
ADDQ $0x02, SI
|
ADDQ $0x02, BX
|
||||||
TESTB DL, DL
|
TESTB DL, DL
|
||||||
JZ main_loop
|
JZ main_loop
|
||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
SUBQ 16(AX), SI
|
SUBQ 16(AX), BX
|
||||||
SHLQ $0x02, SI
|
SHLQ $0x02, BX
|
||||||
MOVQ SI, 40(AX)
|
MOVQ BX, 40(AX)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
|
// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
|
||||||
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
|
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
|
||||||
XORQ DX, DX
|
|
||||||
|
|
||||||
// Preload values
|
// Preload values
|
||||||
MOVQ ctx+0(FP), CX
|
MOVQ ctx+0(FP), CX
|
||||||
MOVBQZX 8(CX), DI
|
MOVBQZX 8(CX), DI
|
||||||
MOVQ 16(CX), BX
|
MOVQ 16(CX), BX
|
||||||
MOVQ 48(CX), SI
|
MOVQ 48(CX), SI
|
||||||
MOVQ 24(CX), R9
|
MOVQ 24(CX), R8
|
||||||
MOVQ 32(CX), R10
|
MOVQ 32(CX), R9
|
||||||
MOVQ (CX), R11
|
MOVQ (CX), R10
|
||||||
|
|
||||||
// Main loop
|
// Main loop
|
||||||
main_loop:
|
main_loop:
|
||||||
MOVQ BX, R8
|
XORL DX, DX
|
||||||
CMPQ R8, SI
|
CMPQ BX, SI
|
||||||
SETGE DL
|
SETGE DL
|
||||||
|
|
||||||
// br0.fillFast32()
|
// br0.fillFast32()
|
||||||
MOVQ 32(R11), R12
|
MOVQ 32(R10), R11
|
||||||
MOVBQZX 40(R11), R13
|
MOVBQZX 40(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill0
|
JBE skip_fill0
|
||||||
MOVQ 24(R11), R14
|
MOVQ 24(R10), R13
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, R14
|
SUBQ $0x04, R13
|
||||||
MOVQ (R11), R15
|
MOVQ (R10), R14
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (R14)(R15*1), R15
|
MOVL (R13)(R14*1), R14
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R15
|
SHLQ CL, R14
|
||||||
MOVQ R14, 24(R11)
|
MOVQ R13, 24(R10)
|
||||||
ORQ R15, R12
|
ORQ R14, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br0.off < 4)
|
// exhausted += (br0.off < 4)
|
||||||
CMPQ R14, $0x04
|
CMPQ R13, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill0:
|
skip_fill0:
|
||||||
// val0 := br0.peekTopBits(peekBits)
|
// val0 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v0.entry)
|
// br0.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br0.peekTopBits(peekBits)
|
// val1 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val0&mask]
|
// v1 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v1.entry)
|
// br0.advance(uint8(v1.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// val2 := br0.peekTopBits(peekBits)
|
// val2 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v2 := table[val0&mask]
|
// v2 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v2.entry)
|
// br0.advance(uint8(v2.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val3 := br0.peekTopBits(peekBits)
|
// val3 := br0.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v3 := table[val0&mask]
|
// v3 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br0.advance(uint8(v3.entry)
|
// br0.advance(uint8(v3.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// these four writes get coalesced
|
// these four writes get coalesced
|
||||||
@ -365,88 +354,86 @@ skip_fill0:
|
|||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||||
MOVL AX, (R8)
|
MOVL AX, (BX)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 32(R11)
|
MOVQ R11, 32(R10)
|
||||||
MOVB R13, 40(R11)
|
MOVB R12, 40(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br1.fillFast32()
|
// br1.fillFast32()
|
||||||
MOVQ 80(R11), R12
|
MOVQ 80(R10), R11
|
||||||
MOVBQZX 88(R11), R13
|
MOVBQZX 88(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill1
|
JBE skip_fill1
|
||||||
MOVQ 72(R11), R14
|
MOVQ 72(R10), R13
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, R14
|
SUBQ $0x04, R13
|
||||||
MOVQ 48(R11), R15
|
MOVQ 48(R10), R14
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (R14)(R15*1), R15
|
MOVL (R13)(R14*1), R14
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R15
|
SHLQ CL, R14
|
||||||
MOVQ R14, 72(R11)
|
MOVQ R13, 72(R10)
|
||||||
ORQ R15, R12
|
ORQ R14, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br1.off < 4)
|
// exhausted += (br1.off < 4)
|
||||||
CMPQ R14, $0x04
|
CMPQ R13, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill1:
|
skip_fill1:
|
||||||
// val0 := br1.peekTopBits(peekBits)
|
// val0 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v0.entry)
|
// br1.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br1.peekTopBits(peekBits)
|
// val1 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val0&mask]
|
// v1 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v1.entry)
|
// br1.advance(uint8(v1.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// val2 := br1.peekTopBits(peekBits)
|
// val2 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v2 := table[val0&mask]
|
// v2 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v2.entry)
|
// br1.advance(uint8(v2.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val3 := br1.peekTopBits(peekBits)
|
// val3 := br1.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v3 := table[val0&mask]
|
// v3 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br1.advance(uint8(v3.entry)
|
// br1.advance(uint8(v3.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// these four writes get coalesced
|
// these four writes get coalesced
|
||||||
@ -454,88 +441,86 @@ skip_fill1:
|
|||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||||
MOVL AX, (R8)
|
MOVL AX, (BX)(R8*1)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 80(R11)
|
MOVQ R11, 80(R10)
|
||||||
MOVB R13, 88(R11)
|
MOVB R12, 88(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br2.fillFast32()
|
// br2.fillFast32()
|
||||||
MOVQ 128(R11), R12
|
MOVQ 128(R10), R11
|
||||||
MOVBQZX 136(R11), R13
|
MOVBQZX 136(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill2
|
JBE skip_fill2
|
||||||
MOVQ 120(R11), R14
|
MOVQ 120(R10), R13
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, R14
|
SUBQ $0x04, R13
|
||||||
MOVQ 96(R11), R15
|
MOVQ 96(R10), R14
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (R14)(R15*1), R15
|
MOVL (R13)(R14*1), R14
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R15
|
SHLQ CL, R14
|
||||||
MOVQ R14, 120(R11)
|
MOVQ R13, 120(R10)
|
||||||
ORQ R15, R12
|
ORQ R14, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br2.off < 4)
|
// exhausted += (br2.off < 4)
|
||||||
CMPQ R14, $0x04
|
CMPQ R13, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill2:
|
skip_fill2:
|
||||||
// val0 := br2.peekTopBits(peekBits)
|
// val0 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v0.entry)
|
// br2.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br2.peekTopBits(peekBits)
|
// val1 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val0&mask]
|
// v1 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v1.entry)
|
// br2.advance(uint8(v1.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// val2 := br2.peekTopBits(peekBits)
|
// val2 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v2 := table[val0&mask]
|
// v2 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v2.entry)
|
// br2.advance(uint8(v2.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val3 := br2.peekTopBits(peekBits)
|
// val3 := br2.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v3 := table[val0&mask]
|
// v3 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br2.advance(uint8(v3.entry)
|
// br2.advance(uint8(v3.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// these four writes get coalesced
|
// these four writes get coalesced
|
||||||
@ -543,88 +528,86 @@ skip_fill2:
|
|||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||||
MOVL AX, (R8)
|
MOVL AX, (BX)(R8*2)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 128(R11)
|
MOVQ R11, 128(R10)
|
||||||
MOVB R13, 136(R11)
|
MOVB R12, 136(R10)
|
||||||
ADDQ R9, R8
|
|
||||||
|
|
||||||
// br3.fillFast32()
|
// br3.fillFast32()
|
||||||
MOVQ 176(R11), R12
|
MOVQ 176(R10), R11
|
||||||
MOVBQZX 184(R11), R13
|
MOVBQZX 184(R10), R12
|
||||||
CMPQ R13, $0x20
|
CMPQ R12, $0x20
|
||||||
JBE skip_fill3
|
JBE skip_fill3
|
||||||
MOVQ 168(R11), R14
|
MOVQ 168(R10), R13
|
||||||
SUBQ $0x20, R13
|
SUBQ $0x20, R12
|
||||||
SUBQ $0x04, R14
|
SUBQ $0x04, R13
|
||||||
MOVQ 144(R11), R15
|
MOVQ 144(R10), R14
|
||||||
|
|
||||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||||
MOVL (R14)(R15*1), R15
|
MOVL (R13)(R14*1), R14
|
||||||
MOVQ R13, CX
|
MOVQ R12, CX
|
||||||
SHLQ CL, R15
|
SHLQ CL, R14
|
||||||
MOVQ R14, 168(R11)
|
MOVQ R13, 168(R10)
|
||||||
ORQ R15, R12
|
ORQ R14, R11
|
||||||
|
|
||||||
// exhausted = exhausted || (br3.off < 4)
|
// exhausted += (br3.off < 4)
|
||||||
CMPQ R14, $0x04
|
CMPQ R13, $0x04
|
||||||
SETLT AL
|
ADCB $+0, DL
|
||||||
ORB AL, DL
|
|
||||||
|
|
||||||
skip_fill3:
|
skip_fill3:
|
||||||
// val0 := br3.peekTopBits(peekBits)
|
// val0 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v0 := table[val0&mask]
|
// v0 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v0.entry)
|
// br3.advance(uint8(v0.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val1 := br3.peekTopBits(peekBits)
|
// val1 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v1 := table[val0&mask]
|
// v1 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v1.entry)
|
// br3.advance(uint8(v1.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// val2 := br3.peekTopBits(peekBits)
|
// val2 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v2 := table[val0&mask]
|
// v2 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v2.entry)
|
// br3.advance(uint8(v2.entry)
|
||||||
MOVB CH, AH
|
MOVB CH, AH
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
|
|
||||||
// val3 := br3.peekTopBits(peekBits)
|
// val3 := br3.peekTopBits(peekBits)
|
||||||
MOVQ R12, R14
|
MOVQ R11, R13
|
||||||
MOVQ DI, CX
|
MOVQ DI, CX
|
||||||
SHRQ CL, R14
|
SHRQ CL, R13
|
||||||
|
|
||||||
// v3 := table[val0&mask]
|
// v3 := table[val0&mask]
|
||||||
MOVW (R10)(R14*2), CX
|
MOVW (R9)(R13*2), CX
|
||||||
|
|
||||||
// br3.advance(uint8(v3.entry)
|
// br3.advance(uint8(v3.entry)
|
||||||
MOVB CH, AL
|
MOVB CH, AL
|
||||||
SHLQ CL, R12
|
SHLQ CL, R11
|
||||||
ADDB CL, R13
|
ADDB CL, R12
|
||||||
BSWAPL AX
|
BSWAPL AX
|
||||||
|
|
||||||
// these four writes get coalesced
|
// these four writes get coalesced
|
||||||
@ -632,11 +615,12 @@ skip_fill3:
|
|||||||
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
|
||||||
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
|
||||||
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
|
||||||
MOVL AX, (R8)
|
LEAQ (R8)(R8*2), CX
|
||||||
|
MOVL AX, (BX)(CX*1)
|
||||||
|
|
||||||
// update the bitreader structure
|
// update the bitreader structure
|
||||||
MOVQ R12, 176(R11)
|
MOVQ R11, 176(R10)
|
||||||
MOVB R13, 184(R11)
|
MOVB R12, 184(R10)
|
||||||
ADDQ $0x04, BX
|
ADDQ $0x04, BX
|
||||||
TESTB DL, DL
|
TESTB DL, DL
|
||||||
JZ main_loop
|
JZ main_loop
|
||||||
@ -652,7 +636,7 @@ TEXT ·decompress1x_main_loop_amd64(SB), $0-8
|
|||||||
MOVQ 16(CX), DX
|
MOVQ 16(CX), DX
|
||||||
MOVQ 24(CX), BX
|
MOVQ 24(CX), BX
|
||||||
CMPQ BX, $0x04
|
CMPQ BX, $0x04
|
||||||
JB error_max_decoded_size_exeeded
|
JB error_max_decoded_size_exceeded
|
||||||
LEAQ (DX)(BX*1), BX
|
LEAQ (DX)(BX*1), BX
|
||||||
MOVQ (CX), SI
|
MOVQ (CX), SI
|
||||||
MOVQ (SI), R8
|
MOVQ (SI), R8
|
||||||
@ -667,7 +651,7 @@ main_loop:
|
|||||||
// Check if we have room for 4 bytes in the output buffer
|
// Check if we have room for 4 bytes in the output buffer
|
||||||
LEAQ 4(DX), CX
|
LEAQ 4(DX), CX
|
||||||
CMPQ CX, BX
|
CMPQ CX, BX
|
||||||
JGE error_max_decoded_size_exeeded
|
JGE error_max_decoded_size_exceeded
|
||||||
|
|
||||||
// Decode 4 values
|
// Decode 4 values
|
||||||
CMPQ R11, $0x20
|
CMPQ R11, $0x20
|
||||||
@ -744,7 +728,7 @@ loop_condition:
|
|||||||
RET
|
RET
|
||||||
|
|
||||||
// Report error
|
// Report error
|
||||||
error_max_decoded_size_exeeded:
|
error_max_decoded_size_exceeded:
|
||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVQ $-1, CX
|
MOVQ $-1, CX
|
||||||
MOVQ CX, 40(AX)
|
MOVQ CX, 40(AX)
|
||||||
@ -757,7 +741,7 @@ TEXT ·decompress1x_main_loop_bmi2(SB), $0-8
|
|||||||
MOVQ 16(CX), DX
|
MOVQ 16(CX), DX
|
||||||
MOVQ 24(CX), BX
|
MOVQ 24(CX), BX
|
||||||
CMPQ BX, $0x04
|
CMPQ BX, $0x04
|
||||||
JB error_max_decoded_size_exeeded
|
JB error_max_decoded_size_exceeded
|
||||||
LEAQ (DX)(BX*1), BX
|
LEAQ (DX)(BX*1), BX
|
||||||
MOVQ (CX), SI
|
MOVQ (CX), SI
|
||||||
MOVQ (SI), R8
|
MOVQ (SI), R8
|
||||||
@ -772,7 +756,7 @@ main_loop:
|
|||||||
// Check if we have room for 4 bytes in the output buffer
|
// Check if we have room for 4 bytes in the output buffer
|
||||||
LEAQ 4(DX), CX
|
LEAQ 4(DX), CX
|
||||||
CMPQ CX, BX
|
CMPQ CX, BX
|
||||||
JGE error_max_decoded_size_exeeded
|
JGE error_max_decoded_size_exceeded
|
||||||
|
|
||||||
// Decode 4 values
|
// Decode 4 values
|
||||||
CMPQ R11, $0x20
|
CMPQ R11, $0x20
|
||||||
@ -839,7 +823,7 @@ loop_condition:
|
|||||||
RET
|
RET
|
||||||
|
|
||||||
// Report error
|
// Report error
|
||||||
error_max_decoded_size_exeeded:
|
error_max_decoded_size_exceeded:
|
||||||
MOVQ ctx+0(FP), AX
|
MOVQ ctx+0(FP), AX
|
||||||
MOVQ $-1, CX
|
MOVQ $-1, CX
|
||||||
MOVQ CX, 40(AX)
|
MOVQ CX, 40(AX)
|
||||||
|
22 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go generated vendored

@@ -103,6 +103,28 @@ func hash(u, shift uint32) uint32 {
 	return (u * 0x1e35a7bd) >> shift
 }
 
+// EncodeBlockInto exposes encodeBlock but checks dst size.
+func EncodeBlockInto(dst, src []byte) (d int) {
+	if MaxEncodedLen(len(src)) > len(dst) {
+		return 0
+	}
+
+	// encodeBlock breaks on too big blocks, so split.
+	for len(src) > 0 {
+		p := src
+		src = nil
+		if len(p) > maxBlockSize {
+			p, src = p[:maxBlockSize], p[maxBlockSize:]
+		}
+		if len(p) < minNonLiteralBlockSize {
+			d += emitLiteral(dst[d:], p)
+		} else {
+			d += encodeBlock(dst[d:], p)
+		}
+	}
+	return d
+}
+
 // encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It
 // assumes that the varint-encoded length of the decompressed bytes has already
 // been written.
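EncodeBlockInto, added above, first rejects a too-small destination and then feeds the encoder one bounded chunk at a time. Since the snapref package is internal to the module, here is only a hedged, generic sketch of that split-and-encode shape; encodeChunk, maxChunk and the size check are stand-ins, not the vendored functions:

package main

import "fmt"

const maxChunk = 4 // stand-in for maxBlockSize; the real limit is much larger

// encodeChunk is a hypothetical per-block encoder; here it just copies.
func encodeChunk(dst, src []byte) int { return copy(dst, src) }

// encodeInto mirrors the shape of EncodeBlockInto: bail out if dst cannot
// hold the worst case, then process the input one bounded chunk at a time.
func encodeInto(dst, src []byte) (d int) {
	if len(dst) < len(src) { // stand-in for the MaxEncodedLen check
		return 0
	}
	for len(src) > 0 {
		p := src
		src = nil
		if len(p) > maxChunk {
			p, src = p[:maxChunk], p[maxChunk:]
		}
		d += encodeChunk(dst[d:], p)
	}
	return d
}

func main() {
	dst := make([]byte, 16)
	n := encodeInto(dst, []byte("hello world"))
	fmt.Println(n, string(dst[:n])) // 11 hello world
}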
14 vendor/github.com/klauspost/compress/zstd/blockdec.go generated vendored

@@ -82,8 +82,9 @@ type blockDec struct {
 
 	err error
 
-	// Check against this crc
-	checkCRC []byte
+	// Check against this crc, if hasCRC is true.
+	checkCRC uint32
+	hasCRC   bool
 
 	// Frame to use for singlethreaded decoding.
 	// Should not be used by the decoder itself since parent may be another frame.
@@ -191,16 +192,14 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 	}
 
 	// Read block data.
-	if cap(b.dataStorage) < cSize {
+	if _, ok := br.(*byteBuf); !ok && cap(b.dataStorage) < cSize {
+		// byteBuf doesn't need a destination buffer.
 		if b.lowMem || cSize > maxCompressedBlockSize {
 			b.dataStorage = make([]byte, 0, cSize+compressedBlockOverAlloc)
 		} else {
 			b.dataStorage = make([]byte, 0, maxCompressedBlockSizeAlloc)
 		}
 	}
-	if cap(b.dst) <= maxSize {
-		b.dst = make([]byte, 0, maxSize+1)
-	}
 	b.data, err = br.readBig(cSize, b.dataStorage)
 	if err != nil {
 		if debugDecoder {
@@ -209,6 +208,9 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
 		}
 		return err
 	}
+	if cap(b.dst) <= maxSize {
+		b.dst = make([]byte, 0, maxSize+1)
+	}
 	return nil
 }
 
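The blockDec change above stores the frame checksum as a uint32 plus a hasCRC flag instead of a 4-byte slice, so the check becomes a plain integer comparison with no allocation. A hedged sketch of reading the on-stream value and comparing it against the low 32 bits of a 64-bit digest (the hash below is a placeholder; the real decoder uses xxhash64):

package main

import (
	"encoding/binary"
	"fmt"
	"hash/fnv"
)

// checkFrameCRC compares the little-endian uint32 trailer against the
// low 32 bits of a 64-bit digest, mirroring the checkCRC/hasCRC fields.
func checkFrameCRC(payload, trailer []byte) bool {
	if len(trailer) < 4 {
		return false // no CRC present; the decoder would leave hasCRC false
	}
	want := binary.LittleEndian.Uint32(trailer)
	h := fnv.New64a() // placeholder digest, not the zstd content hash
	h.Write(payload)
	got := uint32(h.Sum64())
	return got == want
}

func main() {
	payload := []byte("example block")
	h := fnv.New64a()
	h.Write(payload)
	var trailer [4]byte
	binary.LittleEndian.PutUint32(trailer[:], uint32(h.Sum64()))
	fmt.Println(checkFrameCRC(payload, trailer[:])) // true
}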
9 vendor/github.com/klauspost/compress/zstd/decodeheader.go generated vendored

@@ -4,7 +4,6 @@
 package zstd
 
 import (
-	"bytes"
 	"encoding/binary"
 	"errors"
 	"io"
@@ -102,8 +101,8 @@ func (h *Header) Decode(in []byte) error {
 	}
 	h.HeaderSize += 4
 	b, in := in[:4], in[4:]
-	if !bytes.Equal(b, frameMagic) {
-		if !bytes.Equal(b[1:4], skippableFrameMagic) || b[0]&0xf0 != 0x50 {
+	if string(b) != frameMagic {
+		if string(b[1:4]) != skippableFrameMagic || b[0]&0xf0 != 0x50 {
 			return ErrMagicMismatch
 		}
 		if len(in) < 4 {
@@ -153,7 +152,7 @@ func (h *Header) Decode(in []byte) error {
 	}
 	b, in = in[:size], in[size:]
 	h.HeaderSize += int(size)
-	switch size {
+	switch len(b) {
 	case 1:
 		h.DictionaryID = uint32(b[0])
 	case 2:
@@ -183,7 +182,7 @@ func (h *Header) Decode(in []byte) error {
 	}
 	b, in = in[:fcsSize], in[fcsSize:]
 	h.HeaderSize += int(fcsSize)
-	switch fcsSize {
+	switch len(b) {
 	case 1:
 		h.FrameContentSize = uint64(b[0])
 	case 2:
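The header change above drops the bytes import and compares the magic with `string(b) != frameMagic`; when a []byte is converted to string only for a comparison, the Go compiler can typically avoid the temporary allocation, so the magic can live as a plain string constant. A small hedged illustration of the same check:

package main

import "fmt"

const frameMagic = "\x28\xb5\x2f\xfd" // zstd frame magic, little-endian 0xFD2FB528

// hasFrameMagic checks the first four bytes against the magic string.
// Converting b[:4] to string only inside the comparison keeps this cheap
// on the hot path, since no temporary string normally needs to escape.
func hasFrameMagic(b []byte) bool {
	return len(b) >= 4 && string(b[:4]) == frameMagic
}

func main() {
	fmt.Println(hasFrameMagic([]byte{0x28, 0xb5, 0x2f, 0xfd, 0x00})) // true
	fmt.Println(hasFrameMagic([]byte("not zstd")))                   // false
}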
93
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
93
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
@ -5,7 +5,6 @@
|
|||||||
package zstd
|
package zstd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"context"
|
"context"
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"io"
|
"io"
|
||||||
@ -41,8 +40,7 @@ type Decoder struct {
|
|||||||
frame *frameDec
|
frame *frameDec
|
||||||
|
|
||||||
// Custom dictionaries.
|
// Custom dictionaries.
|
||||||
// Always uses copies.
|
dicts map[uint32]*dict
|
||||||
dicts map[uint32]dict
|
|
||||||
|
|
||||||
// streamWg is the waitgroup for all streams
|
// streamWg is the waitgroup for all streams
|
||||||
streamWg sync.WaitGroup
|
streamWg sync.WaitGroup
|
||||||
@ -104,7 +102,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Transfer option dicts.
|
// Transfer option dicts.
|
||||||
d.dicts = make(map[uint32]dict, len(d.o.dicts))
|
d.dicts = make(map[uint32]*dict, len(d.o.dicts))
|
||||||
for _, dc := range d.o.dicts {
|
for _, dc := range d.o.dicts {
|
||||||
d.dicts[dc.id] = dc
|
d.dicts[dc.id] = dc
|
||||||
}
|
}
|
||||||
@ -342,15 +340,8 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
|||||||
}
|
}
|
||||||
return dst, err
|
return dst, err
|
||||||
}
|
}
|
||||||
if frame.DictionaryID != nil {
|
if err = d.setDict(frame); err != nil {
|
||||||
dict, ok := d.dicts[*frame.DictionaryID]
|
return nil, err
|
||||||
if !ok {
|
|
||||||
return nil, ErrUnknownDictionary
|
|
||||||
}
|
|
||||||
if debugDecoder {
|
|
||||||
println("setting dict", frame.DictionaryID)
|
|
||||||
}
|
|
||||||
frame.history.setDict(&dict)
|
|
||||||
}
|
}
|
||||||
if frame.WindowSize > d.o.maxWindowSize {
|
if frame.WindowSize > d.o.maxWindowSize {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
@ -459,7 +450,11 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
|
|||||||
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
|
println("got", len(d.current.b), "bytes, error:", d.current.err, "data crc:", tmp)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !d.o.ignoreChecksum && len(next.b) > 0 {
|
if d.o.ignoreChecksum {
|
||||||
|
return true
|
||||||
|
}
|
||||||
|
|
||||||
|
if len(next.b) > 0 {
|
||||||
n, err := d.current.crc.Write(next.b)
|
n, err := d.current.crc.Write(next.b)
|
||||||
if err == nil {
|
if err == nil {
|
||||||
if n != len(next.b) {
|
if n != len(next.b) {
|
||||||
@ -467,18 +462,16 @@ func (d *Decoder) nextBlock(blocking bool) (ok bool) {
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if next.err == nil && next.d != nil && len(next.d.checkCRC) != 0 {
|
if next.err == nil && next.d != nil && next.d.hasCRC {
|
||||||
got := d.current.crc.Sum64()
|
got := uint32(d.current.crc.Sum64())
|
||||||
var tmp [4]byte
|
if got != next.d.checkCRC {
|
||||||
binary.LittleEndian.PutUint32(tmp[:], uint32(got))
|
|
||||||
if !d.o.ignoreChecksum && !bytes.Equal(tmp[:], next.d.checkCRC) {
|
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("CRC Check Failed:", tmp[:], " (got) !=", next.d.checkCRC, "(on stream)")
|
printf("CRC Check Failed: %08x (got) != %08x (on stream)\n", got, next.d.checkCRC)
|
||||||
}
|
}
|
||||||
d.current.err = ErrCRCMismatch
|
d.current.err = ErrCRCMismatch
|
||||||
} else {
|
} else {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("CRC ok", tmp[:])
|
printf("CRC ok %08x\n", got)
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -494,18 +487,12 @@ func (d *Decoder) nextBlockSync() (ok bool) {
|
|||||||
if !d.syncStream.inFrame {
|
if !d.syncStream.inFrame {
|
||||||
d.frame.history.reset()
|
d.frame.history.reset()
|
||||||
d.current.err = d.frame.reset(&d.syncStream.br)
|
d.current.err = d.frame.reset(&d.syncStream.br)
|
||||||
|
if d.current.err == nil {
|
||||||
|
d.current.err = d.setDict(d.frame)
|
||||||
|
}
|
||||||
if d.current.err != nil {
|
if d.current.err != nil {
|
||||||
return false
|
return false
|
||||||
}
|
}
|
||||||
if d.frame.DictionaryID != nil {
|
|
||||||
dict, ok := d.dicts[*d.frame.DictionaryID]
|
|
||||||
if !ok {
|
|
||||||
d.current.err = ErrUnknownDictionary
|
|
||||||
return false
|
|
||||||
} else {
|
|
||||||
d.frame.history.setDict(&dict)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
|
if d.frame.WindowSize > d.o.maxDecodedSize || d.frame.WindowSize > d.o.maxWindowSize {
|
||||||
d.current.err = ErrDecoderSizeExceeded
|
d.current.err = ErrDecoderSizeExceeded
|
||||||
return false
|
return false
|
||||||
@ -864,13 +851,8 @@ decodeStream:
|
|||||||
if debugDecoder && err != nil {
|
if debugDecoder && err != nil {
|
||||||
println("Frame decoder returned", err)
|
println("Frame decoder returned", err)
|
||||||
}
|
}
|
||||||
if err == nil && frame.DictionaryID != nil {
|
if err == nil {
|
||||||
dict, ok := d.dicts[*frame.DictionaryID]
|
err = d.setDict(frame)
|
||||||
if !ok {
|
|
||||||
err = ErrUnknownDictionary
|
|
||||||
} else {
|
|
||||||
frame.history.setDict(&dict)
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
|
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
@ -918,18 +900,22 @@ decodeStream:
|
|||||||
println("next block returned error:", err)
|
println("next block returned error:", err)
|
||||||
}
|
}
|
||||||
dec.err = err
|
dec.err = err
|
||||||
dec.checkCRC = nil
|
dec.hasCRC = false
|
||||||
if dec.Last && frame.HasCheckSum && err == nil {
|
if dec.Last && frame.HasCheckSum && err == nil {
|
||||||
crc, err := frame.rawInput.readSmall(4)
|
crc, err := frame.rawInput.readSmall(4)
|
||||||
if err != nil {
|
if len(crc) < 4 {
|
||||||
|
if err == nil {
|
||||||
|
err = io.ErrUnexpectedEOF
|
||||||
|
|
||||||
|
}
|
||||||
println("CRC missing?", err)
|
println("CRC missing?", err)
|
||||||
dec.err = err
|
dec.err = err
|
||||||
}
|
} else {
|
||||||
var tmp [4]byte
|
dec.checkCRC = binary.LittleEndian.Uint32(crc)
|
||||||
copy(tmp[:], crc)
|
dec.hasCRC = true
|
||||||
dec.checkCRC = tmp[:]
|
if debugDecoder {
|
||||||
if debugDecoder {
|
printf("found crc to check: %08x\n", dec.checkCRC)
|
||||||
println("found crc to check:", dec.checkCRC)
|
}
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
err = dec.err
|
err = dec.err
|
||||||
@ -948,3 +934,20 @@ decodeStream:
|
|||||||
hist.reset()
|
hist.reset()
|
||||||
d.frame.history.b = frameHistCache
|
d.frame.history.b = frameHistCache
|
||||||
}
|
}
|
||||||
|
|
||||||
|
func (d *Decoder) setDict(frame *frameDec) (err error) {
|
||||||
|
dict, ok := d.dicts[frame.DictionaryID]
|
||||||
|
if ok {
|
||||||
|
if debugDecoder {
|
||||||
|
println("setting dict", frame.DictionaryID)
|
||||||
|
}
|
||||||
|
frame.history.setDict(dict)
|
||||||
|
} else if frame.DictionaryID != 0 {
|
||||||
|
// A zero or missing dictionary id is ambiguous:
|
||||||
|
// either dictionary zero, or no dictionary. In particular,
|
||||||
|
// zstd --patch-from uses this id for the source file,
|
||||||
|
// so only return an error if the dictionary id is not zero.
|
||||||
|
err = ErrUnknownDictionary
|
||||||
|
}
|
||||||
|
return err
|
||||||
|
}
|
||||||
|
26
vendor/github.com/klauspost/compress/zstd/decoder_options.go
generated
vendored
26
vendor/github.com/klauspost/compress/zstd/decoder_options.go
generated
vendored
@ -6,6 +6,8 @@ package zstd
|
|||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"math/bits"
|
||||||
"runtime"
|
"runtime"
|
||||||
)
|
)
|
||||||
|
|
||||||
@ -18,7 +20,7 @@ type decoderOptions struct {
|
|||||||
concurrent int
|
concurrent int
|
||||||
maxDecodedSize uint64
|
maxDecodedSize uint64
|
||||||
maxWindowSize uint64
|
maxWindowSize uint64
|
||||||
dicts []dict
|
dicts []*dict
|
||||||
ignoreChecksum bool
|
ignoreChecksum bool
|
||||||
limitToCap bool
|
limitToCap bool
|
||||||
decodeBufsBelow int
|
decodeBufsBelow int
|
||||||
@ -85,7 +87,13 @@ func WithDecoderMaxMemory(n uint64) DOption {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// WithDecoderDicts allows to register one or more dictionaries for the decoder.
|
// WithDecoderDicts allows to register one or more dictionaries for the decoder.
|
||||||
// If several dictionaries with the same ID is provided the last one will be used.
|
//
|
||||||
|
// Each slice in dict must be in the [dictionary format] produced by
|
||||||
|
// "zstd --train" from the Zstandard reference implementation.
|
||||||
|
//
|
||||||
|
// If several dictionaries with the same ID are provided, the last one will be used.
|
||||||
|
//
|
||||||
|
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
|
||||||
func WithDecoderDicts(dicts ...[]byte) DOption {
|
func WithDecoderDicts(dicts ...[]byte) DOption {
|
||||||
return func(o *decoderOptions) error {
|
return func(o *decoderOptions) error {
|
||||||
for _, b := range dicts {
|
for _, b := range dicts {
|
||||||
@ -93,12 +101,24 @@ func WithDecoderDicts(dicts ...[]byte) DOption {
|
|||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
o.dicts = append(o.dicts, *d)
|
o.dicts = append(o.dicts, d)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithEncoderDictRaw registers a dictionary that may be used by the decoder.
|
||||||
|
// The slice content can be arbitrary data.
|
||||||
|
func WithDecoderDictRaw(id uint32, content []byte) DOption {
|
||||||
|
return func(o *decoderOptions) error {
|
||||||
|
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
|
||||||
|
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
|
||||||
|
}
|
||||||
|
o.dicts = append(o.dicts, &dict{id: id, content: content, offsets: [3]int{1, 4, 8}})
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
// WithDecoderMaxWindow allows to set a maximum window size for decodes.
|
// WithDecoderMaxWindow allows to set a maximum window size for decodes.
|
||||||
// This allows rejecting packets that will cause big memory usage.
|
// This allows rejecting packets that will cause big memory usage.
|
||||||
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
|
// The Decoder will likely allocate more memory based on the WithDecoderLowmem setting.
|
||||||
|
51
vendor/github.com/klauspost/compress/zstd/dict.go
generated
vendored
51
vendor/github.com/klauspost/compress/zstd/dict.go
generated
vendored
@ -1,7 +1,6 @@
|
|||||||
package zstd
|
package zstd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
|
||||||
"encoding/binary"
|
"encoding/binary"
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
@ -20,7 +19,10 @@ type dict struct {
|
|||||||
content []byte
|
content []byte
|
||||||
}
|
}
|
||||||
|
|
||||||
var dictMagic = [4]byte{0x37, 0xa4, 0x30, 0xec}
|
const dictMagic = "\x37\xa4\x30\xec"
|
||||||
|
|
||||||
|
// Maximum dictionary size for the reference implementation (1.5.3) is 2 GiB.
|
||||||
|
const dictMaxLength = 1 << 31
|
||||||
|
|
||||||
// ID returns the dictionary id or 0 if d is nil.
|
// ID returns the dictionary id or 0 if d is nil.
|
||||||
func (d *dict) ID() uint32 {
|
func (d *dict) ID() uint32 {
|
||||||
@ -30,14 +32,38 @@ func (d *dict) ID() uint32 {
|
|||||||
return d.id
|
return d.id
|
||||||
}
|
}
|
||||||
|
|
||||||
// DictContentSize returns the dictionary content size or 0 if d is nil.
|
// ContentSize returns the dictionary content size or 0 if d is nil.
|
||||||
func (d *dict) DictContentSize() int {
|
func (d *dict) ContentSize() int {
|
||||||
if d == nil {
|
if d == nil {
|
||||||
return 0
|
return 0
|
||||||
}
|
}
|
||||||
return len(d.content)
|
return len(d.content)
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// Content returns the dictionary content.
|
||||||
|
func (d *dict) Content() []byte {
|
||||||
|
if d == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return d.content
|
||||||
|
}
|
||||||
|
|
||||||
|
// Offsets returns the initial offsets.
|
||||||
|
func (d *dict) Offsets() [3]int {
|
||||||
|
if d == nil {
|
||||||
|
return [3]int{}
|
||||||
|
}
|
||||||
|
return d.offsets
|
||||||
|
}
|
||||||
|
|
||||||
|
// LitEncoder returns the literal encoder.
|
||||||
|
func (d *dict) LitEncoder() *huff0.Scratch {
|
||||||
|
if d == nil {
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
return d.litEnc
|
||||||
|
}
|
||||||
|
|
||||||
// Load a dictionary as described in
|
// Load a dictionary as described in
|
||||||
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
|
// https://github.com/facebook/zstd/blob/master/doc/zstd_compression_format.md#dictionary-format
|
||||||
func loadDict(b []byte) (*dict, error) {
|
func loadDict(b []byte) (*dict, error) {
|
||||||
@ -50,7 +76,7 @@ func loadDict(b []byte) (*dict, error) {
|
|||||||
ofDec: sequenceDec{fse: &fseDecoder{}},
|
ofDec: sequenceDec{fse: &fseDecoder{}},
|
||||||
mlDec: sequenceDec{fse: &fseDecoder{}},
|
mlDec: sequenceDec{fse: &fseDecoder{}},
|
||||||
}
|
}
|
||||||
if !bytes.Equal(b[:4], dictMagic[:]) {
|
if string(b[:4]) != dictMagic {
|
||||||
return nil, ErrMagicMismatch
|
return nil, ErrMagicMismatch
|
||||||
}
|
}
|
||||||
d.id = binary.LittleEndian.Uint32(b[4:8])
|
d.id = binary.LittleEndian.Uint32(b[4:8])
|
||||||
@ -62,7 +88,7 @@ func loadDict(b []byte) (*dict, error) {
|
|||||||
var err error
|
var err error
|
||||||
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
|
d.litEnc, b, err = huff0.ReadTable(b[8:], nil)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, fmt.Errorf("loading literal table: %w", err)
|
||||||
}
|
}
|
||||||
d.litEnc.Reuse = huff0.ReusePolicyMust
|
d.litEnc.Reuse = huff0.ReusePolicyMust
|
||||||
|
|
||||||
@ -120,3 +146,16 @@ func loadDict(b []byte) (*dict, error) {
|
|||||||
|
|
||||||
return &d, nil
|
return &d, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// InspectDictionary loads a zstd dictionary and provides functions to inspect the content.
|
||||||
|
func InspectDictionary(b []byte) (interface {
|
||||||
|
ID() uint32
|
||||||
|
ContentSize() int
|
||||||
|
Content() []byte
|
||||||
|
Offsets() [3]int
|
||||||
|
LitEncoder() *huff0.Scratch
|
||||||
|
}, error) {
|
||||||
|
initPredefined()
|
||||||
|
d, err := loadDict(b)
|
||||||
|
return d, err
|
||||||
|
}
|
||||||
|
28
vendor/github.com/klauspost/compress/zstd/enc_base.go
generated
vendored
28
vendor/github.com/klauspost/compress/zstd/enc_base.go
generated
vendored
@ -16,6 +16,7 @@ type fastBase struct {
|
|||||||
cur int32
|
cur int32
|
||||||
// maximum offset. Should be at least 2x block size.
|
// maximum offset. Should be at least 2x block size.
|
||||||
maxMatchOff int32
|
maxMatchOff int32
|
||||||
|
bufferReset int32
|
||||||
hist []byte
|
hist []byte
|
||||||
crc *xxhash.Digest
|
crc *xxhash.Digest
|
||||||
tmp [8]byte
|
tmp [8]byte
|
||||||
@ -56,8 +57,8 @@ func (e *fastBase) Block() *blockEnc {
|
|||||||
}
|
}
|
||||||
|
|
||||||
func (e *fastBase) addBlock(src []byte) int32 {
|
func (e *fastBase) addBlock(src []byte) int32 {
|
||||||
if debugAsserts && e.cur > bufferReset {
|
if debugAsserts && e.cur > e.bufferReset {
|
||||||
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, bufferReset))
|
panic(fmt.Sprintf("ecur (%d) > buffer reset (%d)", e.cur, e.bufferReset))
|
||||||
}
|
}
|
||||||
// check if we have space already
|
// check if we have space already
|
||||||
if len(e.hist)+len(src) > cap(e.hist) {
|
if len(e.hist)+len(src) > cap(e.hist) {
|
||||||
@ -126,24 +127,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
|
|||||||
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
|
panic(fmt.Sprintf("len(src)-s (%d) > maxCompressedBlockSize (%d)", len(src)-int(s), maxCompressedBlockSize))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
a := src[s:]
|
return int32(matchLen(src[s:], src[t:]))
|
||||||
b := src[t:]
|
|
||||||
b = b[:len(a)]
|
|
||||||
end := int32((len(a) >> 3) << 3)
|
|
||||||
for i := int32(0); i < end; i += 8 {
|
|
||||||
if diff := load6432(a, i) ^ load6432(b, i); diff != 0 {
|
|
||||||
return i + int32(bits.TrailingZeros64(diff)>>3)
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
a = a[end:]
|
|
||||||
b = b[end:]
|
|
||||||
for i := range a {
|
|
||||||
if a[i] != b[i] {
|
|
||||||
return int32(i) + end
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return int32(len(a)) + end
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Reset the encoding table.
|
// Reset the encoding table.
|
||||||
@ -165,13 +149,13 @@ func (e *fastBase) resetBase(d *dict, singleBlock bool) {
|
|||||||
if singleBlock {
|
if singleBlock {
|
||||||
e.lowMem = true
|
e.lowMem = true
|
||||||
}
|
}
|
||||||
e.ensureHist(d.DictContentSize() + maxCompressedBlockSize)
|
e.ensureHist(d.ContentSize() + maxCompressedBlockSize)
|
||||||
e.lowMem = low
|
e.lowMem = low
|
||||||
}
|
}
|
||||||
|
|
||||||
// We offset current position so everything will be out of reach.
|
// We offset current position so everything will be out of reach.
|
||||||
// If above reset line, history will be purged.
|
// If above reset line, history will be purged.
|
||||||
if e.cur < bufferReset {
|
if e.cur < e.bufferReset {
|
||||||
e.cur += e.maxMatchOff + int32(len(e.hist))
|
e.cur += e.maxMatchOff + int32(len(e.hist))
|
||||||
}
|
}
|
||||||
e.hist = e.hist[:0]
|
e.hist = e.hist[:0]
|
||||||
|
63
vendor/github.com/klauspost/compress/zstd/enc_best.go
generated
vendored
63
vendor/github.com/klauspost/compress/zstd/enc_best.go
generated
vendored
@ -85,14 +85,10 @@ func (e *bestFastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
e.table = [bestShortTableSize]prevEntry{}
|
||||||
e.table[i] = prevEntry{}
|
e.longTable = [bestLongTableSize]prevEntry{}
|
||||||
}
|
|
||||||
for i := range e.longTable[:] {
|
|
||||||
e.longTable[i] = prevEntry{}
|
|
||||||
}
|
|
||||||
e.cur = e.maxMatchOff
|
e.cur = e.maxMatchOff
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -193,8 +189,8 @@ encodeLoop:
|
|||||||
panic("offset0 was 0")
|
panic("offset0 was 0")
|
||||||
}
|
}
|
||||||
|
|
||||||
bestOf := func(a, b match) match {
|
bestOf := func(a, b *match) *match {
|
||||||
if a.est+(a.s-b.s)*bitsPerByte>>10 < b.est+(b.s-a.s)*bitsPerByte>>10 {
|
if a.est-b.est+(a.s-b.s)*bitsPerByte>>10 < 0 {
|
||||||
return a
|
return a
|
||||||
}
|
}
|
||||||
return b
|
return b
|
||||||
@ -220,22 +216,26 @@ encodeLoop:
|
|||||||
return m
|
return m
|
||||||
}
|
}
|
||||||
|
|
||||||
best := bestOf(matchAt(candidateL.offset-e.cur, s, uint32(cv), -1), matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
|
m1 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
|
||||||
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
|
m2 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
|
||||||
best = bestOf(best, matchAt(candidateS.prev-e.cur, s, uint32(cv), -1))
|
m3 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
|
||||||
|
m4 := matchAt(candidateS.prev-e.cur, s, uint32(cv), -1)
|
||||||
|
best := bestOf(bestOf(&m1, &m2), bestOf(&m3, &m4))
|
||||||
|
|
||||||
if canRepeat && best.length < goodEnough {
|
if canRepeat && best.length < goodEnough {
|
||||||
cv32 := uint32(cv >> 8)
|
cv32 := uint32(cv >> 8)
|
||||||
spp := s + 1
|
spp := s + 1
|
||||||
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
|
m1 := matchAt(spp-offset1, spp, cv32, 1)
|
||||||
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
|
m2 := matchAt(spp-offset2, spp, cv32, 2)
|
||||||
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
|
m3 := matchAt(spp-offset3, spp, cv32, 3)
|
||||||
|
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
|
||||||
if best.length > 0 {
|
if best.length > 0 {
|
||||||
cv32 = uint32(cv >> 24)
|
cv32 = uint32(cv >> 24)
|
||||||
spp += 2
|
spp += 2
|
||||||
best = bestOf(best, matchAt(spp-offset1, spp, cv32, 1))
|
m1 := matchAt(spp-offset1, spp, cv32, 1)
|
||||||
best = bestOf(best, matchAt(spp-offset2, spp, cv32, 2))
|
m2 := matchAt(spp-offset2, spp, cv32, 2)
|
||||||
best = bestOf(best, matchAt(spp-offset3, spp, cv32, 3))
|
m3 := matchAt(spp-offset3, spp, cv32, 3)
|
||||||
|
best = bestOf(bestOf(best, &m1), bestOf(&m2, &m3))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
// Load next and check...
|
// Load next and check...
|
||||||
@ -262,26 +262,33 @@ encodeLoop:
|
|||||||
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
|
candidateL2 := e.longTable[hashLen(cv2, bestLongTableBits, bestLongLen)]
|
||||||
|
|
||||||
// Short at s+1
|
// Short at s+1
|
||||||
best = bestOf(best, matchAt(candidateS.offset-e.cur, s, uint32(cv), -1))
|
m1 := matchAt(candidateS.offset-e.cur, s, uint32(cv), -1)
|
||||||
// Long at s+1, s+2
|
// Long at s+1, s+2
|
||||||
best = bestOf(best, matchAt(candidateL.offset-e.cur, s, uint32(cv), -1))
|
m2 := matchAt(candidateL.offset-e.cur, s, uint32(cv), -1)
|
||||||
best = bestOf(best, matchAt(candidateL.prev-e.cur, s, uint32(cv), -1))
|
m3 := matchAt(candidateL.prev-e.cur, s, uint32(cv), -1)
|
||||||
best = bestOf(best, matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1))
|
m4 := matchAt(candidateL2.offset-e.cur, s+1, uint32(cv2), -1)
|
||||||
best = bestOf(best, matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1))
|
m5 := matchAt(candidateL2.prev-e.cur, s+1, uint32(cv2), -1)
|
||||||
|
best = bestOf(bestOf(bestOf(best, &m1), &m2), bestOf(bestOf(&m3, &m4), &m5))
|
||||||
if false {
|
if false {
|
||||||
// Short at s+3.
|
// Short at s+3.
|
||||||
// Too often worse...
|
// Too often worse...
|
||||||
best = bestOf(best, matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1))
|
m := matchAt(e.table[hashLen(cv2>>8, bestShortTableBits, bestShortLen)].offset-e.cur, s+2, uint32(cv2>>8), -1)
|
||||||
|
best = bestOf(best, &m)
|
||||||
}
|
}
|
||||||
// See if we can find a better match by checking where the current best ends.
|
// See if we can find a better match by checking where the current best ends.
|
||||||
// Use that offset to see if we can find a better full match.
|
// Use that offset to see if we can find a better full match.
|
||||||
if sAt := best.s + best.length; sAt < sLimit {
|
if sAt := best.s + best.length; sAt < sLimit {
|
||||||
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
|
nextHashL := hashLen(load6432(src, sAt), bestLongTableBits, bestLongLen)
|
||||||
candidateEnd := e.longTable[nextHashL]
|
candidateEnd := e.longTable[nextHashL]
|
||||||
if pos := candidateEnd.offset - e.cur - best.length; pos >= 0 {
|
// Start check at a fixed offset to allow for a few mismatches.
|
||||||
bestEnd := bestOf(best, matchAt(pos, best.s, load3232(src, best.s), -1))
|
// For this compression level 2 yields the best results.
|
||||||
if pos := candidateEnd.prev - e.cur - best.length; pos >= 0 {
|
const skipBeginning = 2
|
||||||
bestEnd = bestOf(bestEnd, matchAt(pos, best.s, load3232(src, best.s), -1))
|
if pos := candidateEnd.offset - e.cur - best.length + skipBeginning; pos >= 0 {
|
||||||
|
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
||||||
|
bestEnd := bestOf(best, &m)
|
||||||
|
if pos := candidateEnd.prev - e.cur - best.length + skipBeginning; pos >= 0 {
|
||||||
|
m := matchAt(pos, best.s+skipBeginning, load3232(src, best.s+skipBeginning), -1)
|
||||||
|
bestEnd = bestOf(bestEnd, &m)
|
||||||
}
|
}
|
||||||
best = bestEnd
|
best = bestEnd
|
||||||
}
|
}
|
||||||
|
12
vendor/github.com/klauspost/compress/zstd/enc_better.go
generated
vendored
12
vendor/github.com/klauspost/compress/zstd/enc_better.go
generated
vendored
@ -62,14 +62,10 @@ func (e *betterFastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
e.table = [betterShortTableSize]tableEntry{}
|
||||||
e.table[i] = tableEntry{}
|
e.longTable = [betterLongTableSize]prevEntry{}
|
||||||
}
|
|
||||||
for i := range e.longTable[:] {
|
|
||||||
e.longTable[i] = prevEntry{}
|
|
||||||
}
|
|
||||||
e.cur = e.maxMatchOff
|
e.cur = e.maxMatchOff
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -587,7 +583,7 @@ func (e *betterFastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
|
16
vendor/github.com/klauspost/compress/zstd/enc_dfast.go
generated
vendored
16
vendor/github.com/klauspost/compress/zstd/enc_dfast.go
generated
vendored
@ -44,14 +44,10 @@ func (e *doubleFastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
e.table = [dFastShortTableSize]tableEntry{}
|
||||||
e.table[i] = tableEntry{}
|
e.longTable = [dFastLongTableSize]tableEntry{}
|
||||||
}
|
|
||||||
for i := range e.longTable[:] {
|
|
||||||
e.longTable[i] = tableEntry{}
|
|
||||||
}
|
|
||||||
e.cur = e.maxMatchOff
|
e.cur = e.maxMatchOff
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
@ -388,7 +384,7 @@ func (e *doubleFastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
if e.cur >= bufferReset {
|
if e.cur >= e.bufferReset {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
}
|
}
|
||||||
@ -685,7 +681,7 @@ encodeLoop:
|
|||||||
}
|
}
|
||||||
|
|
||||||
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
||||||
if e.cur < bufferReset {
|
if e.cur < e.bufferReset {
|
||||||
e.cur += int32(len(src))
|
e.cur += int32(len(src))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -700,7 +696,7 @@ func (e *doubleFastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
|
12
vendor/github.com/klauspost/compress/zstd/enc_fast.go
generated
vendored
12
vendor/github.com/klauspost/compress/zstd/enc_fast.go
generated
vendored
@ -43,7 +43,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
|
|||||||
)
|
)
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
@ -310,7 +310,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
if e.cur >= bufferReset {
|
if e.cur >= e.bufferReset {
|
||||||
for i := range e.table[:] {
|
for i := range e.table[:] {
|
||||||
e.table[i] = tableEntry{}
|
e.table[i] = tableEntry{}
|
||||||
}
|
}
|
||||||
@ -538,7 +538,7 @@ encodeLoop:
|
|||||||
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
|
println("returning, recent offsets:", blk.recentOffsets, "extra literals:", blk.extraLits)
|
||||||
}
|
}
|
||||||
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
// We do not store history, so we must offset e.cur to avoid false matches for next user.
|
||||||
if e.cur < bufferReset {
|
if e.cur < e.bufferReset {
|
||||||
e.cur += int32(len(src))
|
e.cur += int32(len(src))
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
@ -555,11 +555,9 @@ func (e *fastEncoderDict) Encode(blk *blockEnc, src []byte) {
|
|||||||
return
|
return
|
||||||
}
|
}
|
||||||
// Protect against e.cur wraparound.
|
// Protect against e.cur wraparound.
|
||||||
for e.cur >= bufferReset {
|
for e.cur >= e.bufferReset-int32(len(e.hist)) {
|
||||||
if len(e.hist) == 0 {
|
if len(e.hist) == 0 {
|
||||||
for i := range e.table[:] {
|
e.table = [tableSize]tableEntry{}
|
||||||
e.table[i] = tableEntry{}
|
|
||||||
}
|
|
||||||
e.cur = e.maxMatchOff
|
e.cur = e.maxMatchOff
|
||||||
break
|
break
|
||||||
}
|
}
|
||||||
|
35
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
35
vendor/github.com/klauspost/compress/zstd/encoder.go
generated
vendored
@ -8,6 +8,7 @@ import (
|
|||||||
"crypto/rand"
|
"crypto/rand"
|
||||||
"fmt"
|
"fmt"
|
||||||
"io"
|
"io"
|
||||||
|
"math"
|
||||||
rdebug "runtime/debug"
|
rdebug "runtime/debug"
|
||||||
"sync"
|
"sync"
|
||||||
|
|
||||||
@ -639,3 +640,37 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
|
|||||||
}
|
}
|
||||||
return dst
|
return dst
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// MaxEncodedSize returns the expected maximum
|
||||||
|
// size of an encoded block or stream.
|
||||||
|
func (e *Encoder) MaxEncodedSize(size int) int {
|
||||||
|
frameHeader := 4 + 2 // magic + frame header & window descriptor
|
||||||
|
if e.o.dict != nil {
|
||||||
|
frameHeader += 4
|
||||||
|
}
|
||||||
|
// Frame content size:
|
||||||
|
if size < 256 {
|
||||||
|
frameHeader++
|
||||||
|
} else if size < 65536+256 {
|
||||||
|
frameHeader += 2
|
||||||
|
} else if size < math.MaxInt32 {
|
||||||
|
frameHeader += 4
|
||||||
|
} else {
|
||||||
|
frameHeader += 8
|
||||||
|
}
|
||||||
|
// Final crc
|
||||||
|
if e.o.crc {
|
||||||
|
frameHeader += 4
|
||||||
|
}
|
||||||
|
|
||||||
|
// Max overhead is 3 bytes/block.
|
||||||
|
// There cannot be 0 blocks.
|
||||||
|
blocks := (size + e.o.blockSize) / e.o.blockSize
|
||||||
|
|
||||||
|
// Combine, add padding.
|
||||||
|
maxSz := frameHeader + 3*blocks + size
|
||||||
|
if e.o.pad > 1 {
|
||||||
|
maxSz += calcSkippableFrame(int64(maxSz), int64(e.o.pad))
|
||||||
|
}
|
||||||
|
return maxSz
|
||||||
|
}
|
||||||
|
36
vendor/github.com/klauspost/compress/zstd/encoder_options.go
generated
vendored
36
vendor/github.com/klauspost/compress/zstd/encoder_options.go
generated
vendored
@ -3,6 +3,8 @@ package zstd
|
|||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"fmt"
|
"fmt"
|
||||||
|
"math"
|
||||||
|
"math/bits"
|
||||||
"runtime"
|
"runtime"
|
||||||
"strings"
|
"strings"
|
||||||
)
|
)
|
||||||
@ -47,22 +49,22 @@ func (o encoderOptions) encoder() encoder {
|
|||||||
switch o.level {
|
switch o.level {
|
||||||
case SpeedFastest:
|
case SpeedFastest:
|
||||||
if o.dict != nil {
|
if o.dict != nil {
|
||||||
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
|
return &fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
|
||||||
}
|
}
|
||||||
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
|
return &fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
|
||||||
|
|
||||||
case SpeedDefault:
|
case SpeedDefault:
|
||||||
if o.dict != nil {
|
if o.dict != nil {
|
||||||
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}}
|
return &doubleFastEncoderDict{fastEncoderDict: fastEncoderDict{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}}
|
||||||
}
|
}
|
||||||
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
|
return &doubleFastEncoder{fastEncoder: fastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
|
||||||
case SpeedBetterCompression:
|
case SpeedBetterCompression:
|
||||||
if o.dict != nil {
|
if o.dict != nil {
|
||||||
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}}
|
return &betterFastEncoderDict{betterFastEncoder: betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}}
|
||||||
}
|
}
|
||||||
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
|
return &betterFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
|
||||||
case SpeedBestCompression:
|
case SpeedBestCompression:
|
||||||
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), lowMem: o.lowMem}}
|
return &bestFastEncoder{fastBase: fastBase{maxMatchOff: int32(o.windowSize), bufferReset: math.MaxInt32 - int32(o.windowSize*2), lowMem: o.lowMem}}
|
||||||
}
|
}
|
||||||
panic("unknown compression level")
|
panic("unknown compression level")
|
||||||
}
|
}
|
||||||
@ -304,7 +306,13 @@ func WithLowerEncoderMem(b bool) EOption {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// WithEncoderDict allows to register a dictionary that will be used for the encode.
|
// WithEncoderDict allows to register a dictionary that will be used for the encode.
|
||||||
|
//
|
||||||
|
// The slice dict must be in the [dictionary format] produced by
|
||||||
|
// "zstd --train" from the Zstandard reference implementation.
|
||||||
|
//
|
||||||
// The encoder *may* choose to use no dictionary instead for certain payloads.
|
// The encoder *may* choose to use no dictionary instead for certain payloads.
|
||||||
|
//
|
||||||
|
// [dictionary format]: https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
|
||||||
func WithEncoderDict(dict []byte) EOption {
|
func WithEncoderDict(dict []byte) EOption {
|
||||||
return func(o *encoderOptions) error {
|
return func(o *encoderOptions) error {
|
||||||
d, err := loadDict(dict)
|
d, err := loadDict(dict)
|
||||||
@ -315,3 +323,17 @@ func WithEncoderDict(dict []byte) EOption {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
// WithEncoderDictRaw registers a dictionary that may be used by the encoder.
|
||||||
|
//
|
||||||
|
// The slice content may contain arbitrary data. It will be used as an initial
|
||||||
|
// history.
|
||||||
|
func WithEncoderDictRaw(id uint32, content []byte) EOption {
|
||||||
|
return func(o *encoderOptions) error {
|
||||||
|
if bits.UintSize > 32 && uint(len(content)) > dictMaxLength {
|
||||||
|
return fmt.Errorf("dictionary of size %d > 2GiB too large", len(content))
|
||||||
|
}
|
||||||
|
o.dict = &dict{id: id, content: content, offsets: [3]int{1, 4, 8}}
|
||||||
|
return nil
|
||||||
|
}
|
||||||
|
}
|
||||||
|
47
vendor/github.com/klauspost/compress/zstd/framedec.go
generated
vendored
47
vendor/github.com/klauspost/compress/zstd/framedec.go
generated
vendored
@ -5,7 +5,7 @@
|
|||||||
package zstd
|
package zstd
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"bytes"
|
"encoding/binary"
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"errors"
|
"errors"
|
||||||
"io"
|
"io"
|
||||||
@ -29,7 +29,7 @@ type frameDec struct {
|
|||||||
|
|
||||||
FrameContentSize uint64
|
FrameContentSize uint64
|
||||||
|
|
||||||
DictionaryID *uint32
|
DictionaryID uint32
|
||||||
HasCheckSum bool
|
HasCheckSum bool
|
||||||
SingleSegment bool
|
SingleSegment bool
|
||||||
}
|
}
|
||||||
@ -43,9 +43,9 @@ const (
|
|||||||
MaxWindowSize = 1 << 29
|
MaxWindowSize = 1 << 29
|
||||||
)
|
)
|
||||||
|
|
||||||
var (
|
const (
|
||||||
frameMagic = []byte{0x28, 0xb5, 0x2f, 0xfd}
|
frameMagic = "\x28\xb5\x2f\xfd"
|
||||||
skippableFrameMagic = []byte{0x2a, 0x4d, 0x18}
|
skippableFrameMagic = "\x2a\x4d\x18"
|
||||||
)
|
)
|
||||||
|
|
||||||
func newFrameDec(o decoderOptions) *frameDec {
|
func newFrameDec(o decoderOptions) *frameDec {
|
||||||
@ -89,9 +89,9 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
copy(signature[1:], b)
|
copy(signature[1:], b)
|
||||||
}
|
}
|
||||||
|
|
||||||
if !bytes.Equal(signature[1:4], skippableFrameMagic) || signature[0]&0xf0 != 0x50 {
|
if string(signature[1:4]) != skippableFrameMagic || signature[0]&0xf0 != 0x50 {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString(skippableFrameMagic))
|
println("Not skippable", hex.EncodeToString(signature[:]), hex.EncodeToString([]byte(skippableFrameMagic)))
|
||||||
}
|
}
|
||||||
// Break if not skippable frame.
|
// Break if not skippable frame.
|
||||||
break
|
break
|
||||||
@ -114,9 +114,9 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
if !bytes.Equal(signature[:], frameMagic) {
|
if string(signature[:]) != frameMagic {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Got magic numbers: ", signature, "want:", frameMagic)
|
println("Got magic numbers: ", signature, "want:", []byte(frameMagic))
|
||||||
}
|
}
|
||||||
return ErrMagicMismatch
|
return ErrMagicMismatch
|
||||||
}
|
}
|
||||||
@ -155,7 +155,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
|
|
||||||
// Read Dictionary_ID
|
// Read Dictionary_ID
|
||||||
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
|
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary_id
|
||||||
d.DictionaryID = nil
|
d.DictionaryID = 0
|
||||||
if size := fhd & 3; size != 0 {
|
if size := fhd & 3; size != 0 {
|
||||||
if size == 3 {
|
if size == 3 {
|
||||||
size = 4
|
size = 4
|
||||||
@ -167,7 +167,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
var id uint32
|
var id uint32
|
||||||
switch size {
|
switch len(b) {
|
||||||
case 1:
|
case 1:
|
||||||
id = uint32(b[0])
|
id = uint32(b[0])
|
||||||
case 2:
|
case 2:
|
||||||
@ -178,11 +178,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("Dict size", size, "ID:", id)
|
println("Dict size", size, "ID:", id)
|
||||||
}
|
}
|
||||||
if id > 0 {
|
d.DictionaryID = id
|
||||||
// ID 0 means "sorry, no dictionary anyway".
|
|
||||||
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#dictionary-format
|
|
||||||
d.DictionaryID = &id
|
|
||||||
}
|
|
||||||
}
|
}
|
||||||
|
|
||||||
// Read Frame_Content_Size
|
// Read Frame_Content_Size
|
||||||
@ -204,7 +200,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
|||||||
println("Reading Frame content", err)
|
println("Reading Frame content", err)
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
switch fcsSize {
|
switch len(b) {
|
||||||
case 1:
|
case 1:
|
||||||
d.FrameContentSize = uint64(b[0])
|
d.FrameContentSize = uint64(b[0])
|
||||||
case 2:
|
case 2:
|
||||||
@ -305,7 +301,7 @@ func (d *frameDec) checkCRC() error {
|
|||||||
}
|
}
|
||||||
|
|
||||||
// We can overwrite upper tmp now
|
// We can overwrite upper tmp now
|
||||||
want, err := d.rawInput.readSmall(4)
|
buf, err := d.rawInput.readSmall(4)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
println("CRC missing?", err)
|
println("CRC missing?", err)
|
||||||
return err
|
return err
|
||||||
@ -315,22 +311,17 @@ func (d *frameDec) checkCRC() error {
|
|||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
|
||||||
var tmp [4]byte
|
want := binary.LittleEndian.Uint32(buf[:4])
|
||||||
got := d.crc.Sum64()
|
got := uint32(d.crc.Sum64())
|
||||||
// Flip to match file order.
|
|
||||||
tmp[0] = byte(got >> 0)
|
|
||||||
tmp[1] = byte(got >> 8)
|
|
||||||
tmp[2] = byte(got >> 16)
|
|
||||||
tmp[3] = byte(got >> 24)
|
|
||||||
|
|
||||||
if !bytes.Equal(tmp[:], want) {
|
if got != want {
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("CRC Check Failed:", tmp[:], "!=", want)
|
printf("CRC check failed: got %08x, want %08x\n", got, want)
|
||||||
}
|
}
|
||||||
return ErrCRCMismatch
|
return ErrCRCMismatch
|
||||||
}
|
}
|
||||||
if debugDecoder {
|
if debugDecoder {
|
||||||
println("CRC ok", tmp[:])
|
printf("CRC ok %08x\n", got)
|
||||||
}
|
}
|
||||||
return nil
|
return nil
|
||||||
}
|
}
|
||||||
|
49
vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
generated
vendored
49
vendor/github.com/klauspost/compress/zstd/internal/xxhash/README.md
generated
vendored
@ -2,12 +2,7 @@
|
|||||||
|
|
||||||
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
|
VENDORED: Go to [github.com/cespare/xxhash](https://github.com/cespare/xxhash) for original package.
|
||||||
|
|
||||||
|
xxhash is a Go implementation of the 64-bit [xxHash] algorithm, XXH64. This is a
|
||||||
[](https://godoc.org/github.com/cespare/xxhash)
|
|
||||||
[](https://travis-ci.org/cespare/xxhash)
|
|
||||||
|
|
||||||
xxhash is a Go implementation of the 64-bit
|
|
||||||
[xxHash](http://cyan4973.github.io/xxHash/) algorithm, XXH64. This is a
|
|
||||||
high-quality hashing algorithm that is much faster than anything in the Go
|
high-quality hashing algorithm that is much faster than anything in the Go
|
||||||
standard library.
|
standard library.
|
||||||
|
|
||||||
@ -28,31 +23,49 @@ func (*Digest) WriteString(string) (int, error)
|
|||||||
func (*Digest) Sum64() uint64
|
func (*Digest) Sum64() uint64
|
||||||
```
|
```
|
||||||
|
|
||||||
This implementation provides a fast pure-Go implementation and an even faster
|
The package is written with optimized pure Go and also contains even faster
|
||||||
assembly implementation for amd64.
|
assembly implementations for amd64 and arm64. If desired, the `purego` build tag
|
||||||
|
opts into using the Go code even on those architectures.
|
||||||
|
|
||||||
|
[xxHash]: http://cyan4973.github.io/xxHash/
|
||||||
|
|
||||||
|
## Compatibility
|
||||||
|
|
||||||
|
This package is in a module and the latest code is in version 2 of the module.
|
||||||
|
You need a version of Go with at least "minimal module compatibility" to use
|
||||||
|
github.com/cespare/xxhash/v2:
|
||||||
|
|
||||||
|
* 1.9.7+ for Go 1.9
|
||||||
|
* 1.10.3+ for Go 1.10
|
||||||
|
* Go 1.11 or later
|
||||||
|
|
||||||
|
I recommend using the latest release of Go.
|
||||||
|
|
||||||
## Benchmarks
|
## Benchmarks
|
||||||
|
|
||||||
Here are some quick benchmarks comparing the pure-Go and assembly
|
Here are some quick benchmarks comparing the pure-Go and assembly
|
||||||
implementations of Sum64.
|
implementations of Sum64.
|
||||||
|
|
||||||
| input size | purego | asm |
|
| input size | purego | asm |
|
||||||
| --- | --- | --- |
|
| ---------- | --------- | --------- |
|
||||||
| 5 B | 979.66 MB/s | 1291.17 MB/s |
|
| 4 B | 1.3 GB/s | 1.2 GB/s |
|
||||||
| 100 B | 7475.26 MB/s | 7973.40 MB/s |
|
| 16 B | 2.9 GB/s | 3.5 GB/s |
|
||||||
| 4 KB | 17573.46 MB/s | 17602.65 MB/s |
|
| 100 B | 6.9 GB/s | 8.1 GB/s |
|
||||||
| 10 MB | 17131.46 MB/s | 17142.16 MB/s |
|
| 4 KB | 11.7 GB/s | 16.7 GB/s |
|
||||||
|
| 10 MB | 12.0 GB/s | 17.3 GB/s |
|
||||||
|
|
||||||
These numbers were generated on Ubuntu 18.04 with an Intel i7-8700K CPU using
|
These numbers were generated on Ubuntu 20.04 with an Intel Xeon Platinum 8252C
|
||||||
the following commands under Go 1.11.2:
|
CPU using the following commands under Go 1.19.2:
|
||||||
|
|
||||||
```
|
```
|
||||||
$ go test -tags purego -benchtime 10s -bench '/xxhash,direct,bytes'
|
benchstat <(go test -tags purego -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||||
$ go test -benchtime 10s -bench '/xxhash,direct,bytes'
|
benchstat <(go test -benchtime 500ms -count 15 -bench 'Sum64$')
|
||||||
```
|
```
|
||||||
|
|
||||||
## Projects using this package
|
## Projects using this package
|
||||||
|
|
||||||
- [InfluxDB](https://github.com/influxdata/influxdb)
|
- [InfluxDB](https://github.com/influxdata/influxdb)
|
||||||
- [Prometheus](https://github.com/prometheus/prometheus)
|
- [Prometheus](https://github.com/prometheus/prometheus)
|
||||||
|
- [VictoriaMetrics](https://github.com/VictoriaMetrics/VictoriaMetrics)
|
||||||
- [FreeCache](https://github.com/coocood/freecache)
|
- [FreeCache](https://github.com/coocood/freecache)
|
||||||
|
- [FastCache](https://github.com/VictoriaMetrics/fastcache)
|
||||||
|
47
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
generated
vendored
47
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash.go
generated
vendored
@ -18,19 +18,11 @@ const (
|
|||||||
prime5 uint64 = 2870177450012600261
|
prime5 uint64 = 2870177450012600261
|
||||||
)
|
)
|
||||||
|
|
||||||
// NOTE(caleb): I'm using both consts and vars of the primes. Using consts where
|
// Store the primes in an array as well.
|
||||||
// possible in the Go code is worth a small (but measurable) performance boost
|
//
|
||||||
// by avoiding some MOVQs. Vars are needed for the asm and also are useful for
|
// The consts are used when possible in Go code to avoid MOVs but we need a
|
||||||
// convenience in the Go code in a few places where we need to intentionally
|
// contiguous array of the assembly code.
|
||||||
// avoid constant arithmetic (e.g., v1 := prime1 + prime2 fails because the
|
var primes = [...]uint64{prime1, prime2, prime3, prime4, prime5}
|
||||||
// result overflows a uint64).
|
|
||||||
var (
|
|
||||||
prime1v = prime1
|
|
||||||
prime2v = prime2
|
|
||||||
prime3v = prime3
|
|
||||||
prime4v = prime4
|
|
||||||
prime5v = prime5
|
|
||||||
)
|
|
||||||
|
|
||||||
// Digest implements hash.Hash64.
|
// Digest implements hash.Hash64.
|
||||||
type Digest struct {
|
type Digest struct {
|
||||||
@ -52,10 +44,10 @@ func New() *Digest {
|
|||||||
|
|
||||||
// Reset clears the Digest's state so that it can be reused.
|
// Reset clears the Digest's state so that it can be reused.
|
||||||
func (d *Digest) Reset() {
|
func (d *Digest) Reset() {
|
||||||
d.v1 = prime1v + prime2
|
d.v1 = primes[0] + prime2
|
||||||
d.v2 = prime2
|
d.v2 = prime2
|
||||||
d.v3 = 0
|
d.v3 = 0
|
||||||
d.v4 = -prime1v
|
d.v4 = -primes[0]
|
||||||
d.total = 0
|
d.total = 0
|
||||||
d.n = 0
|
d.n = 0
|
||||||
}
|
}
|
||||||
@ -71,21 +63,23 @@ func (d *Digest) Write(b []byte) (n int, err error) {
|
|||||||
n = len(b)
|
n = len(b)
|
||||||
d.total += uint64(n)
|
d.total += uint64(n)
|
||||||
|
|
||||||
|
memleft := d.mem[d.n&(len(d.mem)-1):]
|
||||||
|
|
||||||
if d.n+n < 32 {
|
if d.n+n < 32 {
|
||||||
// This new data doesn't even fill the current block.
|
// This new data doesn't even fill the current block.
|
||||||
copy(d.mem[d.n:], b)
|
copy(memleft, b)
|
||||||
d.n += n
|
d.n += n
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
||||||
if d.n > 0 {
|
if d.n > 0 {
|
||||||
// Finish off the partial block.
|
// Finish off the partial block.
|
||||||
copy(d.mem[d.n:], b)
|
c := copy(memleft, b)
|
||||||
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
d.v1 = round(d.v1, u64(d.mem[0:8]))
|
||||||
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
d.v2 = round(d.v2, u64(d.mem[8:16]))
|
||||||
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
d.v3 = round(d.v3, u64(d.mem[16:24]))
|
||||||
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
d.v4 = round(d.v4, u64(d.mem[24:32]))
|
||||||
b = b[32-d.n:]
|
b = b[c:]
|
||||||
d.n = 0
|
d.n = 0
|
||||||
}
|
}
|
||||||
|
|
||||||
@ -135,21 +129,20 @@ func (d *Digest) Sum64() uint64 {
|
|||||||
|
|
||||||
h += d.total
|
h += d.total
|
||||||
|
|
||||||
i, end := 0, d.n
|
b := d.mem[:d.n&(len(d.mem)-1)]
|
||||||
for ; i+8 <= end; i += 8 {
|
for ; len(b) >= 8; b = b[8:] {
|
||||||
k1 := round(0, u64(d.mem[i:i+8]))
|
k1 := round(0, u64(b[:8]))
|
||||||
h ^= k1
|
h ^= k1
|
||||||
h = rol27(h)*prime1 + prime4
|
h = rol27(h)*prime1 + prime4
|
||||||
}
|
}
|
||||||
if i+4 <= end {
|
if len(b) >= 4 {
|
||||||
h ^= uint64(u32(d.mem[i:i+4])) * prime1
|
h ^= uint64(u32(b[:4])) * prime1
|
||||||
h = rol23(h)*prime2 + prime3
|
h = rol23(h)*prime2 + prime3
|
||||||
i += 4
|
b = b[4:]
|
||||||
}
|
}
|
||||||
for i < end {
|
for ; len(b) > 0; b = b[1:] {
|
||||||
h ^= uint64(d.mem[i]) * prime5
|
h ^= uint64(b[0]) * prime5
|
||||||
h = rol11(h) * prime1
|
h = rol11(h) * prime1
|
||||||
i++
|
|
||||||
}
|
}
|
||||||
|
|
||||||
h ^= h >> 33
|
h ^= h >> 33
|
||||||
|
308
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
generated
vendored
308
vendor/github.com/klauspost/compress/zstd/internal/xxhash/xxhash_amd64.s
generated
vendored
@ -1,3 +1,4 @@
|
|||||||
|
//go:build !appengine && gc && !purego && !noasm
|
||||||
// +build !appengine
|
// +build !appengine
|
||||||
// +build gc
|
// +build gc
|
||||||
// +build !purego
|
// +build !purego
|
||||||
@ -5,212 +6,205 @@
|
|||||||
|
|
||||||
#include "textflag.h"
|
#include "textflag.h"
|
||||||
|
|
||||||
// Register allocation:
|
// Registers:
|
||||||
// AX h
|
#define h AX
|
||||||
// SI pointer to advance through b
|
#define d AX
|
||||||
// DX n
|
#define p SI // pointer to advance through b
|
||||||
// BX loop end
|
#define n DX
|
||||||
// R8 v1, k1
|
#define end BX // loop end
|
||||||
// R9 v2
|
#define v1 R8
|
||||||
// R10 v3
|
#define v2 R9
|
||||||
// R11 v4
|
#define v3 R10
|
||||||
// R12 tmp
|
#define v4 R11
|
||||||
// R13 prime1v
|
#define x R12
|
||||||
// R14 prime2v
|
#define prime1 R13
|
||||||
// DI prime4v
|
#define prime2 R14
|
||||||
|
#define prime4 DI
|
||||||
|
|
||||||
// round reads from and advances the buffer pointer in SI.
|
#define round(acc, x) \
|
||||||
// It assumes that R13 has prime1v and R14 has prime2v.
|
IMULQ prime2, x \
|
||||||
#define round(r) \
|
ADDQ x, acc \
|
||||||
MOVQ (SI), R12 \
|
ROLQ $31, acc \
|
||||||
ADDQ $8, SI \
|
IMULQ prime1, acc
|
||||||
IMULQ R14, R12 \
|
|
||||||
ADDQ R12, r \
|
|
||||||
ROLQ $31, r \
|
|
||||||
IMULQ R13, r
|
|
||||||
|
|
||||||
// mergeRound applies a merge round on the two registers acc and val.
|
// round0 performs the operation x = round(0, x).
|
||||||
// It assumes that R13 has prime1v, R14 has prime2v, and DI has prime4v.
|
#define round0(x) \
|
||||||
#define mergeRound(acc, val) \
|
IMULQ prime2, x \
|
||||||
IMULQ R14, val \
|
ROLQ $31, x \
|
||||||
ROLQ $31, val \
|
IMULQ prime1, x
|
||||||
IMULQ R13, val \
|
|
||||||
XORQ val, acc \
|
// mergeRound applies a merge round on the two registers acc and x.
|
||||||
IMULQ R13, acc \
|
// It assumes that prime1, prime2, and prime4 have been loaded.
|
||||||
ADDQ DI, acc
|
#define mergeRound(acc, x) \
|
||||||
|
round0(x) \
|
||||||
|
XORQ x, acc \
|
||||||
|
IMULQ prime1, acc \
|
||||||
|
ADDQ prime4, acc
|
||||||
|
|
||||||
|
// blockLoop processes as many 32-byte blocks as possible,
|
||||||
|
// updating v1, v2, v3, and v4. It assumes that there is at least one block
|
||||||
|
// to process.
|
||||||
|
#define blockLoop() \
|
||||||
|
loop: \
|
||||||
|
MOVQ +0(p), x \
|
||||||
|
round(v1, x) \
|
||||||
|
MOVQ +8(p), x \
|
||||||
|
round(v2, x) \
|
||||||
|
MOVQ +16(p), x \
|
||||||
|
round(v3, x) \
|
||||||
|
MOVQ +24(p), x \
|
||||||
|
round(v4, x) \
|
||||||
|
ADDQ $32, p \
|
||||||
|
CMPQ p, end \
|
||||||
|
JLE loop
|
||||||
|
|
||||||
// func Sum64(b []byte) uint64
|
// func Sum64(b []byte) uint64
|
||||||
TEXT ·Sum64(SB), NOSPLIT, $0-32
|
TEXT ·Sum64(SB), NOSPLIT|NOFRAME, $0-32
|
||||||
// Load fixed primes.
|
// Load fixed primes.
|
||||||
MOVQ ·prime1v(SB), R13
|
MOVQ ·primes+0(SB), prime1
|
||||||
MOVQ ·prime2v(SB), R14
|
MOVQ ·primes+8(SB), prime2
|
||||||
MOVQ ·prime4v(SB), DI
|
MOVQ ·primes+24(SB), prime4
|
||||||
|
|
||||||
// Load slice.
|
// Load slice.
|
||||||
MOVQ b_base+0(FP), SI
|
MOVQ b_base+0(FP), p
|
||||||
MOVQ b_len+8(FP), DX
|
MOVQ b_len+8(FP), n
|
||||||
LEAQ (SI)(DX*1), BX
|
LEAQ (p)(n*1), end
|
||||||
|
|
||||||
// The first loop limit will be len(b)-32.
|
// The first loop limit will be len(b)-32.
|
||||||
SUBQ $32, BX
|
SUBQ $32, end
|
||||||
|
|
||||||
// Check whether we have at least one block.
|
// Check whether we have at least one block.
|
||||||
CMPQ DX, $32
|
CMPQ n, $32
|
||||||
JLT noBlocks
|
JLT noBlocks
|
||||||
|
|
||||||
// Set up initial state (v1, v2, v3, v4).
|
// Set up initial state (v1, v2, v3, v4).
|
||||||
MOVQ R13, R8
|
MOVQ prime1, v1
|
||||||
ADDQ R14, R8
|
ADDQ prime2, v1
|
||||||
MOVQ R14, R9
|
MOVQ prime2, v2
|
||||||
XORQ R10, R10
|
XORQ v3, v3
|
||||||
XORQ R11, R11
|
XORQ v4, v4
|
||||||
SUBQ R13, R11
|
SUBQ prime1, v4
|
||||||
|
|
||||||
// Loop until SI > BX.
|
blockLoop()
|
||||||
blockLoop:
|
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
MOVQ v1, h
|
||||||
JLE blockLoop
|
ROLQ $1, h
|
||||||
|
MOVQ v2, x
|
||||||
|
ROLQ $7, x
|
||||||
|
ADDQ x, h
|
||||||
|
MOVQ v3, x
|
||||||
|
ROLQ $12, x
|
||||||
|
ADDQ x, h
|
||||||
|
MOVQ v4, x
|
||||||
|
ROLQ $18, x
|
||||||
|
ADDQ x, h
|
||||||
|
|
||||||
MOVQ R8, AX
|
mergeRound(h, v1)
|
||||||
ROLQ $1, AX
|
mergeRound(h, v2)
|
||||||
MOVQ R9, R12
|
mergeRound(h, v3)
|
||||||
ROLQ $7, R12
|
mergeRound(h, v4)
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R10, R12
|
|
||||||
ROLQ $12, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
MOVQ R11, R12
|
|
||||||
ROLQ $18, R12
|
|
||||||
ADDQ R12, AX
|
|
||||||
|
|
||||||
mergeRound(AX, R8)
|
|
||||||
mergeRound(AX, R9)
|
|
||||||
mergeRound(AX, R10)
|
|
||||||
mergeRound(AX, R11)
|
|
||||||
|
|
||||||
JMP afterBlocks
|
JMP afterBlocks
|
||||||
|
|
||||||
noBlocks:
|
noBlocks:
|
||||||
MOVQ ·prime5v(SB), AX
|
MOVQ ·primes+32(SB), h
|
||||||
|
|
||||||
afterBlocks:
|
afterBlocks:
|
||||||
ADDQ DX, AX
|
ADDQ n, h
|
||||||
|
|
||||||
// Right now BX has len(b)-32, and we want to loop until SI > len(b)-8.
|
ADDQ $24, end
|
||||||
ADDQ $24, BX
|
CMPQ p, end
|
||||||
|
JG try4
|
||||||
|
|
||||||
CMPQ SI, BX
|
loop8:
|
||||||
JG fourByte
|
MOVQ (p), x
|
||||||
|
ADDQ $8, p
|
||||||
|
round0(x)
|
||||||
|
XORQ x, h
|
||||||
|
ROLQ $27, h
|
||||||
|
IMULQ prime1, h
|
||||||
|
ADDQ prime4, h
|
||||||
|
|
||||||
wordLoop:
|
CMPQ p, end
|
||||||
// Calculate k1.
|
JLE loop8
|
||||||
MOVQ (SI), R8
|
|
||||||
ADDQ $8, SI
|
|
||||||
IMULQ R14, R8
|
|
||||||
ROLQ $31, R8
|
|
||||||
IMULQ R13, R8
|
|
||||||
|
|
||||||
XORQ R8, AX
|
try4:
|
||||||
ROLQ $27, AX
|
ADDQ $4, end
|
||||||
IMULQ R13, AX
|
CMPQ p, end
|
||||||
ADDQ DI, AX
|
JG try1
|
||||||
|
|
||||||
CMPQ SI, BX
|
MOVL (p), x
|
||||||
JLE wordLoop
|
ADDQ $4, p
|
||||||
|
IMULQ prime1, x
|
||||||
|
XORQ x, h
|
||||||
|
|
||||||
fourByte:
|
ROLQ $23, h
|
||||||
ADDQ $4, BX
|
IMULQ prime2, h
|
||||||
CMPQ SI, BX
|
ADDQ ·primes+16(SB), h
|
||||||
JG singles
|
|
||||||
|
|
||||||
MOVL (SI), R8
|
try1:
|
||||||
ADDQ $4, SI
|
ADDQ $4, end
|
||||||
IMULQ R13, R8
|
CMPQ p, end
|
||||||
XORQ R8, AX
|
|
||||||
|
|
||||||
ROLQ $23, AX
|
|
||||||
IMULQ R14, AX
|
|
||||||
ADDQ ·prime3v(SB), AX
|
|
||||||
|
|
||||||
singles:
|
|
||||||
ADDQ $4, BX
|
|
||||||
CMPQ SI, BX
|
|
||||||
JGE finalize
|
JGE finalize
|
||||||
|
|
||||||
singlesLoop:
|
loop1:
|
||||||
MOVBQZX (SI), R12
|
MOVBQZX (p), x
|
||||||
ADDQ $1, SI
|
ADDQ $1, p
|
||||||
IMULQ ·prime5v(SB), R12
|
IMULQ ·primes+32(SB), x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
|
ROLQ $11, h
|
||||||
|
IMULQ prime1, h
|
||||||
|
|
||||||
ROLQ $11, AX
|
CMPQ p, end
|
||||||
IMULQ R13, AX
|
JL loop1
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JL singlesLoop
|
|
||||||
|
|
||||||
finalize:
|
finalize:
|
||||||
MOVQ AX, R12
|
MOVQ h, x
|
||||||
SHRQ $33, R12
|
SHRQ $33, x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
IMULQ R14, AX
|
IMULQ prime2, h
|
||||||
MOVQ AX, R12
|
MOVQ h, x
|
||||||
SHRQ $29, R12
|
SHRQ $29, x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
IMULQ ·prime3v(SB), AX
|
IMULQ ·primes+16(SB), h
|
||||||
MOVQ AX, R12
|
MOVQ h, x
|
||||||
SHRQ $32, R12
|
SHRQ $32, x
|
||||||
XORQ R12, AX
|
XORQ x, h
|
||||||
|
|
||||||
MOVQ AX, ret+24(FP)
|
MOVQ h, ret+24(FP)
|
||||||
RET
|
RET
|
||||||
|
|
||||||
// writeBlocks uses the same registers as above except that it uses AX to store
|
|
||||||
// the d pointer.
|
|
||||||
|
|
||||||
// func writeBlocks(d *Digest, b []byte) int
|
// func writeBlocks(d *Digest, b []byte) int
|
||||||
TEXT ·writeBlocks(SB), NOSPLIT, $0-40
|
TEXT ·writeBlocks(SB), NOSPLIT|NOFRAME, $0-40
|
||||||
// Load fixed primes needed for round.
|
// Load fixed primes needed for round.
|
||||||
MOVQ ·prime1v(SB), R13
|
MOVQ ·primes+0(SB), prime1
|
||||||
MOVQ ·prime2v(SB), R14
|
MOVQ ·primes+8(SB), prime2
|
||||||
|
|
||||||
// Load slice.
|
// Load slice.
|
||||||
MOVQ b_base+8(FP), SI
|
MOVQ b_base+8(FP), p
|
||||||
MOVQ b_len+16(FP), DX
|
MOVQ b_len+16(FP), n
|
||||||
LEAQ (SI)(DX*1), BX
|
LEAQ (p)(n*1), end
|
||||||
SUBQ $32, BX
|
SUBQ $32, end
|
||||||
|
|
||||||
// Load vN from d.
|
// Load vN from d.
|
||||||
MOVQ d+0(FP), AX
|
MOVQ s+0(FP), d
|
||||||
MOVQ 0(AX), R8 // v1
|
MOVQ 0(d), v1
|
||||||
MOVQ 8(AX), R9 // v2
|
MOVQ 8(d), v2
|
||||||
MOVQ 16(AX), R10 // v3
|
MOVQ 16(d), v3
|
||||||
MOVQ 24(AX), R11 // v4
|
MOVQ 24(d), v4
|
||||||
|
|
||||||
// We don't need to check the loop condition here; this function is
|
// We don't need to check the loop condition here; this function is
|
||||||
// always called with at least one block of data to process.
|
// always called with at least one block of data to process.
|
||||||
blockLoop:
|
blockLoop()
|
||||||
round(R8)
|
|
||||||
round(R9)
|
|
||||||
round(R10)
|
|
||||||
round(R11)
|
|
||||||
|
|
||||||
CMPQ SI, BX
|
|
||||||
JLE blockLoop
|
|
||||||
|
|
||||||
// Copy vN back to d.
|
// Copy vN back to d.
|
||||||
MOVQ R8, 0(AX)
|
MOVQ v1, 0(d)
|
||||||
MOVQ R9, 8(AX)
|
MOVQ v2, 8(d)
|
||||||
MOVQ R10, 16(AX)
|
MOVQ v3, 16(d)
|
||||||
MOVQ R11, 24(AX)
|
MOVQ v4, 24(d)
|
||||||
|
|
||||||
// The number of bytes written is SI minus the old base pointer.
|
// The number of bytes written is p minus the old base pointer.
|
||||||
SUBQ b_base+8(FP), SI
|
SUBQ b_base+8(FP), p
|
||||||
MOVQ SI, ret+32(FP)
|
MOVQ p, ret+32(FP)
|
||||||
|
|
||||||
RET
|
RET
|
||||||
|
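The vendored diff above adds raw-content dictionary registration on both sides: WithEncoderDictRaw in encoder_options.go and WithDecoderDictRaw in decoder_options.go, backed by the new frameDec.DictionaryID and Decoder.setDict plumbing. The following round-trip is a minimal sketch of how that pair fits together; it is not part of this commit, and the dictionary ID, history content, and payload are made-up illustrative values.

```go
// Round-trip sketch for the raw-dictionary options (illustrative only).
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	const dictID = 42 // hypothetical ID; any non-zero value shared by both sides works
	history := []byte("some shared prefix both sides agree on")

	// Encoder side: register the raw content as initial history.
	enc, err := zstd.NewWriter(nil, zstd.WithEncoderDictRaw(dictID, history))
	if err != nil {
		panic(err)
	}
	defer enc.Close()
	compressed := enc.EncodeAll([]byte("some shared prefix both sides agree on, plus new data"), nil)

	// Decoder side: the same ID/content must be registered; per the new
	// Decoder.setDict, a frame referencing an unregistered non-zero ID
	// fails with ErrUnknownDictionary, while ID 0 is treated as "no dictionary".
	dec, err := zstd.NewReader(nil, zstd.WithDecoderDictRaw(dictID, history))
	if err != nil {
		panic(err)
	}
	defer dec.Close()
	plain, err := dec.DecodeAll(compressed, nil)
	if err != nil {
		panic(err)
	}
	fmt.Printf("%d compressed bytes round-tripped to %d plain bytes\n", len(compressed), len(plain))
}
```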
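encoder.go above also gains Encoder.MaxEncodedSize, an upper bound on the frame size for a given input length (header, per-block overhead, payload, optional CRC and padding). Below is a small sketch of using it to pre-size the EncodeAll destination so the internal append never reallocates; the 1 MiB payload is an arbitrary stand-in, not taken from the diff.

```go
// Pre-sizing the EncodeAll destination with MaxEncodedSize (illustrative only).
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	enc, err := zstd.NewWriter(nil)
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	payload := make([]byte, 1<<20) // 1 MiB of zeros, stands in for real data

	// MaxEncodedSize bounds the worst-case frame size, so a buffer with this
	// capacity is always large enough for the compressed output.
	dst := make([]byte, 0, enc.MaxEncodedSize(len(payload)))
	dst = enc.EncodeAll(payload, dst)

	fmt.Printf("compressed %d bytes into %d (cap %d)\n", len(payload), len(dst), cap(dst))
}
```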
Some files were not shown because too many files have changed in this diff.