vendor: update buildkit to master@ae9d0f5
Signed-off-by: Justin Chadwell <me@jedevc.com>
vendor/github.com/containerd/containerd/content/helpers.go (generated, vendored; 68 changes)
@@ -26,10 +26,16 @@ import (
    "time"

    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/log"
    "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// maxResets is the no.of times the Copy() method can tolerate a reset of the body
const maxResets = 5

var ErrReset = errors.New("writer has been reset")

var bufPool = sync.Pool{
    New: func() interface{} {
        buffer := make([]byte, 1<<20)
@@ -80,7 +86,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc o
        return fmt.Errorf("failed to open writer: %w", err)
    }

    return nil // all ready present
    return nil // already present
}
defer cw.Close()

@@ -131,35 +137,63 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er
// the size or digest is unknown, these values may be empty.
//
// Copy is buffered, so no need to wrap reader in buffered io.
func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
    ws, err := cw.Status()
    if err != nil {
        return fmt.Errorf("failed to get status: %w", err)
    }

    r := or
    if ws.Offset > 0 {
        r, err = seekReader(r, ws.Offset, size)
        r, err = seekReader(or, ws.Offset, size)
        if err != nil {
            return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
        }
    }

    copied, err := copyWithBuffer(cw, r)
    if err != nil {
        return fmt.Errorf("failed to copy: %w", err)
    }
    if size != 0 && copied < size-ws.Offset {
        // Short writes would return its own error, this indicates a read failure
        return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
    }

    if err := cw.Commit(ctx, size, expected, opts...); err != nil {
        if !errdefs.IsAlreadyExists(err) {
            return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
    for i := 0; i < maxResets; i++ {
        if i >= 1 {
            log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset")
        }
        copied, err := copyWithBuffer(cw, r)
        if errors.Is(err, ErrReset) {
            ws, err := cw.Status()
            if err != nil {
                return fmt.Errorf("failed to get status: %w", err)
            }
            r, err = seekReader(or, ws.Offset, size)
            if err != nil {
                return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
            }
            continue
        }
        if err != nil {
            return fmt.Errorf("failed to copy: %w", err)
        }
        if size != 0 && copied < size-ws.Offset {
            // Short writes would return its own error, this indicates a read failure
            return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
        }
        if err := cw.Commit(ctx, size, expected, opts...); err != nil {
            if errors.Is(err, ErrReset) {
                ws, err := cw.Status()
                if err != nil {
                    return fmt.Errorf("failed to get status: %w", err)
                }
                r, err = seekReader(or, ws.Offset, size)
                if err != nil {
                    return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
                }
                continue
            }
            if !errdefs.IsAlreadyExists(err) {
                return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
            }
        }
        return nil
    }

    return nil
    log.G(ctx).WithField("digest", expected).Errorf("failed to copy after %d retries", maxResets)
    return fmt.Errorf("failed to copy after %d retries", maxResets)
}

// CopyReaderAt copies to a writer from a given reader at for the given
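For orientation, the rewritten Copy() above turns a single-shot copy into a bounded retry loop: whenever the writer reports ErrReset, the reader is re-seeked to the writer's surviving offset and the copy restarts, up to maxResets times. A minimal Go sketch of that pattern follows; resettableWriter and copyWithResets are illustrative stand-ins, not the real containerd API.

```go
package sketch

import (
    "errors"
    "fmt"
    "io"
)

var errReset = errors.New("writer has been reset")

const maxResets = 5

// resettableWriter stands in for content.Writer: Write may fail with
// errReset, after which Offset reports how many bytes survived.
type resettableWriter interface {
    io.Writer
    Offset() int64
}

func copyWithResets(dst resettableWriter, src io.ReadSeeker) error {
    for i := 0; i < maxResets; i++ {
        // Resume from whatever the destination still holds.
        if _, err := src.Seek(dst.Offset(), io.SeekStart); err != nil {
            return fmt.Errorf("unable to resume: %w", err)
        }
        _, err := io.Copy(dst, src)
        if errors.Is(err, errReset) {
            continue // destination was reset; seek back and try again
        }
        return err // nil on success, or a genuine copy failure
    }
    return fmt.Errorf("failed to copy after %d retries", maxResets)
}
```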
vendor/github.com/containerd/containerd/content/local/store.go (generated, vendored; 1 change)
@@ -643,7 +643,6 @@ func (s *store) ingestRoot(ref string) string {
// - root: entire ingest directory
// - ref: name of the starting ref, must be unique
// - data: file where data is written
//
func (s *store) ingestPaths(ref string) (string, string, string) {
    var (
        fp = s.ingestRoot(ref)
vendor/github.com/containerd/containerd/filters/filter.go (generated, vendored; 1 change)
@@ -65,7 +65,6 @@
// ```
// name==foo,labels.bar
// ```
//
package filters

import (
vendor/github.com/containerd/containerd/filters/parser.go (generated, vendored; 1 change)
@@ -45,7 +45,6 @@ field := quoted | [A-Za-z] [A-Za-z0-9_]+
operator := "==" | "!=" | "~="
value := quoted | [^\s,]+
quoted := <go string syntax>

*/
func Parse(s string) (Filter, error) {
    // special case empty to match all
vendor/github.com/containerd/containerd/filters/quote.go (generated, vendored; 8 changes)
@@ -31,10 +31,10 @@ var errQuoteSyntax = errors.New("quote syntax error")
// or character literal represented by the string s.
// It returns four values:
//
// 1) value, the decoded Unicode code point or byte value;
// 2) multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
// 3) tail, the remainder of the string after the character; and
// 4) an error that will be nil if the character is syntactically valid.
//  1. value, the decoded Unicode code point or byte value;
//  2. multibyte, a boolean indicating whether the decoded character requires a multibyte UTF-8 representation;
//  3. tail, the remainder of the string after the character; and
//  4. an error that will be nil if the character is syntactically valid.
//
// The second argument, quote, specifies the type of literal being parsed
// and therefore which escaped quote character is permitted.
vendor/github.com/containerd/containerd/namespaces/store.go (generated, vendored; 2 changes)
@@ -24,8 +24,6 @@ import "context"
// oriented. A namespace is really just a name and a set of labels. Objects
// that belong to a namespace are returned when the namespace is assigned to a
// given context.
//
//
type Store interface {
    Create(ctx context.Context, namespace string, labels map[string]string) error
    Labels(ctx context.Context, namespace string) (map[string]string, error)
vendor/github.com/containerd/containerd/platforms/platforms.go (generated, vendored; 38 changes)
@@ -27,40 +27,40 @@
// The vast majority of use cases should simply use the match function with
// user input. The first step is to parse a specifier into a matcher:
//
// m, err := Parse("linux")
// if err != nil { ... }
//    m, err := Parse("linux")
//    if err != nil { ... }
//
// Once you have a matcher, use it to match against the platform declared by a
// component, typically from an image or runtime. Since extracting an images
// platform is a little more involved, we'll use an example against the
// platform default:
//
// if ok := m.Match(Default()); !ok { /* doesn't match */ }
//    if ok := m.Match(Default()); !ok { /* doesn't match */ }
//
// This can be composed in loops for resolving runtimes or used as a filter for
// fetch and select images.
//
// More details of the specifier syntax and platform spec follow.
//
// Declaring Platform Support
// # Declaring Platform Support
//
// Components that have strict platform requirements should use the OCI
// platform specification to declare their support. Typically, this will be
// images and runtimes that should make these declaring which platform they
// support specifically. This looks roughly as follows:
//
// type Platform struct {
//   Architecture string
//   OS string
//   Variant string
// }
//    type Platform struct {
//        Architecture string
//        OS string
//        Variant string
//    }
//
// Most images and runtimes should at least set Architecture and OS, according
// to their GOARCH and GOOS values, respectively (follow the OCI image
// specification when in doubt). ARM should set variant under certain
// discussions, which are outlined below.
//
// Platform Specifiers
// # Platform Specifiers
//
// While the OCI platform specifications provide a tool for components to
// specify structured information, user input typically doesn't need the full
@@ -77,7 +77,7 @@
// where the architecture may be known but a runtime may support images from
// different operating systems.
//
// Normalization
// # Normalization
//
// Because not all users are familiar with the way the Go runtime represents
// platforms, several normalizations have been provided to make this package
@@ -85,17 +85,17 @@
//
// The following are performed for architectures:
//
// Value Normalized
// aarch64 arm64
// armhf arm
// armel arm/v6
// i386 386
// x86_64 amd64
// x86-64 amd64
//    Value     Normalized
//    aarch64   arm64
//    armhf     arm
//    armel     arm/v6
//    i386      386
//    x86_64    amd64
//    x86-64    amd64
//
// We also normalize the operating system `macos` to `darwin`.
//
// ARM Support
// # ARM Support
//
// To qualify ARM architecture, the Variant field is used to qualify the arm
// version. The most common arm version, v7, is represented without the variant
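The matcher and normalization behavior documented above can be exercised directly: Parse applies the normalization table and Format renders the canonical specifier. A short, hedged usage example against this vendored containerd package:

```go
package main

import (
    "fmt"

    "github.com/containerd/containerd/platforms"
)

func main() {
    // "aarch64" is normalized to "arm64" per the table in the doc comment.
    p, err := platforms.Parse("linux/aarch64")
    if err != nil {
        panic(err)
    }
    fmt.Println(platforms.Format(p)) // linux/arm64
}
```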
vendor/github.com/containerd/containerd/remotes/docker/auth/parse.go (generated, vendored; 3 changes)
@@ -134,9 +134,6 @@ func parseValueAndParams(header string) (value string, params map[string]string)
    }
    var pvalue string
    pvalue, s = expectTokenOrQuoted(s[1:])
    if pvalue == "" {
        return
    }
    pkey = strings.ToLower(pkey)
    params[pkey] = pvalue
    s = skipSpace(s)
vendor/github.com/containerd/containerd/remotes/docker/pusher.go (generated, vendored; 157 changes)
@@ -24,6 +24,7 @@ import (
    "net/http"
    "net/url"
    "strings"
    "sync"
    "time"

    "github.com/containerd/containerd/content"
@@ -261,27 +262,20 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str

    // TODO: Support chunked upload

    pr, pw := io.Pipe()
    respC := make(chan response, 1)
    body := io.NopCloser(pr)
    pushw := newPushWriter(p.dockerBase, ref, desc.Digest, p.tracker, isManifest)

    req.body = func() (io.ReadCloser, error) {
        if body == nil {
            return nil, errors.New("cannot reuse body, request must be retried")
        }
        // Only use the body once since pipe cannot be seeked
        ob := body
        body = nil
        return ob, nil
        pr, pw := io.Pipe()
        pushw.setPipe(pw)
        return io.NopCloser(pr), nil
    }
    req.size = desc.Size

    go func() {
        defer close(respC)
        resp, err := req.doWithRetries(ctx, nil)
        if err != nil {
            respC <- response{err: err}
            pr.CloseWithError(err)
            pushw.setError(err)
            pushw.Close()
            return
        }

@@ -290,20 +284,13 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
        default:
            err := remoteserrors.NewUnexpectedStatusErr(resp)
            log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
            pr.CloseWithError(err)
            pushw.setError(err)
            pushw.Close()
        }
        respC <- response{Response: resp}
        pushw.setResponse(resp)
    }()

    return &pushWriter{
        base: p.dockerBase,
        ref: ref,
        pipe: pw,
        responseC: respC,
        isManifest: isManifest,
        expected: desc.Digest,
        tracker: p.tracker,
    }, nil
    return pushw, nil
}

func getManifestPath(object string, dgst digest.Digest) []string {
@@ -325,28 +312,81 @@ func getManifestPath(object string, dgst digest.Digest) []string {
    return []string{"manifests", object}
}

type response struct {
    *http.Response
    err error
}

type pushWriter struct {
    base *dockerBase
    ref  string

    pipe      *io.PipeWriter
    responseC <-chan response
    pipe *io.PipeWriter

    pipeC     chan *io.PipeWriter
    respC     chan *http.Response
    closeOnce sync.Once
    errC      chan error

    isManifest bool

    expected digest.Digest
    tracker  StatusTracker
}

func newPushWriter(db *dockerBase, ref string, expected digest.Digest, tracker StatusTracker, isManifest bool) *pushWriter {
    // Initialize and create response
    return &pushWriter{
        base:       db,
        ref:        ref,
        expected:   expected,
        tracker:    tracker,
        pipeC:      make(chan *io.PipeWriter, 1),
        respC:      make(chan *http.Response, 1),
        errC:       make(chan error, 1),
        isManifest: isManifest,
    }
}

func (pw *pushWriter) setPipe(p *io.PipeWriter) {
    pw.pipeC <- p
}

func (pw *pushWriter) setError(err error) {
    pw.errC <- err
}
func (pw *pushWriter) setResponse(resp *http.Response) {
    pw.respC <- resp
}

func (pw *pushWriter) Write(p []byte) (n int, err error) {
    status, err := pw.tracker.GetStatus(pw.ref)
    if err != nil {
        return n, err
    }

    if pw.pipe == nil {
        p, ok := <-pw.pipeC
        if !ok {
            return 0, io.ErrClosedPipe
        }
        pw.pipe = p
    } else {
        select {
        case p, ok := <-pw.pipeC:
            if !ok {
                return 0, io.ErrClosedPipe
            }
            pw.pipe.CloseWithError(content.ErrReset)
            pw.pipe = p

            // If content has already been written, the bytes
            // cannot be written and the caller must reset
            if status.Offset > 0 {
                status.Offset = 0
                status.UpdatedAt = time.Now()
                pw.tracker.SetStatus(pw.ref, status)
                return 0, content.ErrReset
            }
        default:
        }
    }

    n, err = pw.pipe.Write(p)
    status.Offset += int64(n)
    status.UpdatedAt = time.Now()
@@ -355,13 +395,21 @@ func (pw *pushWriter) Write(p []byte) (n int, err error) {
}

func (pw *pushWriter) Close() error {
    status, err := pw.tracker.GetStatus(pw.ref)
    if err == nil && !status.Committed {
        // Closing an incomplete writer. Record this as an error so that following write can retry it.
        status.ErrClosed = errors.New("closed incomplete writer")
        pw.tracker.SetStatus(pw.ref, status)
    // Ensure pipeC is closed but handle `Close()` being
    // called multiple times without panicking
    pw.closeOnce.Do(func() {
        close(pw.pipeC)
    })
    if pw.pipe != nil {
        status, err := pw.tracker.GetStatus(pw.ref)
        if err == nil && !status.Committed {
            // Closing an incomplete writer. Record this as an error so that following write can retry it.
            status.ErrClosed = errors.New("closed incomplete writer")
            pw.tracker.SetStatus(pw.ref, status)
        }
        return pw.pipe.Close()
    }
    return pw.pipe.Close()
    return nil
}

func (pw *pushWriter) Status() (content.Status, error) {
@@ -388,18 +436,43 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
        return err
    }
    // TODO: timeout waiting for response
    resp := <-pw.responseC
    if resp.err != nil {
        return resp.err
    var resp *http.Response
    select {
    case err := <-pw.errC:
        if err != nil {
            return err
        }
    case resp = <-pw.respC:
        defer resp.Body.Close()
    case p, ok := <-pw.pipeC:
        // check whether the pipe has changed in the commit, because sometimes Write
        // can complete successfully, but the pipe may have changed. In that case, the
        // content needs to be reset.
        if !ok {
            return io.ErrClosedPipe
        }
        pw.pipe.CloseWithError(content.ErrReset)
        pw.pipe = p
        status, err := pw.tracker.GetStatus(pw.ref)
        if err != nil {
            return err
        }
        // If content has already been written, the bytes
        // cannot be written again and the caller must reset
        if status.Offset > 0 {
            status.Offset = 0
            status.UpdatedAt = time.Now()
            pw.tracker.SetStatus(pw.ref, status)
            return content.ErrReset
        }
    }
    defer resp.Response.Body.Close()

    // 201 is specified return status, some registries return
    // 200, 202 or 204.
    switch resp.StatusCode {
    case http.StatusOK, http.StatusCreated, http.StatusNoContent, http.StatusAccepted:
    default:
        return remoteserrors.NewUnexpectedStatusErr(resp.Response)
        return remoteserrors.NewUnexpectedStatusErr(resp)
    }

    status, err := pw.tracker.GetStatus(pw.ref)
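The pushWriter rework above replaces the single response channel with three channels (pipeC, respC, errC) so a retried HTTP request can hand the writer a fresh pipe mid-push. A condensed sketch of the pipe-swap logic in Write, using simplified types that are not the real dockerPusher internals:

```go
package sketch

import (
    "errors"
    "io"
)

var errReset = errors.New("writer has been reset")

type swappableWriter struct {
    pipe   *io.PipeWriter
    pipeC  chan *io.PipeWriter // refreshed each time the request is (re)issued
    offset int64
}

func (w *swappableWriter) Write(p []byte) (int, error) {
    if w.pipe == nil {
        // First write: wait for the request goroutine to provide a pipe.
        np, ok := <-w.pipeC
        if !ok {
            return 0, io.ErrClosedPipe
        }
        w.pipe = np
    } else {
        select {
        case np, ok := <-w.pipeC:
            if !ok {
                return 0, io.ErrClosedPipe
            }
            // A retried request replaced the pipe mid-stream.
            w.pipe.CloseWithError(errReset)
            w.pipe = np
            if w.offset > 0 {
                // Bytes already sent are lost; the caller must reset.
                w.offset = 0
                return 0, errReset
            }
        default:
        }
    }
    n, err := w.pipe.Write(p)
    w.offset += int64(n)
    return n, err
}
```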
vendor/github.com/containerd/containerd/remotes/docker/resolver.go (generated, vendored; 15 changes)
@@ -21,6 +21,7 @@ import (
    "errors"
    "fmt"
    "io"
    "net"
    "net/http"
    "net/url"
    "path"
@@ -667,3 +668,17 @@ func responseFields(resp *http.Response) logrus.Fields {

    return logrus.Fields(fields)
}

// IsLocalhost checks if the registry host is local.
func IsLocalhost(host string) bool {
    if h, _, err := net.SplitHostPort(host); err == nil {
        host = h
    }

    if host == "localhost" {
        return true
    }

    ip := net.ParseIP(host)
    return ip.IsLoopback()
}
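The new IsLocalhost helper strips an optional port before matching. A short usage example, assuming the vendored import path:

```go
package main

import (
    "fmt"

    "github.com/containerd/containerd/remotes/docker"
)

func main() {
    fmt.Println(docker.IsLocalhost("localhost:5000"))       // true: port is split off first
    fmt.Println(docker.IsLocalhost("127.0.0.1"))            // true: loopback IP
    fmt.Println(docker.IsLocalhost("registry.example.com")) // false: not "localhost" and not an IP
}
```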
vendor/github.com/containerd/containerd/remotes/handlers.go (generated, vendored; 4 changes)
@@ -257,8 +257,8 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st
// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed.
//
// This is based on the media type of the content:
// - application/vnd.oci.image.layer.nondistributable
// - application/vnd.docker.image.rootfs.foreign
//   - application/vnd.oci.image.layer.nondistributable
//   - application/vnd.docker.image.rootfs.foreign
func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc {
    return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        if images.IsNonDistributable(desc.MediaType) {
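A hedged wiring example for SkipNonDistributableBlobs: it wraps any images.HandlerFunc, so composing it with the stock children handler prunes foreign layers from a push walk. childrenOf is an illustrative helper, not part of the vendored API.

```go
package sketch

import (
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/remotes"
)

// childrenOf walks an image tree but skips Windows foreign layers and other
// nondistributable blobs, per the media-type list above.
func childrenOf(store content.Provider) images.HandlerFunc {
    return remotes.SkipNonDistributableBlobs(images.ChildrenHandler(store))
}
```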
vendor/github.com/containerd/containerd/version/version.go (generated, vendored; 2 changes)
@@ -23,7 +23,7 @@ var (
    Package = "github.com/containerd/containerd"

    // Version holds the complete version number. Filled in at linking time.
    Version = "1.6.6+unknown"
    Version = "1.6.10+unknown"

    // Revision is filled with the VCS (e.g. git) revision being used to build
    // the program at linking time.
vendor/github.com/docker/docker/api/swagger.yaml (generated, vendored; 86 changes)
@@ -24,7 +24,7 @@ info:
  title: "Docker Engine API"
  version: "1.42"
  x-logo:
    url: "https://docs.docker.com/images/logo-docker-main.png"
    url: "https://docs.docker.com/assets/images/logo-docker-main.png"
  description: |
    The Engine API is an HTTP API served by Docker Engine. It is the API the
    Docker client uses to communicate with the Engine, so everything the Docker
@@ -214,12 +214,14 @@ definitions:
          - `volume` a docker volume with the given `Name`.
          - `tmpfs` a `tmpfs`.
          - `npipe` a named pipe from the host into the container.
          - `cluster` a Swarm cluster volume
        type: "string"
        enum:
          - "bind"
          - "volume"
          - "tmpfs"
          - "npipe"
          - "cluster"
        example: "volume"
      Name:
        description: |
@@ -350,12 +352,14 @@ definitions:
          - `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
          - `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.
          - `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.
          - `cluster` a Swarm cluster volume
        type: "string"
        enum:
          - "bind"
          - "volume"
          - "tmpfs"
          - "npipe"
          - "cluster"
      ReadOnly:
        description: "Whether the mount should be read-only."
        type: "boolean"
@@ -2247,23 +2251,63 @@ definitions:

  BuildCache:
    type: "object"
    description: |
      BuildCache contains information about a build cache record.
    properties:
      ID:
        type: "string"
        description: |
          Unique ID of the build cache record.
        example: "ndlpt0hhvkqcdfkputsk4cq9c"
      Parent:
        description: |
          ID of the parent build cache record.

          > **Deprecated**: This field is deprecated, and omitted if empty.
        type: "string"
        x-nullable: true
        example: ""
      Parents:
        description: |
          List of parent build cache record IDs.
        type: "array"
        items:
          type: "string"
        x-nullable: true
        example: ["hw53o5aio51xtltp5xjp8v7fx"]
      Type:
        type: "string"
        description: |
          Cache record type.
        example: "regular"
        # see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84
        enum:
          - "internal"
          - "frontend"
          - "source.local"
          - "source.git.checkout"
          - "exec.cachemount"
          - "regular"
      Description:
        type: "string"
        description: |
          Description of the build-step that produced the build cache.
        example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
      InUse:
        type: "boolean"
        description: |
          Indicates if the build cache is in use.
        example: false
      Shared:
        type: "boolean"
        description: |
          Indicates if the build cache is shared.
        example: true
      Size:
        description: |
          Amount of disk space used by the build cache (in bytes).
        type: "integer"
        example: 51
      CreatedAt:
        description: |
          Date and time at which the build cache was created in
@@ -2281,6 +2325,7 @@ definitions:
        example: "2017-08-09T07:09:37.632105588Z"
      UsageCount:
        type: "integer"
        example: 26

  ImageID:
    type: "object"
@@ -6210,6 +6255,28 @@ paths:
            `/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
          type: "string"
          pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
        - name: "platform"
          in: "query"
          description: |
            Platform in the format `os[/arch[/variant]]` used for image lookup.

            When specified, the daemon checks if the requested image is present
            in the local image cache with the given OS and Architecture, and
            otherwise returns a `404` status.

            If the option is not set, the host's native OS and Architecture are
            used to look up the image in the image cache. However, if no platform
            is passed and the given image does exist in the local image cache,
            but its OS or architecture does not match, the container is created
            with the available image, and a warning is added to the `Warnings`
            field in the response, for example;

                WARNING: The requested image's platform (linux/arm64/v8) does not
                         match the detected host platform (linux/amd64) and no
                         specific platform was requested

          type: "string"
          default: ""
        - name: "body"
          in: "body"
          description: "Container to create"
@@ -8719,7 +8786,17 @@ paths:
            description: "Max API Version the server supports"
          Builder-Version:
            type: "string"
            description: "Default version of docker image builder"
            description: |
              Default version of docker image builder

              The default on Linux is version "2" (BuildKit), but the daemon
              can be configured to recommend version "1" (classic Builder).
              Windows does not yet support BuildKit for native Windows images,
              and uses "1" (classic builder) as a default.

              This value is a recommendation as advertised by the daemon, and
              it is up to the client to choose which builder to use.
            default: "2"
          Docker-Experimental:
            type: "boolean"
            description: "If the server is running with experimental mode enabled"
@@ -9013,7 +9090,7 @@ paths:
              BuildCache:
                -
                  ID: "hw53o5aio51xtltp5xjp8v7fx"
                  Parent: ""
                  Parents: []
                  Type: "regular"
                  Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0"
                  InUse: false
@@ -9024,7 +9101,7 @@ paths:
                  UsageCount: 26
                -
                  ID: "ndlpt0hhvkqcdfkputsk4cq9c"
                  Parent: "hw53o5aio51xtltp5xjp8v7fx"
                  Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"]
                  Type: "regular"
                  Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
                  InUse: false
@@ -9622,6 +9699,7 @@ paths:

            Available filters:
            - `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
            - `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes.
          type: "string"
      responses:
        200:
vendor/github.com/docker/docker/api/types/mount/mount.go (generated, vendored; 2 changes)
@@ -18,7 +18,7 @@ const (
    // TypeNamedPipe is the type for mounting Windows named pipes
    TypeNamedPipe Type = "npipe"
    // TypeCluster is the type for Swarm Cluster Volumes.
    TypeCluster = "csi"
    TypeCluster Type = "cluster"
)

// Mount represents a mount (volume).
vendor/github.com/docker/docker/api/types/types.go (generated, vendored; 33 changes)
@@ -774,18 +774,31 @@ type BuildResult struct {
    ID string
}

// BuildCache contains information about a build cache record
// BuildCache contains information about a build cache record.
type BuildCache struct {
    ID string
    Parent string
    Type string
    // ID is the unique ID of the build cache record.
    ID string
    // Parent is the ID of the parent build cache record.
    //
    // Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
    Parent string `json:"Parent,omitempty"`
    // Parents is the list of parent build cache record IDs.
    Parents []string `json:"Parents,omitempty"`
    // Type is the cache record type.
    Type string
    // Description is a description of the build-step that produced the build cache.
    Description string
    InUse bool
    Shared bool
    Size int64
    CreatedAt time.Time
    LastUsedAt *time.Time
    UsageCount int
    // InUse indicates if the build cache is in use.
    InUse bool
    // Shared indicates if the build cache is shared.
    Shared bool
    // Size is the amount of disk space used by the build cache (in bytes).
    Size int64
    // CreatedAt is the date and time at which the build cache was created.
    CreatedAt time.Time
    // LastUsedAt is the date and time at which the build cache was last used.
    LastUsedAt *time.Time
    UsageCount int
}

// BuildCachePruneOptions hold parameters to prune the build cache
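Because the deprecated Parent field now carries `json:"Parent,omitempty"`, it disappears from encoded records when empty while Parents takes its place. A small demonstration, assuming the vendored types package:

```go
package main

import (
    "encoding/json"
    "fmt"

    "github.com/docker/docker/api/types"
)

func main() {
    bc := types.BuildCache{
        ID:      "ndlpt0hhvkqcdfkputsk4cq9c",
        Parents: []string{"hw53o5aio51xtltp5xjp8v7fx"},
        Type:    "regular",
    }
    out, _ := json.Marshal(bc)
    fmt.Println(string(out)) // no "Parent" key: omitempty drops the empty deprecated field
}
```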
vendor/github.com/docker/docker/api/types/volume/cluster_volume.go (generated, vendored; 4 changes)
@@ -104,7 +104,7 @@ type AccessMode struct {
    BlockVolume *TypeBlock `json:",omitempty"`
}

// Scope defines the Scope of a CSI Volume. This is how many nodes a
// Scope defines the Scope of a Cluster Volume. This is how many nodes a
// Volume can be accessed simultaneously on.
type Scope string

@@ -118,7 +118,7 @@ const (
    ScopeMultiNode Scope = "multi"
)

// SharingMode defines the Sharing of a CSI Volume. This is how Tasks using a
// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a
// Volume at the same time can use it.
type SharingMode string

vendor/github.com/docker/docker/client/container_create.go (generated, vendored; 28 changes)
@@ -26,23 +26,25 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
    if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
        return response, err
    }

    clientVersion := cli.ClientVersion()

    // When using API 1.24 and under, the client is responsible for removing the container
    if hostConfig != nil && versions.LessThan(clientVersion, "1.25") {
        hostConfig.AutoRemove = false
    }

    // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
    if hostConfig != nil && platform != nil && platform.OS == "linux" && versions.LessThan(clientVersion, "1.42") {
        hostConfig.ConsoleSize = [2]uint{0, 0}
    }

    if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil {
        return response, err
    }

    if hostConfig != nil {
        if versions.LessThan(cli.ClientVersion(), "1.25") {
            // When using API 1.24 and under, the client is responsible for removing the container
            hostConfig.AutoRemove = false
        }
        if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") {
            // KernelMemory was added in API 1.40, and deprecated in API 1.42
            hostConfig.KernelMemory = 0
        }
        if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") {
            // When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
            hostConfig.ConsoleSize = [2]uint{0, 0}
        }
    }

    query := url.Values{}
    if p := formatPlatform(platform); p != "" {
        query.Set("platform", p)
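The consolidated block above normalizes hostConfig fields that the negotiated API version cannot express. The version-window check for KernelMemory reads as "supported in [1.40, 1.42) only"; a hedged sketch using the same versions helpers (fieldSupported is illustrative):

```go
package main

import (
    "fmt"

    "github.com/docker/docker/api/types/versions"
)

// fieldSupported reports whether a field that exists only in API versions
// [min, max) may be sent at the given negotiated version.
func fieldSupported(apiVersion, min, max string) bool {
    return versions.GreaterThanOrEqualTo(apiVersion, min) && versions.LessThan(apiVersion, max)
}

func main() {
    // KernelMemory was added in API 1.40 and deprecated in API 1.42.
    fmt.Println(fieldSupported("1.39", "1.40", "1.42")) // false
    fmt.Println(fieldSupported("1.41", "1.40", "1.42")) // true
    fmt.Println(fieldSupported("1.42", "1.40", "1.42")) // false
}
```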
vendor/github.com/docker/docker/client/events.go (generated, vendored; 1 change)
@@ -17,7 +17,6 @@ import (
// be sent over the error channel. If an error is sent all processing will be stopped. It's up
// to the caller to reopen the stream in the event of an error by reinvoking this method.
func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) {

    messages := make(chan events.Message)
    errs := make(chan error, 1)

vendor/github.com/docker/docker/client/options.go (generated, vendored; 7 changes)
@@ -44,13 +44,6 @@ func FromEnv(c *Client) error {
    return nil
}

// WithDialer applies the dialer.DialContext to the client transport. This can be
// used to set the Timeout and KeepAlive settings of the client.
// Deprecated: use WithDialContext
func WithDialer(dialer *net.Dialer) Opt {
    return WithDialContext(dialer.DialContext)
}

// WithDialContext applies the dialer to the client transport. This can be
// used to set the Timeout and KeepAlive settings of the client.
func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {
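Callers of the removed WithDialer can pass the dialer's DialContext method to WithDialContext instead; the timeout and keep-alive settings carry over. A minimal migration example:

```go
package main

import (
    "net"
    "time"

    "github.com/docker/docker/client"
)

func main() {
    dialer := &net.Dialer{Timeout: 30 * time.Second, KeepAlive: 30 * time.Second}
    // Equivalent to the removed client.WithDialer(dialer).
    cli, err := client.NewClientWithOpts(client.FromEnv, client.WithDialContext(dialer.DialContext))
    if err != nil {
        panic(err)
    }
    defer cli.Close()
}
```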
vendor/github.com/docker/docker/pkg/archive/archive.go (generated, vendored; 87 changes)
@@ -19,17 +19,30 @@ import (
    "time"

    "github.com/containerd/containerd/pkg/userns"
    "github.com/docker/docker/pkg/fileutils"
    "github.com/docker/docker/pkg/idtools"
    "github.com/docker/docker/pkg/ioutils"
    "github.com/docker/docker/pkg/pools"
    "github.com/docker/docker/pkg/system"
    "github.com/klauspost/compress/zstd"
    "github.com/moby/patternmatcher"
    "github.com/moby/sys/sequential"
    "github.com/pkg/errors"
    "github.com/sirupsen/logrus"
    exec "golang.org/x/sys/execabs"
)

// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a
// tar, but that do not have their own header entry.
//
// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not
// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and
// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755.
//
// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is
// subject to change in Moby at any time -- image authors who require consistent or known directory permissions
// should explicitly control them by ensuring that header entries exist for any applicable path.
const ImpliedDirectoryMode = 0755

type (
    // Compression is the state represents if compressed or not.
    Compression int
@@ -382,7 +395,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi
        }

        pipeWriter.Close()

    }()
    return pipeReader
}
@@ -660,10 +672,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
    }

    if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
        // We use system.OpenSequential to ensure we use sequential file
        // access on Windows to avoid depleting the standby list.
        // On Linux, this equates to a regular os.Open.
        file, err := system.OpenSequential(path)
        // We use sequential file access to avoid depleting the standby list on
        // Windows. On Linux, this equates to a regular os.Open.
        file, err := sequential.Open(path)
        if err != nil {
            return err
        }
@@ -701,10 +712,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
    }

    case tar.TypeReg, tar.TypeRegA:
        // Source is regular file. We use system.OpenFileSequential to use sequential
        // file access to avoid depleting the standby list on Windows.
        // On Linux, this equates to a regular os.OpenFile
        file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
        // Source is regular file. We use sequential file access to avoid depleting
        // the standby list on Windows. On Linux, this equates to a regular os.OpenFile.
        file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
        if err != nil {
            return err
        }
@@ -791,7 +801,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
        }
        return err
    }

    }

    if len(errors) > 0 {
@@ -841,12 +850,11 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {

    // Fix the source path to work with long path names. This is a no-op
    // on platforms other than Windows.
    srcPath = fixVolumePathPrefix(srcPath)

    pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
    pm, err := patternmatcher.New(options.ExcludePatterns)
    if err != nil {
        return nil, err
    }
@@ -921,7 +929,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
    rebaseName := options.RebaseNames[include]

    var (
        parentMatchInfo []fileutils.MatchInfo
        parentMatchInfo []patternmatcher.MatchInfo
        parentDirs      []string
    )

@@ -960,11 +968,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
        parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1]
    }

    var matchInfo fileutils.MatchInfo
    var matchInfo patternmatcher.MatchInfo
    if len(parentMatchInfo) != 0 {
        skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1])
    } else {
        skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, fileutils.MatchInfo{})
        skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{})
    }
    if err != nil {
        logrus.Errorf("Error matching %s: %v", relFilePath, err)
@@ -1049,7 +1057,6 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
    defer pools.BufioReader32KPool.Put(trBuf)

    var dirs []*tar.Header
    rootIDs := options.IDMap.RootPair()
    whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
    if err != nil {
        return err
@@ -1084,19 +1091,10 @@ loop:
        }
    }

    // After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
    // the filepath format for the OS on which the daemon is running. Hence
    // the check for a slash-suffix MUST be done in an OS-agnostic way.
    if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
        // Not the root directory, ensure that the parent directory exists
        parent := filepath.Dir(hdr.Name)
        parentPath := filepath.Join(dest, parent)
        if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
            err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs)
            if err != nil {
                return err
            }
        }
    // Ensure that the parent directory exists.
    err = createImpliedDirectories(dest, hdr, options)
    if err != nil {
        return err
    }

    // #nosec G305 -- The joined path is checked for path traversal.
@@ -1174,6 +1172,35 @@ loop:
    return nil
}

// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do
// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is
// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus
// we most both create them and choose metadata like permissions.
//
// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS
// on which the daemon is running. This precondition is required because this function assumes a OS-specific path
// separator when checking that a path is not the root.
func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error {
    // Not the root directory, ensure that the parent directory exists
    if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
        parent := filepath.Dir(hdr.Name)
        parentPath := filepath.Join(dest, parent)
        if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
            // RootPair() is confined inside this loop as most cases will not require a call, so we can spend some
            // unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche
            // usage that reduces the portability of an image.
            rootIDs := options.IDMap.RootPair()

            err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs)
            if err != nil {
                return err
            }
        }
    }

    return nil
}

// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
// and unpacks it into the directory at `dest`.
// The archive may be compressed with one of the following algorithms:
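The 0755 value of ImpliedDirectoryMode follows from the comment's derivation: mkdir requests 0777 and the conventional umask 0022 clears the group/other write bits. The arithmetic, for the record:

```go
package main

import "fmt"

func main() {
    const requested, umask = 0o777, 0o022
    fmt.Printf("%#o\n", requested&^umask) // 0755, the value of ImpliedDirectoryMode
}
```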
vendor/github.com/docker/docker/pkg/archive/changes.go (generated, vendored; 2 changes)
@@ -246,7 +246,6 @@ func (info *FileInfo) path() string {
}

func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {

    sizeAtEntry := len(*changes)

    if oldInfo == nil {
@@ -319,7 +318,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
        copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
        (*changes)[sizeAtEntry] = change
    }

}

// Changes add changes to file information.
vendor/github.com/docker/docker/pkg/archive/copy.go (generated, vendored; 1 change)
@@ -303,7 +303,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
        }
        return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
    }

}

// RebaseArchiveEntries rewrites the given srcContent archive replacing
vendor/github.com/docker/docker/pkg/archive/diff.go (generated, vendored; 18 changes)
@@ -72,20 +72,10 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
        }
    }

    // Note as these operations are platform specific, so must the slash be.
    if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
        // Not the root directory, ensure that the parent directory exists.
        // This happened in some tests where an image had a tarfile without any
        // parent directories.
        parent := filepath.Dir(hdr.Name)
        parentPath := filepath.Join(dest, parent)

        if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
            err = system.MkdirAll(parentPath, 0600)
            if err != nil {
                return 0, err
            }
        }
    // Ensure that the parent directory exists.
    err = createImpliedDirectories(dest, hdr, options)
    if err != nil {
        return 0, err
    }

    // Skip AUFS metadata dirs
vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go (generated, vendored; 27 changes)
@@ -1,27 +0,0 @@
package fileutils // import "github.com/docker/docker/pkg/fileutils"

import (
    "os"
    "os/exec"
    "strconv"
    "strings"
)

// GetTotalUsedFds returns the number of used File Descriptors by
// executing `lsof -p PID`
func GetTotalUsedFds() int {
    pid := os.Getpid()

    cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))

    output, err := cmd.CombinedOutput()
    if err != nil {
        return -1
    }

    outputStr := strings.TrimSpace(string(output))

    fds := strings.Split(outputStr, "\n")

    return len(fds) - 1
}
vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go (generated, vendored; 22 changes)
@@ -1,22 +0,0 @@
//go:build linux || freebsd
// +build linux freebsd

package fileutils // import "github.com/docker/docker/pkg/fileutils"

import (
    "fmt"
    "os"

    "github.com/sirupsen/logrus"
)

// GetTotalUsedFds Returns the number of used File Descriptors by
// reading it via /proc filesystem.
func GetTotalUsedFds() int {
    if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
        logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
    } else {
        return len(fds)
    }
    return -1
}
vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go (generated, vendored; 7 changes)
@@ -1,7 +0,0 @@
package fileutils // import "github.com/docker/docker/pkg/fileutils"

// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
// on Windows.
func GetTotalUsedFds() int {
    return -1
}
vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go (generated, vendored; 5 changes)
@@ -30,6 +30,10 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting
    // chown the full directory path if it exists

    var paths []string
    path, err := filepath.Abs(path)
    if err != nil {
        return err
    }

    stat, err := system.Stat(path)
    if err == nil {
@@ -209,7 +213,6 @@ func callGetent(database, key string) (io.Reader, error) {
        default:
            return nil, err
        }

    }
    return bytes.NewReader(out), nil
}
vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go (generated, vendored; 1 change)
@@ -88,7 +88,6 @@ func addUser(name string) error {
}

func createSubordinateRanges(name string) error {

    // first, we should verify that ranges weren't automatically created
    // by the distro tooling
    ranges, err := parseSubuid(name)
vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go (generated, vendored; 3 changes)
@@ -769,9 +769,6 @@ var (
    // Helen Brooke Taussig - American cardiologist and founder of the field of paediatric cardiology. https://en.wikipedia.org/wiki/Helen_B._Taussig
    "taussig",

    // Valentina Tereshkova is a Russian engineer, cosmonaut and politician. She was the first woman to fly to space in 1963. In 2013, at the age of 76, she offered to go on a one-way mission to Mars. https://en.wikipedia.org/wiki/Valentina_Tereshkova
    "tereshkova",

    // Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla
    "tesla",

vendor/github.com/docker/docker/pkg/system/filesys.go (new file; generated, vendored; 19 changes)
@@ -0,0 +1,19 @@
package system

import (
    "os"
    "path/filepath"
    "strings"
)

// IsAbs is a platform-agnostic wrapper for filepath.IsAbs.
//
// On Windows, golang filepath.IsAbs does not consider a path \windows\system32
// as absolute as it doesn't start with a drive-letter/colon combination. However,
// in docker we need to verify things such as WORKDIR /windows/system32 in
// a Dockerfile (which gets translated to \windows\system32 when being processed
// by the daemon). This SHOULD be treated as absolute from a docker processing
// perspective.
func IsAbs(path string) bool {
    return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator))
}
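On Windows the wrapper accepts rooted paths that lack a drive letter, which plain filepath.IsAbs rejects. A brief example (results as noted on Windows; on Unix the backslash paths are ordinary relative names):

```go
package main

import (
    "fmt"

    "github.com/docker/docker/pkg/system"
)

func main() {
    fmt.Println(system.IsAbs(`\windows\system32`)) // true on Windows: leading path separator
    fmt.Println(system.IsAbs(`c:\windows`))        // true on Windows: drive-letter absolute
    fmt.Println(system.IsAbs(`windows\system32`))  // false: relative either way
}
```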
vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go (new file; generated, vendored; 35 changes)
@@ -0,0 +1,35 @@
package system

import (
    "os"

    "github.com/moby/sys/sequential"
)

// CreateSequential is deprecated.
//
// Deprecated: use os.Create or github.com/moby/sys/sequential.Create()
func CreateSequential(name string) (*os.File, error) {
    return sequential.Create(name)
}

// OpenSequential is deprecated.
//
// Deprecated: use os.Open or github.com/moby/sys/sequential.Open
func OpenSequential(name string) (*os.File, error) {
    return sequential.Open(name)
}

// OpenFileSequential is deprecated.
//
// Deprecated: use github.com/moby/sys/sequential.OpenFile()
func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
    return sequential.OpenFile(name, flag, perm)
}

// TempFileSequential is deprecated.
//
// Deprecated: use os.CreateTemp or github.com/moby/sys/sequential.CreateTemp
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
    return sequential.CreateTemp(dir, prefix)
}
vendor/github.com/docker/docker/pkg/system/filesys_unix.go (generated, vendored; 52 changes)
@@ -3,10 +3,7 @@

package system // import "github.com/docker/docker/pkg/system"

import (
    "os"
    "path/filepath"
)
import "os"

// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems.
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
@@ -18,50 +15,3 @@ func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
func MkdirAll(path string, perm os.FileMode) error {
    return os.MkdirAll(path, perm)
}

// IsAbs is a platform-specific wrapper for filepath.IsAbs.
func IsAbs(path string) bool {
    return filepath.IsAbs(path)
}

// The functions below here are wrappers for the equivalents in the os and ioutils packages.
// They are passthrough on Unix platforms, and only relevant on Windows.

// CreateSequential creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func CreateSequential(name string) (*os.File, error) {
    return os.Create(name)
}

// OpenSequential opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func OpenSequential(name string) (*os.File, error) {
    return os.Open(name)
}

// OpenFileSequential is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// If there is an error, it will be of type *PathError.
func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
    return os.OpenFile(name, flag, perm)
}

// TempFileSequential creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
    return os.CreateTemp(dir, prefix)
}
vendor/github.com/docker/docker/pkg/system/filesys_windows.go (generated, vendored; 174 changes)
@ -2,13 +2,8 @@ package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
@ -121,172 +116,3 @@ func mkdirWithACL(name string, sddl string) error {
    }
    return nil
}

// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
// as it doesn't start with a drive-letter/colon combination. However, in
// docker we need to verify things such as WORKDIR /windows/system32 in
// a Dockerfile (which gets translated to \windows\system32 when being processed
// by the daemon). This SHOULD be treated as absolute from a docker processing
// perspective.
func IsAbs(path string) bool {
    if filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) {
        return true
    }
    return false
}

// The origin of the functions below here are the golang OS and windows packages,
// slightly modified to only cope with files, not directories due to the
// specific use case.
//
// The alteration is to allow a file on Windows to be opened with
// FILE_FLAG_SEQUENTIAL_SCAN (particularly for docker load), to avoid eating
// the standby list, particularly when accessing large files such as layer.tar.

// CreateSequential creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func CreateSequential(name string) (*os.File, error) {
    return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
}

// OpenSequential opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func OpenSequential(name string) (*os.File, error) {
    return OpenFileSequential(name, os.O_RDONLY, 0)
}

// OpenFileSequential is the generalized open call; most users will use Open
// or Create instead.
// If there is an error, it will be of type *PathError.
func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
    if name == "" {
        return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
    }
    r, errf := windowsOpenFileSequential(name, flag, 0)
    if errf == nil {
        return r, nil
    }
    return nil, &os.PathError{Op: "open", Path: name, Err: errf}
}

func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
    r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
    if e != nil {
        return nil, e
    }
    return os.NewFile(uintptr(r), name), nil
}

func makeInheritSa() *windows.SecurityAttributes {
    var sa windows.SecurityAttributes
    sa.Length = uint32(unsafe.Sizeof(sa))
    sa.InheritHandle = 1
    return &sa
}

func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
    if len(path) == 0 {
        return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
    }
    pathp, err := windows.UTF16PtrFromString(path)
    if err != nil {
        return windows.InvalidHandle, err
    }
    var access uint32
    switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
    case windows.O_RDONLY:
        access = windows.GENERIC_READ
    case windows.O_WRONLY:
        access = windows.GENERIC_WRITE
    case windows.O_RDWR:
        access = windows.GENERIC_READ | windows.GENERIC_WRITE
    }
    if mode&windows.O_CREAT != 0 {
        access |= windows.GENERIC_WRITE
    }
    if mode&windows.O_APPEND != 0 {
        access &^= windows.GENERIC_WRITE
        access |= windows.FILE_APPEND_DATA
    }
    sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
    var sa *windows.SecurityAttributes
    if mode&windows.O_CLOEXEC == 0 {
        sa = makeInheritSa()
    }
    var createmode uint32
    switch {
    case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
        createmode = windows.CREATE_NEW
    case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
        createmode = windows.CREATE_ALWAYS
    case mode&windows.O_CREAT == windows.O_CREAT:
        createmode = windows.OPEN_ALWAYS
    case mode&windows.O_TRUNC == windows.O_TRUNC:
        createmode = windows.TRUNCATE_EXISTING
    default:
        createmode = windows.OPEN_EXISTING
    }
    // Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
    // https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
    const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
    h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
    return h, e
}

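These wrappers mirror the standard os helpers while passing FILE_FLAG_SEQUENTIAL_SCAN to CreateFile, which hints the cache manager to read ahead and evict behind rather than polluting the standby list. A minimal sketch of how a caller might stream a large file through them (the file name is illustrative, and the import path assumes the vendored docker pkg/system package):

```go
package main

import (
    "io"
    "os"

    "github.com/docker/docker/pkg/system"
)

func main() {
    // OpenSequential adds the sequential-scan hint on Windows;
    // on other platforms it is simply os.Open.
    src, err := system.OpenSequential("layer.tar") // hypothetical input file
    if err != nil {
        panic(err)
    }
    defer src.Close()
    if _, err := io.Copy(os.Stdout, src); err != nil {
        panic(err)
    }
}
```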
// Helpers for TempFileSequential
var rand uint32
var randmu sync.Mutex

func reseed() uint32 {
    return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}

func nextSuffix() string {
    randmu.Lock()
    r := rand
    if r == 0 {
        r = reseed()
    }
    r = r*1664525 + 1013904223 // constants from Numerical Recipes
    rand = r
    randmu.Unlock()
    return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
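The suffix generator above is a linear congruential generator guarded by a mutex; the `1e9 +` / `[1:]` trick left-pads the result to exactly nine digits. A standalone sketch of the same arithmetic (the seed is arbitrary):

```go
package main

import (
    "fmt"
    "strconv"
)

func main() {
    r := uint32(42) // arbitrary seed for illustration
    r = r*1664525 + 1013904223
    // 1e9 + r%1e9 always has ten digits, so slicing off the
    // leading "1" yields a zero-padded nine-digit suffix.
    fmt.Println(strconv.Itoa(int(1e9 + r%1e9))[1:]) // "083814273"
}
```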

// TempFileSequential is a copy of os.CreateTemp, modified to use sequential
// file access. Below is the original comment from golang:
// TempFile creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
    if dir == "" {
        dir = os.TempDir()
    }

    nconflict := 0
    for i := 0; i < 10000; i++ {
        name := filepath.Join(dir, prefix+nextSuffix())
        f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
        if os.IsExist(err) {
            if nconflict++; nconflict > 10 {
                randmu.Lock()
                rand = reseed()
                randmu.Unlock()
            }
            continue
        }
        break
    }
    return
}

1
vendor/github.com/docker/docker/pkg/system/meminfo_linux.go
generated
vendored
@ -55,7 +55,6 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) {
        case "SwapFree:":
            meminfo.SwapFree = bytes
        }

    }
    if memAvailable != -1 {
        meminfo.MemFree = memAvailable
1
vendor/github.com/docker/docker/pkg/system/path.go
generated
vendored
@ -13,7 +13,6 @@ func DefaultPathEnv(os string) string {
        return ""
    }
    return defaultUnixPathEnv

}

// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter
4
vendor/github.com/go-logr/logr/README.md
generated
vendored
@ -105,14 +105,18 @@ with higher verbosity means more (and less important) logs will be generated.

There are implementations for the following logging libraries:

- **a function** (can bridge to non-structured libraries): [funcr](https://github.com/go-logr/logr/tree/master/funcr)
- **a testing.T** (for use in Go tests, with JSON-like output): [testr](https://github.com/go-logr/logr/tree/master/testr)
- **github.com/google/glog**: [glogr](https://github.com/go-logr/glogr)
- **k8s.io/klog** (for Kubernetes): [klogr](https://git.k8s.io/klog/klogr)
- **a testing.T** (with klog-like text output): [ktesting](https://git.k8s.io/klog/ktesting)
- **go.uber.org/zap**: [zapr](https://github.com/go-logr/zapr)
- **log** (the Go standard library logger): [stdr](https://github.com/go-logr/stdr)
- **github.com/sirupsen/logrus**: [logrusr](https://github.com/bombsimon/logrusr)
- **github.com/wojas/genericr**: [genericr](https://github.com/wojas/genericr) (makes it easy to implement your own backend)
- **logfmt** (Heroku style [logging](https://www.brandur.org/logfmt)): [logfmtr](https://github.com/iand/logfmtr)
- **github.com/rs/zerolog**: [zerologr](https://github.com/go-logr/zerologr)
- **github.com/go-kit/log**: [gokitlogr](https://github.com/tonglil/gokitlogr) (also compatible with github.com/go-kit/kit/log since v0.12.0)
- **bytes.Buffer** (writing to a buffer): [bufrlogr](https://github.com/tonglil/buflogr) (useful for ensuring values were logged, like during testing)

## FAQ

36
vendor/github.com/go-logr/logr/funcr/funcr.go
generated
vendored
@ -351,15 +351,15 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
    if v, ok := value.(logr.Marshaler); ok {
        // Replace the value with what the type wants to get logged.
        // That then gets handled below via reflection.
        value = v.MarshalLog()
        value = invokeMarshaler(v)
    }

    // Handle types that want to format themselves.
    switch v := value.(type) {
    case fmt.Stringer:
        value = v.String()
        value = invokeStringer(v)
    case error:
        value = v.Error()
        value = invokeError(v)
    }

    // Handling the most common types without reflect is a small perf win.
@ -408,8 +408,9 @@ func (f Formatter) prettyWithFlags(value interface{}, flags uint32, depth int) s
            if i > 0 {
                buf.WriteByte(',')
            }
            k, _ := v[i].(string) // sanitize() above means no need to check success
            // arbitrary keys might need escaping
            buf.WriteString(prettyString(v[i].(string)))
            buf.WriteString(prettyString(k))
            buf.WriteByte(':')
            buf.WriteString(f.prettyWithFlags(v[i+1], 0, depth+1))
        }
@ -596,6 +597,33 @@ func isEmpty(v reflect.Value) bool {
    return false
}

func invokeMarshaler(m logr.Marshaler) (ret interface{}) {
    defer func() {
        if r := recover(); r != nil {
            ret = fmt.Sprintf("<panic: %s>", r)
        }
    }()
    return m.MarshalLog()
}

func invokeStringer(s fmt.Stringer) (ret string) {
    defer func() {
        if r := recover(); r != nil {
            ret = fmt.Sprintf("<panic: %s>", r)
        }
    }()
    return s.String()
}

func invokeError(e error) (ret string) {
    defer func() {
        if r := recover(); r != nil {
            ret = fmt.Sprintf("<panic: %s>", r)
        }
    }()
    return e.Error()
}

// Caller represents the original call site for a log line, after considering
// logr.Logger.WithCallDepth and logr.Logger.WithCallStackHelper. The File and
// Line fields will always be provided, while the Func field is optional.

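The three invoke helpers added above share one pattern: a named return value rewritten by a deferred recover, so a panicking MarshalLog, String, or Error method degrades into a placeholder string instead of crashing the logging call. A self-contained sketch of the pattern (the bomb type is illustrative):

```go
package main

import "fmt"

type bomb struct{}

func (bomb) String() string { panic("boom") }

// safeString mirrors funcr's invokeStringer: the deferred recover
// rewrites the named return value when s.String() panics.
func safeString(s fmt.Stringer) (ret string) {
    defer func() {
        if r := recover(); r != nil {
            ret = fmt.Sprintf("<panic: %s>", r)
        }
    }()
    return s.String()
}

func main() {
    fmt.Println(safeString(bomb{})) // <panic: boom>
}
```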
9
vendor/github.com/go-logr/logr/logr.go
generated
vendored
@ -115,6 +115,15 @@ limitations under the License.
// may be any Go value, but how the value is formatted is determined by the
// LogSink implementation.
//
// Logger instances are meant to be passed around by value. Code that receives
// such a value can call its methods without having to check whether the
// instance is ready for use.
//
// Calling methods with the null logger (Logger{}) as instance will crash
// because it has no LogSink. Therefore this null logger should never be passed
// around. For cases where passing a logger is optional, a pointer to Logger
// should be used.
//
// Key Naming Conventions
//
// Keys are not strictly required to conform to any specification or regex, but
30
vendor/github.com/golang-jwt/jwt/v4/README.md
generated
vendored
@ -36,9 +36,23 @@ The part in the middle is the interesting bit. It's called the Claims and conta

This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own.

## Installation Guidelines

1. To install the jwt package, you first need to have [Go](https://go.dev/doc/install) installed, then you can use the command below to add `jwt-go` as a dependency in your Go program.

```sh
go get -u github.com/golang-jwt/jwt/v4
```

2. Import it in your code:

```go
import "github.com/golang-jwt/jwt/v4"
```

## Examples

See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) for examples of usage:
See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt/v4) for examples of usage:

* [Simple example of parsing and validating a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-Parse-Hmac)
* [Simple example of building and signing a token](https://pkg.go.dev/github.com/golang-jwt/jwt#example-New-Hmac)
@ -46,9 +60,17 @@ See [the project documentation](https://pkg.go.dev/github.com/golang-jwt/jwt) fo

## Extensions

This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`.
This library publishes all the necessary components for adding your own signing methods or key functions. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod` or provide a `jwt.Keyfunc`.

Here's an example of an extension that integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS): https://github.com/someone1/gcp-jwt-go
A common use case would be integrating with different 3rd party signature providers, like key management services from various cloud providers or Hardware Security Modules (HSMs), or to implement additional standards.

| Extension | Purpose | Repo |
| --------- | -------------------------------------------------------------------------------------------------------- | ------------------------------------------ |
| GCP | Integrates with multiple Google Cloud Platform signing tools (AppEngine, IAM API, Cloud KMS) | https://github.com/someone1/gcp-jwt-go |
| AWS | Integrates with AWS Key Management Service, KMS | https://github.com/matelang/jwt-go-aws-kms |
| JWKS | Provides support for JWKS ([RFC 7517](https://datatracker.ietf.org/doc/html/rfc7517)) as a `jwt.Keyfunc` | https://github.com/MicahParks/keyfunc |

*Disclaimer*: Unless otherwise specified, these integrations are maintained by third parties and should not be considered as a primary offer by any of the mentioned cloud providers.

## Compliance

@ -112,3 +134,5 @@ This library uses descriptive error messages whenever possible. If you are not g
Documentation can be found [on pkg.go.dev](https://pkg.go.dev/github.com/golang-jwt/jwt).

The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation.

[golang-jwt](https://github.com/orgs/golang-jwt) incorporates a modified version of the JWT logo, which is distributed under the terms of the [MIT License](https://github.com/jsonwebtoken/jsonwebtoken.github.io/blob/master/LICENSE.txt).

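A minimal sketch of the sign-and-parse flow the README links to, using the v4 import path added above (the secret is illustrative):

```go
package main

import (
    "fmt"

    "github.com/golang-jwt/jwt/v4"
)

func main() {
    secret := []byte("illustrative-secret")
    // Build and sign a token, then parse it back with a Keyfunc.
    tok := jwt.NewWithClaims(jwt.SigningMethodHS256, jwt.MapClaims{"sub": "demo"})
    signed, err := tok.SignedString(secret)
    if err != nil {
        panic(err)
    }
    parsed, err := jwt.Parse(signed, func(t *jwt.Token) (interface{}, error) {
        // The Keyfunc decides which key verifies this token.
        return secret, nil
    })
    fmt.Println(parsed.Valid, err) // true <nil>
}
```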
19
vendor/github.com/golang-jwt/jwt/v4/SECURITY.md
generated
vendored
Normal file
@ -0,0 +1,19 @@
# Security Policy

## Supported Versions

As of February 2022 (and until this document is updated), the latest version `v4` is supported.

## Reporting a Vulnerability

If you think you found a vulnerability, and even if you are not sure, please report it to jwt-go-security@googlegroups.com or one of the other [golang-jwt maintainers](https://github.com/orgs/golang-jwt/people). Please try to be explicit, and describe the steps to reproduce the security issue with code example(s).

You will receive a response within a timely manner. If the issue is confirmed, we will do our best to release a patch as soon as possible given the complexity of the problem.

## Public Discussions

Please avoid publicly discussing a potential security vulnerability.

Let's take this offline and find a solution first; this limits the potential impact as much as possible.

We appreciate your help!
12
vendor/github.com/golang-jwt/jwt/v4/claims.go
generated
vendored
@ -56,17 +56,17 @@ func (c RegisteredClaims) Valid() error {
    // default value in Go, let's not fail the verification for them.
    if !c.VerifyExpiresAt(now, false) {
        delta := now.Sub(c.ExpiresAt.Time)
        vErr.Inner = fmt.Errorf("token is expired by %v", delta)
        vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
        vErr.Errors |= ValidationErrorExpired
    }

    if !c.VerifyIssuedAt(now, false) {
        vErr.Inner = fmt.Errorf("token used before issued")
        vErr.Inner = ErrTokenUsedBeforeIssued
        vErr.Errors |= ValidationErrorIssuedAt
    }

    if !c.VerifyNotBefore(now, false) {
        vErr.Inner = fmt.Errorf("token is not valid yet")
        vErr.Inner = ErrTokenNotValidYet
        vErr.Errors |= ValidationErrorNotValidYet
    }

@ -149,17 +149,17 @@ func (c StandardClaims) Valid() error {
    // default value in Go, let's not fail the verification for them.
    if !c.VerifyExpiresAt(now, false) {
        delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0))
        vErr.Inner = fmt.Errorf("token is expired by %v", delta)
        vErr.Inner = fmt.Errorf("%s by %s", ErrTokenExpired, delta)
        vErr.Errors |= ValidationErrorExpired
    }

    if !c.VerifyIssuedAt(now, false) {
        vErr.Inner = fmt.Errorf("token used before issued")
        vErr.Inner = ErrTokenUsedBeforeIssued
        vErr.Errors |= ValidationErrorIssuedAt
    }

    if !c.VerifyNotBefore(now, false) {
        vErr.Inner = fmt.Errorf("token is not valid yet")
        vErr.Inner = ErrTokenNotValidYet
        vErr.Errors |= ValidationErrorNotValidYet
    }

48
vendor/github.com/golang-jwt/jwt/v4/errors.go
generated
vendored
@ -9,6 +9,18 @@ var (
    ErrInvalidKey      = errors.New("key is invalid")
    ErrInvalidKeyType  = errors.New("key is of invalid type")
    ErrHashUnavailable = errors.New("the requested hash function is unavailable")

    ErrTokenMalformed        = errors.New("token is malformed")
    ErrTokenUnverifiable     = errors.New("token is unverifiable")
    ErrTokenSignatureInvalid = errors.New("token signature is invalid")

    ErrTokenInvalidAudience  = errors.New("token has invalid audience")
    ErrTokenExpired          = errors.New("token is expired")
    ErrTokenUsedBeforeIssued = errors.New("token used before issued")
    ErrTokenInvalidIssuer    = errors.New("token has invalid issuer")
    ErrTokenNotValidYet      = errors.New("token is not valid yet")
    ErrTokenInvalidId        = errors.New("token has invalid id")
    ErrTokenInvalidClaims    = errors.New("token has invalid claims")
)

// The errors that might occur when parsing and validating a token
@ -62,3 +74,39 @@ func (e *ValidationError) Unwrap() error {
func (e *ValidationError) valid() bool {
    return e.Errors == 0
}

// Is checks if this ValidationError is of the supplied error. We are first checking for the exact error message
// by comparing the inner error message. If that fails, we compare using the error flags. This way we can use
// custom error messages (mainly for backwards compatibility) and still leverage errors.Is using the global error variables.
func (e *ValidationError) Is(err error) bool {
    // Check, if our inner error is a direct match
    if errors.Is(errors.Unwrap(e), err) {
        return true
    }

    // Otherwise, we need to match using our error flags
    switch err {
    case ErrTokenMalformed:
        return e.Errors&ValidationErrorMalformed != 0
    case ErrTokenUnverifiable:
        return e.Errors&ValidationErrorUnverifiable != 0
    case ErrTokenSignatureInvalid:
        return e.Errors&ValidationErrorSignatureInvalid != 0
    case ErrTokenInvalidAudience:
        return e.Errors&ValidationErrorAudience != 0
    case ErrTokenExpired:
        return e.Errors&ValidationErrorExpired != 0
    case ErrTokenUsedBeforeIssued:
        return e.Errors&ValidationErrorIssuedAt != 0
    case ErrTokenInvalidIssuer:
        return e.Errors&ValidationErrorIssuer != 0
    case ErrTokenNotValidYet:
        return e.Errors&ValidationErrorNotValidYet != 0
    case ErrTokenInvalidId:
        return e.Errors&ValidationErrorId != 0
    case ErrTokenInvalidClaims:
        return e.Errors&ValidationErrorClaimsInvalid != 0
    }

    return false
}

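With the Is method above, callers can match a *ValidationError against the exported sentinel errors via errors.Is, even when the inner message was customized. A hedged usage sketch (the token string and key are placeholders):

```go
package main

import (
    "errors"
    "fmt"

    "github.com/golang-jwt/jwt/v4"
)

func main() {
    tokenString := "header.payload.signature" // placeholder, not a real JWT
    _, err := jwt.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
        return []byte("illustrative-secret"), nil
    })
    // ValidationError.Is lets errors.Is match on the error flags.
    switch {
    case errors.Is(err, jwt.ErrTokenMalformed):
        fmt.Println("token malformed")
    case errors.Is(err, jwt.ErrTokenExpired):
        fmt.Println("token expired")
    default:
        fmt.Println(err)
    }
}
```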
3
vendor/github.com/golang-jwt/jwt/v4/map_claims.go
generated
vendored
@ -126,16 +126,19 @@ func (m MapClaims) Valid() error {
    now := TimeFunc().Unix()

    if !m.VerifyExpiresAt(now, false) {
        // TODO(oxisto): this should be replaced with ErrTokenExpired
        vErr.Inner = errors.New("Token is expired")
        vErr.Errors |= ValidationErrorExpired
    }

    if !m.VerifyIssuedAt(now, false) {
        // TODO(oxisto): this should be replaced with ErrTokenUsedBeforeIssued
        vErr.Inner = errors.New("Token used before issued")
        vErr.Errors |= ValidationErrorIssuedAt
    }

    if !m.VerifyNotBefore(now, false) {
        // TODO(oxisto): this should be replaced with ErrTokenNotValidYet
        vErr.Inner = errors.New("Token is not valid yet")
        vErr.Errors |= ValidationErrorNotValidYet
    }

4
vendor/github.com/golang-jwt/jwt/v4/parser_option.go
generated
vendored
@ -1,6 +1,6 @@
package jwt

// ParserOption is used to implement functional-style options that modify the behaviour of the parser. To add
// ParserOption is used to implement functional-style options that modify the behavior of the parser. To add
// new options, just create a function (ideally beginning with With or Without) that returns an anonymous function that
// takes a *Parser type as input and manipulates its configuration accordingly.
type ParserOption func(*Parser)
@ -13,7 +13,7 @@ func WithValidMethods(methods []string) ParserOption {
    }
}

// WithJSONNumber is an option to configure the underyling JSON parser with UseNumber
// WithJSONNumber is an option to configure the underlying JSON parser with UseNumber
func WithJSONNumber() ParserOption {
    return func(p *Parser) {
        p.UseJSONNumber = true
1
vendor/github.com/golang-jwt/jwt/v4/rsa_pss.go
generated
vendored
@ -1,3 +1,4 @@
//go:build go1.4
// +build go1.4

package jwt
26
vendor/github.com/golang-jwt/jwt/v4/token.go
generated
vendored
@ -7,7 +7,6 @@ import (
    "time"
)

// DecodePaddingAllowed will switch the codec used for decoding JWTs respectively. Note that the JWS RFC7515
// states that the tokens will utilize a Base64url encoding with no padding. Unfortunately, some implementations
// of JWT are producing non-standard tokens, and thus require support for decoding. Note that this is a global
@ -74,22 +73,19 @@ func (t *Token) SignedString(key interface{}) (string, error) {
// the SignedString.
func (t *Token) SigningString() (string, error) {
    var err error
    parts := make([]string, 2)
    for i := range parts {
        var jsonValue []byte
        if i == 0 {
            if jsonValue, err = json.Marshal(t.Header); err != nil {
                return "", err
            }
        } else {
            if jsonValue, err = json.Marshal(t.Claims); err != nil {
                return "", err
            }
        }
    var jsonValue []byte

        parts[i] = EncodeSegment(jsonValue)
    if jsonValue, err = json.Marshal(t.Header); err != nil {
        return "", err
    }
    return strings.Join(parts, "."), nil
    header := EncodeSegment(jsonValue)

    if jsonValue, err = json.Marshal(t.Claims); err != nil {
        return "", err
    }
    claim := EncodeSegment(jsonValue)

    return strings.Join([]string{header, claim}, "."), nil
}

// Parse parses, validates, verifies the signature and returns the parsed token.
22
vendor/github.com/golang-jwt/jwt/v4/types.go
generated
vendored
@ -49,9 +49,27 @@ func newNumericDateFromSeconds(f float64) *NumericDate {
// MarshalJSON is an implementation of the json.RawMessage interface and serializes the UNIX epoch
// represented in NumericDate to a byte array, using the precision specified in TimePrecision.
func (date NumericDate) MarshalJSON() (b []byte, err error) {
    f := float64(date.Truncate(TimePrecision).UnixNano()) / float64(time.Second)
    var prec int
    if TimePrecision < time.Second {
        prec = int(math.Log10(float64(time.Second) / float64(TimePrecision)))
    }
    truncatedDate := date.Truncate(TimePrecision)

    return []byte(strconv.FormatFloat(f, 'f', -1, 64)), nil
    // For very large timestamps, UnixNano would overflow an int64, but this
    // function requires nanosecond level precision, so we have to use the
    // following technique to get round the issue:
    // 1. Take the normal unix timestamp to form the whole number part of the
    //    output,
    // 2. Take the result of the Nanosecond function, which returns the offset
    //    within the second of the particular unix time instance, to form the
    //    decimal part of the output
    // 3. Concatenate them to produce the final result
    seconds := strconv.FormatInt(truncatedDate.Unix(), 10)
    nanosecondsOffset := strconv.FormatFloat(float64(truncatedDate.Nanosecond())/float64(time.Second), 'f', prec, 64)

    output := append([]byte(seconds), []byte(nanosecondsOffset)[1:]...)

    return output, nil
}

// UnmarshalJSON is an implementation of the json.RawMessage interface and deserializes a

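The concatenation above can be checked with a small standalone sketch using millisecond precision (values are illustrative; jwt.TimePrecision plays the role of precision here):

```go
package main

import (
    "fmt"
    "math"
    "strconv"
    "time"
)

func main() {
    precision := time.Millisecond // stand-in for jwt.TimePrecision
    prec := int(math.Log10(float64(time.Second) / float64(precision))) // 3
    t := time.Unix(1700000000, 123456789).Truncate(precision)

    seconds := strconv.FormatInt(t.Unix(), 10)
    // Nanosecond() is the offset within the second; format it as "0.123"
    // and drop the leading zero before concatenating.
    frac := strconv.FormatFloat(float64(t.Nanosecond())/float64(time.Second), 'f', prec, 64)
    fmt.Println(seconds + frac[1:]) // 1700000000.123
}
```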
64
vendor/github.com/google/go-cmp/cmp/compare.go
generated
vendored
@ -13,21 +13,21 @@
//
// The primary features of cmp are:
//
// • When the default behavior of equality does not suit the needs of the test,
// custom equality functions can override the equality operation.
// For example, an equality function may report floats as equal so long as they
// are within some tolerance of each other.
//   - When the default behavior of equality does not suit the test's needs,
//     custom equality functions can override the equality operation.
//     For example, an equality function may report floats as equal so long as
//     they are within some tolerance of each other.
//
// • Types that have an Equal method may use that method to determine equality.
// This allows package authors to determine the equality operation for the types
// that they define.
//   - Types with an Equal method may use that method to determine equality.
//     This allows package authors to determine the equality operation
//     for the types that they define.
//
// • If no custom equality functions are used and no Equal method is defined,
// equality is determined by recursively comparing the primitive kinds on both
// values, much like reflect.DeepEqual. Unlike reflect.DeepEqual, unexported
// fields are not compared by default; they result in panics unless suppressed
// by using an Ignore option (see cmpopts.IgnoreUnexported) or explicitly
// compared using the Exporter option.
//   - If no custom equality functions are used and no Equal method is defined,
//     equality is determined by recursively comparing the primitive kinds on
//     both values, much like reflect.DeepEqual. Unlike reflect.DeepEqual,
//     unexported fields are not compared by default; they result in panics
//     unless suppressed by using an Ignore option (see cmpopts.IgnoreUnexported)
//     or explicitly compared using the Exporter option.
package cmp

import (
@ -45,25 +45,25 @@ import (
// Equal reports whether x and y are equal by recursively applying the
// following rules in the given order to x and y and all of their sub-values:
//
// • Let S be the set of all Ignore, Transformer, and Comparer options that
// remain after applying all path filters, value filters, and type filters.
// If at least one Ignore exists in S, then the comparison is ignored.
// If the number of Transformer and Comparer options in S is greater than one,
// then Equal panics because it is ambiguous which option to use.
// If S contains a single Transformer, then use that to transform the current
// values and recursively call Equal on the output values.
// If S contains a single Comparer, then use that to compare the current values.
// Otherwise, evaluation proceeds to the next rule.
//   - Let S be the set of all Ignore, Transformer, and Comparer options that
//     remain after applying all path filters, value filters, and type filters.
//     If at least one Ignore exists in S, then the comparison is ignored.
//     If the number of Transformer and Comparer options in S is non-zero,
//     then Equal panics because it is ambiguous which option to use.
//     If S contains a single Transformer, then use that to transform
//     the current values and recursively call Equal on the output values.
//     If S contains a single Comparer, then use that to compare the current values.
//     Otherwise, evaluation proceeds to the next rule.
//
// • If the values have an Equal method of the form "(T) Equal(T) bool" or
// "(T) Equal(I) bool" where T is assignable to I, then use the result of
// x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
// evaluation proceeds to the next rule.
//   - If the values have an Equal method of the form "(T) Equal(T) bool" or
//     "(T) Equal(I) bool" where T is assignable to I, then use the result of
//     x.Equal(y) even if x or y is nil. Otherwise, no such method exists and
//     evaluation proceeds to the next rule.
//
// • Lastly, try to compare x and y based on their basic kinds.
// Simple kinds like booleans, integers, floats, complex numbers, strings, and
// channels are compared using the equivalent of the == operator in Go.
// Functions are only equal if they are both nil, otherwise they are unequal.
//   - Lastly, try to compare x and y based on their basic kinds.
//     Simple kinds like booleans, integers, floats, complex numbers, strings,
//     and channels are compared using the equivalent of the == operator in Go.
//     Functions are only equal if they are both nil, otherwise they are unequal.
//
// Structs are equal if recursively calling Equal on all fields report equal.
// If a struct contains unexported fields, Equal panics unless an Ignore option
@ -144,7 +144,7 @@ func rootStep(x, y interface{}) PathStep {
    // so that they have the same parent type.
    var t reflect.Type
    if !vx.IsValid() || !vy.IsValid() || vx.Type() != vy.Type() {
        t = reflect.TypeOf((*interface{})(nil)).Elem()
        t = anyType
        if vx.IsValid() {
            vvx := reflect.New(t).Elem()
            vvx.Set(vx)
@ -639,7 +639,9 @@ type dynChecker struct{ curr, next int }
// Next increments the state and reports whether a check should be performed.
//
// Checks occur every Nth function call, where N is a triangular number:
//
//	0 1 3 6 10 15 21 28 36 45 55 66 78 91 105 120 136 153 171 190 ...
//
// See https://en.wikipedia.org/wiki/Triangular_number
//
// This sequence ensures that the cost of checks drops significantly as

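As a concrete illustration of the first rule (custom equality overriding the default), a short sketch using a tolerance-based Comparer:

```go
package main

import (
    "fmt"
    "math"

    "github.com/google/go-cmp/cmp"
)

func main() {
    // Treat float64 values as equal when they differ by less than 1e-9.
    approx := cmp.Comparer(func(x, y float64) bool {
        return math.Abs(x-y) < 1e-9
    })
    x, y := 0.1, 0.2
    sum := x + y // runtime addition: 0.30000000000000004
    fmt.Println(cmp.Equal(sum, 0.3))         // false: exact == fails
    fmt.Println(cmp.Equal(sum, 0.3, approx)) // true: within tolerance
}
```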
44
vendor/github.com/google/go-cmp/cmp/internal/diff/diff.go
generated
vendored
@ -127,9 +127,9 @@ var randBool = rand.New(rand.NewSource(time.Now().Unix())).Intn(2) == 0
// This function returns an edit-script, which is a sequence of operations
// needed to convert one list into the other. The following invariants for
// the edit-script are maintained:
// • eq == (es.Dist()==0)
// • nx == es.LenX()
// • ny == es.LenY()
//   - eq == (es.Dist()==0)
//   - nx == es.LenX()
//   - ny == es.LenY()
//
// This algorithm is not guaranteed to be an optimal solution (i.e., one that
// produces an edit-script with a minimal Levenshtein distance). This algorithm
@ -169,12 +169,13 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
    // A diagonal edge is equivalent to a matching symbol between both X and Y.

    // Invariants:
    // • 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
    // • 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
    //   - 0 ≤ fwdPath.X ≤ (fwdFrontier.X, revFrontier.X) ≤ revPath.X ≤ nx
    //   - 0 ≤ fwdPath.Y ≤ (fwdFrontier.Y, revFrontier.Y) ≤ revPath.Y ≤ ny
    //
    // In general:
    // • fwdFrontier.X < revFrontier.X
    // • fwdFrontier.Y < revFrontier.Y
    //   - fwdFrontier.X < revFrontier.X
    //   - fwdFrontier.Y < revFrontier.Y
    //
    // Unless, it is time for the algorithm to terminate.
    fwdPath := path{+1, point{0, 0}, make(EditScript, 0, (nx+ny)/2)}
    revPath := path{-1, point{nx, ny}, make(EditScript, 0)}
@ -195,19 +196,21 @@ func Difference(nx, ny int, f EqualFunc) (es EditScript) {
    // computing sub-optimal edit-scripts between two lists.
    //
    // The algorithm is approximately as follows:
    // • Searching for differences switches back-and-forth between
    // a search that starts at the beginning (the top-left corner), and
    // a search that starts at the end (the bottom-right corner). The goal of
    // the search is to connect with the search from the opposite corner.
    // • As we search, we build a path in a greedy manner, where the first
    // match seen is added to the path (this is sub-optimal, but provides a
    // decent result in practice). When matches are found, we try the next pair
    // of symbols in the lists and follow all matches as far as possible.
    // • When searching for matches, we search along a diagonal going
    // through the "frontier" point. If no matches are found, we advance the
    // frontier towards the opposite corner.
    // • This algorithm terminates when either the X coordinates or the
    // Y coordinates of the forward and reverse frontier points ever intersect.
    //   - Searching for differences switches back-and-forth between
    //     a search that starts at the beginning (the top-left corner), and
    //     a search that starts at the end (the bottom-right corner).
    //     The goal of the search is to connect with the search
    //     from the opposite corner.
    //   - As we search, we build a path in a greedy manner,
    //     where the first match seen is added to the path (this is sub-optimal,
    //     but provides a decent result in practice). When matches are found,
    //     we try the next pair of symbols in the lists and follow all matches
    //     as far as possible.
    //   - When searching for matches, we search along a diagonal going
    //     through the "frontier" point. If no matches are found,
    //     we advance the frontier towards the opposite corner.
    //   - This algorithm terminates when either the X coordinates or the
    //     Y coordinates of the forward and reverse frontier points ever intersect.

    // This algorithm is correct even if searching only in the forward direction
    // or in the reverse direction. We do both because it is commonly observed
@ -389,6 +392,7 @@ type point struct{ X, Y int }
func (p *point) add(dx, dy int) { p.X += dx; p.Y += dy }

// zigzag maps a consecutive sequence of integers to a zig-zag sequence.
//
//	[0 1 2 3 4 5 ...] => [0 -1 +1 -2 +2 ...]
func zigzag(x int) int {
    if x&1 != 0 {

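The zigzag mapping lets the search probe alternating offsets around a diagonal. The hunk above is truncated, so the body below is a sketch of the standard bit trick rather than a quote of the file:

```go
package main

import "fmt"

// zigzag maps 0 1 2 3 4 5 ... to 0 -1 +1 -2 +2 -3 ...
func zigzag(x int) int {
    if x&1 != 0 {
        x = ^x // complement odd inputs so the shift lands on -1, -2, ...
    }
    return x >> 1
}

func main() {
    for i := 0; i < 6; i++ {
        fmt.Print(zigzag(i), " ") // 0 -1 1 -2 2 -3
    }
}
```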
48
vendor/github.com/google/go-cmp/cmp/internal/value/zero.go
generated
vendored
@ -1,48 +0,0 @@
// Copyright 2017, The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package value

import (
    "math"
    "reflect"
)

// IsZero reports whether v is the zero value.
// This does not rely on Interface and so can be used on unexported fields.
func IsZero(v reflect.Value) bool {
    switch v.Kind() {
    case reflect.Bool:
        return v.Bool() == false
    case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
        return v.Int() == 0
    case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
        return v.Uint() == 0
    case reflect.Float32, reflect.Float64:
        return math.Float64bits(v.Float()) == 0
    case reflect.Complex64, reflect.Complex128:
        return math.Float64bits(real(v.Complex())) == 0 && math.Float64bits(imag(v.Complex())) == 0
    case reflect.String:
        return v.String() == ""
    case reflect.UnsafePointer:
        return v.Pointer() == 0
    case reflect.Chan, reflect.Func, reflect.Interface, reflect.Ptr, reflect.Map, reflect.Slice:
        return v.IsNil()
    case reflect.Array:
        for i := 0; i < v.Len(); i++ {
            if !IsZero(v.Index(i)) {
                return false
            }
        }
        return true
    case reflect.Struct:
        for i := 0; i < v.NumField(); i++ {
            if !IsZero(v.Field(i)) {
                return false
            }
        }
        return true
    }
    return false
}
10
vendor/github.com/google/go-cmp/cmp/options.go
generated
vendored
@ -33,6 +33,7 @@ type Option interface {
}

// applicableOption represents the following types:
//
//	Fundamental: ignore | validator | *comparer | *transformer
//	Grouping:    Options
type applicableOption interface {
@ -43,6 +44,7 @@ type applicableOption interface {
}

// coreOption represents the following types:
//
//	Fundamental: ignore | validator | *comparer | *transformer
//	Filters:     *pathFilter | *valuesFilter
type coreOption interface {
@ -336,9 +338,9 @@ func (tr transformer) String() string {
// both implement T.
//
// The equality function must be:
// • Symmetric: equal(x, y) == equal(y, x)
// • Deterministic: equal(x, y) == equal(x, y)
// • Pure: equal(x, y) does not modify x or y
//   - Symmetric: equal(x, y) == equal(y, x)
//   - Deterministic: equal(x, y) == equal(x, y)
//   - Pure: equal(x, y) does not modify x or y
func Comparer(f interface{}) Option {
    v := reflect.ValueOf(f)
    if !function.IsType(v.Type(), function.Equal) || v.IsNil() {
@ -430,7 +432,7 @@ func AllowUnexported(types ...interface{}) Option {
}

// Result represents the comparison result for a single node and
// is provided by cmp when calling Result (see Reporter).
// is provided by cmp when calling Report (see Reporter).
type Result struct {
    _     [0]func() // Make Result incomparable
    flags resultFlags

20
vendor/github.com/google/go-cmp/cmp/path.go
generated
vendored
@ -41,13 +41,13 @@ type PathStep interface {
    // The type of each valid value is guaranteed to be identical to Type.
    //
    // In some cases, one or both may be invalid or have restrictions:
    // • For StructField, both are not interface-able if the current field
    // is unexported and the struct type is not explicitly permitted by
    // an Exporter to traverse unexported fields.
    // • For SliceIndex, one may be invalid if an element is missing from
    // either the x or y slice.
    // • For MapIndex, one may be invalid if an entry is missing from
    // either the x or y map.
    //   - For StructField, both are not interface-able if the current field
    //     is unexported and the struct type is not explicitly permitted by
    //     an Exporter to traverse unexported fields.
    //   - For SliceIndex, one may be invalid if an element is missing from
    //     either the x or y slice.
    //   - For MapIndex, one may be invalid if an entry is missing from
    //     either the x or y map.
    //
    // The provided values must not be mutated.
    Values() (vx, vy reflect.Value)
@ -94,6 +94,7 @@ func (pa Path) Index(i int) PathStep {
// The simplified path only contains struct field accesses.
//
// For example:
//
//	MyMap.MySlices.MyField
func (pa Path) String() string {
    var ss []string
@ -108,6 +109,7 @@ func (pa Path) String() string {
// GoString returns the path to a specific node using Go syntax.
//
// For example:
//
//	(*root.MyMap["key"].(*mypkg.MyStruct).MySlices)[2][3].MyField
func (pa Path) GoString() string {
    var ssPre, ssPost []string
@ -159,7 +161,7 @@ func (ps pathStep) String() string {
    if ps.typ == nil {
        return "<nil>"
    }
    s := ps.typ.String()
    s := value.TypeString(ps.typ, false)
    if s == "" || strings.ContainsAny(s, "{}\n") {
        return "root" // Type too simple or complex to print
    }
@ -282,7 +284,7 @@ type typeAssertion struct {

func (ta TypeAssertion) Type() reflect.Type { return ta.typ }
func (ta TypeAssertion) Values() (vx, vy reflect.Value) { return ta.vx, ta.vy }
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", ta.typ) }
func (ta TypeAssertion) String() string { return fmt.Sprintf(".(%v)", value.TypeString(ta.typ, false)) }

// Transform is a transformation from the parent type to the current type.
type Transform struct{ *transform }

10
vendor/github.com/google/go-cmp/cmp/report_compare.go
generated
vendored
@ -7,8 +7,6 @@ package cmp
import (
    "fmt"
    "reflect"

    "github.com/google/go-cmp/cmp/internal/value"
)

// numContextRecords is the number of surrounding equal records to print.
@ -117,7 +115,7 @@ func (opts formatOptions) FormatDiff(v *valueNode, ptrs *pointerReferences) (out

    // For leaf nodes, format the value based on the reflect.Values alone.
    // As a special case, treat equal []byte as leaf nodes.
    isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == reflect.TypeOf(byte(0))
    isBytes := v.Type.Kind() == reflect.Slice && v.Type.Elem() == byteType
    isEqualBytes := isBytes && v.NumDiff+v.NumIgnored+v.NumTransformed == 0
    if v.MaxDepth == 0 || isEqualBytes {
        switch opts.DiffMode {
@ -248,11 +246,11 @@ func (opts formatOptions) formatDiffList(recs []reportRecord, k reflect.Kind, pt
        var isZero bool
        switch opts.DiffMode {
        case diffIdentical:
            isZero = value.IsZero(r.Value.ValueX) || value.IsZero(r.Value.ValueY)
            isZero = r.Value.ValueX.IsZero() || r.Value.ValueY.IsZero()
        case diffRemoved:
            isZero = value.IsZero(r.Value.ValueX)
            isZero = r.Value.ValueX.IsZero()
        case diffInserted:
            isZero = value.IsZero(r.Value.ValueY)
            isZero = r.Value.ValueY.IsZero()
        }
        if isZero {
            continue
11
vendor/github.com/google/go-cmp/cmp/report_reflect.go
generated
vendored
@ -16,6 +16,13 @@ import (
    "github.com/google/go-cmp/cmp/internal/value"
)

var (
    anyType    = reflect.TypeOf((*interface{})(nil)).Elem()
    stringType = reflect.TypeOf((*string)(nil)).Elem()
    bytesType  = reflect.TypeOf((*[]byte)(nil)).Elem()
    byteType   = reflect.TypeOf((*byte)(nil)).Elem()
)

type formatValueOptions struct {
    // AvoidStringer controls whether to avoid calling custom stringer
    // methods like error.Error or fmt.Stringer.String.
@ -184,7 +191,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
    }
    for i := 0; i < v.NumField(); i++ {
        vv := v.Field(i)
        if value.IsZero(vv) {
        if vv.IsZero() {
            continue // Elide fields with zero values
        }
        if len(list) == maxLen {
@ -205,7 +212,7 @@ func (opts formatOptions) FormatValue(v reflect.Value, parentKind reflect.Kind,
    }

    // Check whether this is a []byte of text data.
    if t.Elem() == reflect.TypeOf(byte(0)) {
    if t.Elem() == byteType {
        b := v.Bytes()
        isPrintSpace := func(r rune) bool { return unicode.IsPrint(r) || unicode.IsSpace(r) }
        if len(b) > 0 && utf8.Valid(b) && len(bytes.TrimFunc(b, isPrintSpace)) == 0 {

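The hunk above hoists repeated reflect.TypeOf calls into package-level variables; the pointer-Elem construction derives a reflect.Type once at init, even for types with no simple value literal. A small sketch of the idiom (names are illustrative):

```go
package main

import (
    "fmt"
    "reflect"
)

// Computing the reflect.Type once avoids re-deriving it on every call;
// the (*T)(nil) trick also works for interface and composite types.
var byteType = reflect.TypeOf((*byte)(nil)).Elem()

func isByteSlice(t reflect.Type) bool {
    return t.Kind() == reflect.Slice && t.Elem() == byteType
}

func main() {
    fmt.Println(isByteSlice(reflect.TypeOf([]byte("hi")))) // true
    fmt.Println(isByteSlice(reflect.TypeOf([]int{1})))     // false
}
```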
25
vendor/github.com/google/go-cmp/cmp/report_slices.go
generated
vendored
@ -104,7 +104,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
    case t.Kind() == reflect.String:
        sx, sy = vx.String(), vy.String()
        isString = true
    case t.Kind() == reflect.Slice && t.Elem() == reflect.TypeOf(byte(0)):
    case t.Kind() == reflect.Slice && t.Elem() == byteType:
        sx, sy = string(vx.Bytes()), string(vy.Bytes())
        isString = true
    case t.Kind() == reflect.Array:
@ -147,7 +147,10 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
        })
        efficiencyLines := float64(esLines.Dist()) / float64(len(esLines))
        efficiencyBytes := float64(esBytes.Dist()) / float64(len(esBytes))
        isPureLinedText = efficiencyLines < 4*efficiencyBytes
        quotedLength := len(strconv.Quote(sx + sy))
        unquotedLength := len(sx) + len(sy)
        escapeExpansionRatio := float64(quotedLength) / float64(unquotedLength)
        isPureLinedText = efficiencyLines < 4*efficiencyBytes || escapeExpansionRatio > 1.1
    }
}

@ -171,12 +174,13 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
    // differences in a string literal. This format is more readable,
    // but has edge-cases where differences are visually indistinguishable.
    // This format is avoided under the following conditions:
    // • A line starts with `"""`
    // • A line starts with "..."
    // • A line contains non-printable characters
    // • Adjacent different lines differ only by whitespace
    //   - A line starts with `"""`
    //   - A line starts with "..."
    //   - A line contains non-printable characters
    //   - Adjacent different lines differ only by whitespace
    //
    // For example:
    //
    //	"""
    //	... // 3 identical lines
    //	foo
@ -231,7 +235,7 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
    var out textNode = &textWrap{Prefix: "(", Value: list2, Suffix: ")"}
    switch t.Kind() {
    case reflect.String:
        if t != reflect.TypeOf(string("")) {
        if t != stringType {
            out = opts.FormatType(t, out)
        }
    case reflect.Slice:
@ -326,12 +330,12 @@ func (opts formatOptions) FormatDiffSlice(v *valueNode) textNode {
    switch t.Kind() {
    case reflect.String:
        out = &textWrap{Prefix: "strings.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
        if t != reflect.TypeOf(string("")) {
        if t != stringType {
            out = opts.FormatType(t, out)
        }
    case reflect.Slice:
        out = &textWrap{Prefix: "bytes.Join(", Value: out, Suffix: fmt.Sprintf(", %q)", delim)}
        if t != reflect.TypeOf([]byte(nil)) {
        if t != bytesType {
            out = opts.FormatType(t, out)
        }
    }
@ -446,7 +450,6 @@ func (opts formatOptions) formatDiffSlice(
//	{NumIdentical: 3},
//	{NumInserted: 1},
// ]
//
func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats) {
    var prevMode byte
    lastStats := func(mode byte) *diffStats {
@ -503,7 +506,6 @@ func coalesceAdjacentEdits(name string, es diff.EditScript) (groups []diffStats)
//	{NumIdentical: 8, NumRemoved: 12, NumInserted: 3},
//	{NumIdentical: 63},
// ]
//
func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStats {
    groups, groupsOrig := groups[:0], groups
    for i, ds := range groupsOrig {
@ -548,7 +550,6 @@ func coalesceInterveningIdentical(groups []diffStats, windowSize int) []diffStat
//	{NumRemoved: 9},
//	{NumIdentical: 64}, // incremented by 10
// ]
//
func cleanupSurroundingIdentical(groups []diffStats, eq func(i, j int) bool) []diffStats {
    var ix, iy int // indexes into sequence x and y
    for i, ds := range groups {

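The new escapeExpansionRatio check prefers byte-oriented formatting when quoting would inflate the text. A quick sketch of the measurement (sample strings are illustrative):

```go
package main

import (
    "fmt"
    "strconv"
    "strings"
)

func main() {
    plain := strings.Repeat("hello world ", 10) // printable text
    binary := strings.Repeat("\x00\x01", 20)    // each byte quotes to \x00 etc.
    for _, s := range []string{plain, binary} {
        ratio := float64(len(strconv.Quote(s))) / float64(len(s))
        fmt.Printf("len=%d ratio=%.2f\n", len(s), ratio)
    }
    // Ratios near 1.0 keep line-oriented formatting; above the 1.1
    // threshold the diff falls back to byte-oriented formatting.
}
```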
1
vendor/github.com/google/go-cmp/cmp/report_text.go
generated
vendored
@ -393,6 +393,7 @@ func (s diffStats) Append(ds diffStats) diffStats {
// String prints a humanly-readable summary of coalesced records.
//
// Example:
//
//	diffStats{Name: "Field", NumIgnored: 5}.String() => "5 ignored fields"
func (s diffStats) String() string {
    var ss []string
50
vendor/github.com/klauspost/compress/README.md
generated
vendored
@ -17,6 +17,49 @@ This package provides various compression algorithms.

# changelog

* Sept 26, 2022 (v1.15.11)

  * flate: Improve level 1-3 compression https://github.com/klauspost/compress/pull/678
  * zstd: Improve "best" compression by @nightwolfz in https://github.com/klauspost/compress/pull/677
  * zstd: Fix+reduce decompression allocations https://github.com/klauspost/compress/pull/668
  * zstd: Fix non-effective noescape tag https://github.com/klauspost/compress/pull/667

* Sept 16, 2022 (v1.15.10)

  * zstd: Add [WithDecodeAllCapLimit](https://pkg.go.dev/github.com/klauspost/compress@v1.15.10/zstd#WithDecodeAllCapLimit) https://github.com/klauspost/compress/pull/649
  * Add Go 1.19 - deprecate Go 1.16 https://github.com/klauspost/compress/pull/651
  * flate: Improve level 5+6 compression https://github.com/klauspost/compress/pull/656
  * zstd: Improve "better" compression https://github.com/klauspost/compress/pull/657
  * s2: Improve "best" compression https://github.com/klauspost/compress/pull/658
  * s2: Improve "better" compression. https://github.com/klauspost/compress/pull/635
  * s2: Slightly faster non-assembly decompression https://github.com/klauspost/compress/pull/646
  * Use arrays for constant size copies https://github.com/klauspost/compress/pull/659

* July 21, 2022 (v1.15.9)

  * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
  * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
  * zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643

* July 13, 2022 (v1.15.8)

  * gzip: fix stack exhaustion bug in Reader.Read https://github.com/klauspost/compress/pull/641
  * s2: Add Index header trim/restore https://github.com/klauspost/compress/pull/638
  * zstd: Optimize seqdeq amd64 asm by @greatroar in https://github.com/klauspost/compress/pull/636
  * zstd: Improve decoder memcopy https://github.com/klauspost/compress/pull/637
  * huff0: Pass a single bitReader pointer to asm by @greatroar in https://github.com/klauspost/compress/pull/634
  * zstd: Branchless getBits for amd64 w/o BMI2 by @greatroar in https://github.com/klauspost/compress/pull/640
  * gzhttp: Remove header before writing https://github.com/klauspost/compress/pull/639

* June 29, 2022 (v1.15.7)

  * s2: Fix absolute forward seeks https://github.com/klauspost/compress/pull/633
  * zip: Merge upstream https://github.com/klauspost/compress/pull/631
  * zip: Re-add zip64 fix https://github.com/klauspost/compress/pull/624
  * zstd: translate fseDecoder.buildDtable into asm by @WojciechMula in https://github.com/klauspost/compress/pull/598
  * flate: Faster histograms https://github.com/klauspost/compress/pull/620
  * deflate: Use compound hcode https://github.com/klauspost/compress/pull/622

* June 3, 2022 (v1.15.6)
  * s2: Improve coding for long, close matches https://github.com/klauspost/compress/pull/613
  * s2c: Add Snappy/S2 stream recompression https://github.com/klauspost/compress/pull/611
@ -72,15 +115,15 @@ This package provides various compression algorithms.
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)

<details>
	<summary>See Details</summary>
Both compression and decompression now support "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.

Stream decompression is now faster when asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.

While the release has been extensively tested, it is recommended to test when upgrading.
</details>

<details>
	<summary>See changes to v1.14.x</summary>

* Feb 22, 2022 (v1.14.4)
  * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
  * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
@ -106,6 +149,7 @@ While the release has been extensively tested, it is recommended to test when
  * zstd: Performance improvement in [#420]( https://github.com/klauspost/compress/pull/420) [#456](https://github.com/klauspost/compress/pull/456) [#437](https://github.com/klauspost/compress/pull/437) [#467](https://github.com/klauspost/compress/pull/467) [#468](https://github.com/klauspost/compress/pull/468)
  * zstd: add arm64 xxhash assembly in [#464](https://github.com/klauspost/compress/pull/464)
  * Add garbled for binaries for s2 in [#445](https://github.com/klauspost/compress/pull/445)
</details>

<details>
	<summary>See changes to v1.13.x</summary>
36 vendor/github.com/klauspost/compress/huff0/decompress.go generated vendored
@@ -763,17 +763,20 @@ func (d *Decoder) decompress4X8bit(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
//copy(out, buf[0][:])
//copy(out[dstEvery:], buf[1][:])
//copy(out[dstEvery*2:], buf[2][:])
*(*[bufoff]byte)(out) = buf[0]
*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
out = out[bufoff:]
decoded += bufoff * 4
}
}
if off > 0 {
@@ -997,17 +1000,22 @@ func (d *Decoder) decompress4X8bitExactly(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}

//copy(out, buf[0][:])
//copy(out[dstEvery:], buf[1][:])
//copy(out[dstEvery*2:], buf[2][:])
// copy(out[dstEvery*3:], buf[3][:])
*(*[bufoff]byte)(out) = buf[0]
*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
out = out[bufoff:]
decoded += bufoff * 4
}
}
if off > 0 {
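The `*(*[bufoff]byte)(out) = buf[0]` stores above replace `copy` calls with Go's slice-to-array-pointer conversion (Go 1.17+), which turns a variable-length copy into a fixed-size move behind a single bounds check. A standalone sketch of the technique, with illustrative sizes rather than the vendored constants:

```go
package main

import "fmt"

func main() {
	var block [256]byte
	for i := range block {
		block[i] = byte(i)
	}

	out := make([]byte, 1024)

	// Equivalent to copy(out, block[:]), but the length is a
	// compile-time constant; panics if len(out) < 256.
	*(*[256]byte)(out) = block

	fmt.Println(out[255]) // 255
}
```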
14 vendor/github.com/klauspost/compress/huff0/decompress_amd64.go generated vendored
@@ -14,12 +14,14 @@ import (

// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog > 8.
//
//go:noescape
func decompress4x_main_loop_amd64(ctx *decompress4xContext)

// decompress4x_8b_loop_x86 is an x86 assembler implementation
// of Decompress4X when tablelog <= 8 which decodes 4 entries
// per loop.
//
//go:noescape
func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)

@@ -27,10 +29,7 @@ func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
const fallback8BitSize = 800

type decompress4xContext struct {
pbr0 *bitReaderShifted
pbr1 *bitReaderShifted
pbr2 *bitReaderShifted
pbr3 *bitReaderShifted
pbr *[4]bitReaderShifted
peekBits uint8
out *byte
dstEvery int
@@ -89,10 +88,7 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {

if len(out) > 4*4 && !(br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4) {
ctx := decompress4xContext{
pbr0: &br[0],
pbr1: &br[1],
pbr2: &br[2],
pbr3: &br[3],
pbr: &br,
peekBits: uint8((64 - d.actualTableLog) & 63), // see: bitReaderShifted.peekBitsFast()
out: &out[0],
dstEvery: dstEvery,
@@ -151,11 +147,13 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {

// decompress4x_main_loop_x86 is an x86 assembler implementation
// of Decompress1X when tablelog > 8.
//
//go:noescape
func decompress1x_main_loop_amd64(ctx *decompress1xContext)

// decompress4x_main_loop_x86 is an x86 with BMI2 assembler implementation
// of Decompress1X when tablelog > 8.
//
//go:noescape
func decompress1x_main_loop_bmi2(ctx *decompress1xContext)

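Collapsing the four `pbr0`..`pbr3` pointers into a single `pbr *[4]bitReaderShifted` lets the assembly reach every reader at a constant offset from one base register instead of juggling four. A hedged sketch of the layout assumption (field and type names here are illustrative, not the vendored ones):

```go
package main

import (
	"fmt"
	"unsafe"
)

type bitReader struct {
	value    uint64
	bitsRead uint8
}

func main() {
	var brs [4]bitReader
	base := uintptr(unsafe.Pointer(&brs))
	for i := range brs {
		// Each element sits at a fixed, compile-time-known offset
		// from the array base, which asm can encode directly.
		off := uintptr(unsafe.Pointer(&brs[i])) - base
		fmt.Printf("br%d at base+%d\n", i, off)
	}
}
```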
667 vendor/github.com/klauspost/compress/huff0/decompress_amd64.s generated vendored
@@ -1,48 +1,42 @@
// Code generated by command: go run gen.go -out ../decompress_amd64.s -pkg=huff0. DO NOT EDIT.

//go:build amd64 && !appengine && !noasm && gc
// +build amd64,!appengine,!noasm,gc

// func decompress4x_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_main_loop_amd64(SB), $8-8
TEXT ·decompress4x_main_loop_amd64(SB), $0-8
XORQ DX, DX

// Preload values
MOVQ ctx+0(FP), AX
MOVBQZX 32(AX), SI
MOVQ 40(AX), DI
MOVQ DI, BX
MOVQ 72(AX), CX
MOVQ CX, (SP)
MOVQ 48(AX), R8
MOVQ 56(AX), R9
MOVQ (AX), R10
MOVQ 8(AX), R11
MOVQ 16(AX), R12
MOVQ 24(AX), R13
MOVBQZX 8(AX), DI
MOVQ 16(AX), SI
MOVQ 48(AX), BX
MOVQ 24(AX), R9
MOVQ 32(AX), R10
MOVQ (AX), R11

// Main loop
main_loop:
MOVQ BX, DI
CMPQ DI, (SP)
MOVQ SI, R8
CMPQ R8, BX
SETGE DL

// br0.fillFast32()
MOVQ 32(R10), R14
MOVBQZX 40(R10), R15
CMPQ R15, $0x20
MOVQ 32(R11), R12
MOVBQZX 40(R11), R13
CMPQ R13, $0x20
JBE skip_fill0
MOVQ 24(R10), AX
SUBQ $0x20, R15
MOVQ 24(R11), AX
SUBQ $0x20, R13
SUBQ $0x04, AX
MOVQ (R10), BP
MOVQ (R11), R14

// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(BP*1), BP
MOVQ R15, CX
SHLQ CL, BP
MOVQ AX, 24(R10)
ORQ BP, R14
MOVL (AX)(R14*1), R14
MOVQ R13, CX
SHLQ CL, R14
MOVQ AX, 24(R11)
ORQ R14, R12

// exhausted = exhausted || (br0.off < 4)
CMPQ AX, $0x04
@@ -51,57 +45,57 @@ main_loop:

skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ R14, BP
MOVQ SI, CX
SHRQ CL, BP
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v0 := table[val0&mask]
MOVW (R9)(BP*2), CX
MOVW (R10)(R14*2), CX

// br0.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R14
ADDB CL, R15
SHLQ CL, R12
ADDB CL, R13

// val1 := br0.peekTopBits(peekBits)
MOVQ SI, CX
MOVQ R14, BP
SHRQ CL, BP
MOVQ DI, CX
MOVQ R12, R14
SHRQ CL, R14

// v1 := table[val1&mask]
MOVW (R9)(BP*2), CX
MOVW (R10)(R14*2), CX

// br0.advance(uint8(v1.entry))
MOVB CH, AH
SHLQ CL, R14
ADDB CL, R15
SHLQ CL, R12
ADDB CL, R13

// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (DI)
MOVW AX, (R8)

// update the bitrader reader structure
MOVQ R14, 32(R10)
MOVB R15, 40(R10)
ADDQ R8, DI
// update the bitreader structure
MOVQ R12, 32(R11)
MOVB R13, 40(R11)
ADDQ R9, R8

// br1.fillFast32()
MOVQ 32(R11), R14
MOVBQZX 40(R11), R15
CMPQ R15, $0x20
MOVQ 80(R11), R12
MOVBQZX 88(R11), R13
CMPQ R13, $0x20
JBE skip_fill1
MOVQ 24(R11), AX
SUBQ $0x20, R15
MOVQ 72(R11), AX
SUBQ $0x20, R13
SUBQ $0x04, AX
MOVQ (R11), BP
MOVQ 48(R11), R14

// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(BP*1), BP
MOVQ R15, CX
SHLQ CL, BP
MOVQ AX, 24(R11)
ORQ BP, R14
MOVL (AX)(R14*1), R14
MOVQ R13, CX
SHLQ CL, R14
MOVQ AX, 72(R11)
ORQ R14, R12

// exhausted = exhausted || (br1.off < 4)
CMPQ AX, $0x04
@@ -110,57 +104,57 @@ skip_fill0:

skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ R14, BP
MOVQ SI, CX
SHRQ CL, BP
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v0 := table[val0&mask]
MOVW (R9)(BP*2), CX
MOVW (R10)(R14*2), CX

// br1.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R14
ADDB CL, R15
SHLQ CL, R12
ADDB CL, R13

// val1 := br1.peekTopBits(peekBits)
MOVQ SI, CX
MOVQ R14, BP
SHRQ CL, BP
MOVQ DI, CX
MOVQ R12, R14
SHRQ CL, R14

// v1 := table[val1&mask]
MOVW (R9)(BP*2), CX
MOVW (R10)(R14*2), CX

// br1.advance(uint8(v1.entry))
MOVB CH, AH
SHLQ CL, R14
ADDB CL, R15
SHLQ CL, R12
ADDB CL, R13

// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (DI)
MOVW AX, (R8)

// update the bitrader reader structure
MOVQ R14, 32(R11)
MOVB R15, 40(R11)
ADDQ R8, DI
// update the bitreader structure
MOVQ R12, 80(R11)
MOVB R13, 88(R11)
ADDQ R9, R8

// br2.fillFast32()
MOVQ 32(R12), R14
MOVBQZX 40(R12), R15
CMPQ R15, $0x20
MOVQ 128(R11), R12
MOVBQZX 136(R11), R13
CMPQ R13, $0x20
JBE skip_fill2
MOVQ 24(R12), AX
SUBQ $0x20, R15
MOVQ 120(R11), AX
SUBQ $0x20, R13
SUBQ $0x04, AX
MOVQ (R12), BP
MOVQ 96(R11), R14

// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(BP*1), BP
MOVQ R15, CX
SHLQ CL, BP
MOVQ AX, 24(R12)
ORQ BP, R14
MOVL (AX)(R14*1), R14
MOVQ R13, CX
SHLQ CL, R14
MOVQ AX, 120(R11)
ORQ R14, R12

// exhausted = exhausted || (br2.off < 4)
CMPQ AX, $0x04
@@ -169,57 +163,57 @@ skip_fill1:

skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ R14, BP
MOVQ SI, CX
SHRQ CL, BP
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v0 := table[val0&mask]
MOVW (R9)(BP*2), CX
MOVW (R10)(R14*2), CX

// br2.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R14
ADDB CL, R15
SHLQ CL, R12
ADDB CL, R13

// val1 := br2.peekTopBits(peekBits)
MOVQ SI, CX
MOVQ R14, BP
SHRQ CL, BP
MOVQ DI, CX
MOVQ R12, R14
SHRQ CL, R14

// v1 := table[val1&mask]
MOVW (R9)(BP*2), CX
MOVW (R10)(R14*2), CX

// br2.advance(uint8(v1.entry))
MOVB CH, AH
SHLQ CL, R14
ADDB CL, R15
SHLQ CL, R12
ADDB CL, R13

// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (DI)
MOVW AX, (R8)

// update the bitrader reader structure
MOVQ R14, 32(R12)
MOVB R15, 40(R12)
ADDQ R8, DI
// update the bitreader structure
MOVQ R12, 128(R11)
MOVB R13, 136(R11)
ADDQ R9, R8

// br3.fillFast32()
MOVQ 32(R13), R14
MOVBQZX 40(R13), R15
CMPQ R15, $0x20
MOVQ 176(R11), R12
MOVBQZX 184(R11), R13
CMPQ R13, $0x20
JBE skip_fill3
MOVQ 24(R13), AX
SUBQ $0x20, R15
MOVQ 168(R11), AX
SUBQ $0x20, R13
SUBQ $0x04, AX
MOVQ (R13), BP
MOVQ 144(R11), R14

// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (AX)(BP*1), BP
MOVQ R15, CX
SHLQ CL, BP
MOVQ AX, 24(R13)
ORQ BP, R14
MOVL (AX)(R14*1), R14
MOVQ R13, CX
SHLQ CL, R14
MOVQ AX, 168(R11)
ORQ R14, R12

// exhausted = exhausted || (br3.off < 4)
CMPQ AX, $0x04
@@ -228,149 +222,142 @@ skip_fill2:

skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ R14, BP
MOVQ SI, CX
SHRQ CL, BP
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v0 := table[val0&mask]
MOVW (R9)(BP*2), CX
MOVW (R10)(R14*2), CX

// br3.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R14
ADDB CL, R15
SHLQ CL, R12
ADDB CL, R13

// val1 := br3.peekTopBits(peekBits)
MOVQ SI, CX
MOVQ R14, BP
SHRQ CL, BP
MOVQ DI, CX
MOVQ R12, R14
SHRQ CL, R14

// v1 := table[val1&mask]
MOVW (R9)(BP*2), CX
MOVW (R10)(R14*2), CX

// br3.advance(uint8(v1.entry))
MOVB CH, AH
SHLQ CL, R14
ADDB CL, R15
SHLQ CL, R12
ADDB CL, R13

// these two writes get coalesced
// out[id * dstEvery + 0] = uint8(v0.entry >> 8)
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
MOVW AX, (DI)
MOVW AX, (R8)

// update the bitrader reader structure
MOVQ R14, 32(R13)
MOVB R15, 40(R13)
ADDQ $0x02, BX
// update the bitreader structure
MOVQ R12, 176(R11)
MOVB R13, 184(R11)
ADDQ $0x02, SI
TESTB DL, DL
JZ main_loop
MOVQ ctx+0(FP), AX
MOVQ 40(AX), CX
MOVQ BX, DX
SUBQ CX, DX
SHLQ $0x02, DX
MOVQ DX, 64(AX)
SUBQ 16(AX), SI
SHLQ $0x02, SI
MOVQ SI, 40(AX)
RET

// func decompress4x_8b_main_loop_amd64(ctx *decompress4xContext)
TEXT ·decompress4x_8b_main_loop_amd64(SB), $16-8
TEXT ·decompress4x_8b_main_loop_amd64(SB), $0-8
XORQ DX, DX

// Preload values
MOVQ ctx+0(FP), CX
MOVBQZX 32(CX), BX
MOVQ 40(CX), SI
MOVQ SI, (SP)
MOVQ 72(CX), DX
MOVQ DX, 8(SP)
MOVQ 48(CX), DI
MOVQ 56(CX), R8
MOVQ (CX), R9
MOVQ 8(CX), R10
MOVQ 16(CX), R11
MOVQ 24(CX), R12
MOVBQZX 8(CX), DI
MOVQ 16(CX), BX
MOVQ 48(CX), SI
MOVQ 24(CX), R9
MOVQ 32(CX), R10
MOVQ (CX), R11

// Main loop
main_loop:
MOVQ (SP), SI
CMPQ SI, 8(SP)
MOVQ BX, R8
CMPQ R8, SI
SETGE DL

// br1000.fillFast32()
MOVQ 32(R9), R13
MOVBQZX 40(R9), R14
CMPQ R14, $0x20
JBE skip_fill1000
MOVQ 24(R9), R15
SUBQ $0x20, R14
SUBQ $0x04, R15
MOVQ (R9), BP
// br0.fillFast32()
MOVQ 32(R11), R12
MOVBQZX 40(R11), R13
CMPQ R13, $0x20
JBE skip_fill0
MOVQ 24(R11), R14
SUBQ $0x20, R13
SUBQ $0x04, R14
MOVQ (R11), R15

// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R15)(BP*1), BP
MOVQ R14, CX
SHLQ CL, BP
MOVQ R15, 24(R9)
ORQ BP, R13
MOVL (R14)(R15*1), R15
MOVQ R13, CX
SHLQ CL, R15
MOVQ R14, 24(R11)
ORQ R15, R12

// exhausted = exhausted || (br1000.off < 4)
CMPQ R15, $0x04
// exhausted = exhausted || (br0.off < 4)
CMPQ R14, $0x04
SETLT AL
ORB AL, DL

skip_fill1000:
skip_fill0:
// val0 := br0.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v0 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br0.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13

// val1 := br0.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v1 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br0.advance(uint8(v1.entry)
MOVB CH, AH
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13
BSWAPL AX

// val2 := br0.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v2 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br0.advance(uint8(v2.entry)
MOVB CH, AH
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13

// val3 := br0.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v3 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br0.advance(uint8(v3.entry)
MOVB CH, AL
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13
BSWAPL AX

// these four writes get coalesced
@@ -378,88 +365,88 @@ skip_fill1000:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (SI)
MOVL AX, (R8)

// update the bitreader reader structure
MOVQ R13, 32(R9)
MOVB R14, 40(R9)
ADDQ DI, SI
// update the bitreader structure
MOVQ R12, 32(R11)
MOVB R13, 40(R11)
ADDQ R9, R8

// br1001.fillFast32()
MOVQ 32(R10), R13
MOVBQZX 40(R10), R14
CMPQ R14, $0x20
JBE skip_fill1001
MOVQ 24(R10), R15
SUBQ $0x20, R14
SUBQ $0x04, R15
MOVQ (R10), BP
// br1.fillFast32()
MOVQ 80(R11), R12
MOVBQZX 88(R11), R13
CMPQ R13, $0x20
JBE skip_fill1
MOVQ 72(R11), R14
SUBQ $0x20, R13
SUBQ $0x04, R14
MOVQ 48(R11), R15

// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R15)(BP*1), BP
MOVQ R14, CX
SHLQ CL, BP
MOVQ R15, 24(R10)
ORQ BP, R13
MOVL (R14)(R15*1), R15
MOVQ R13, CX
SHLQ CL, R15
MOVQ R14, 72(R11)
ORQ R15, R12

// exhausted = exhausted || (br1001.off < 4)
CMPQ R15, $0x04
// exhausted = exhausted || (br1.off < 4)
CMPQ R14, $0x04
SETLT AL
ORB AL, DL

skip_fill1001:
skip_fill1:
// val0 := br1.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v0 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br1.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13

// val1 := br1.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v1 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br1.advance(uint8(v1.entry)
MOVB CH, AH
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13
BSWAPL AX

// val2 := br1.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v2 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br1.advance(uint8(v2.entry)
MOVB CH, AH
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13

// val3 := br1.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v3 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br1.advance(uint8(v3.entry)
MOVB CH, AL
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13
BSWAPL AX

// these four writes get coalesced
@@ -467,88 +454,88 @@ skip_fill1001:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (SI)
MOVL AX, (R8)

// update the bitreader reader structure
MOVQ R13, 32(R10)
MOVB R14, 40(R10)
ADDQ DI, SI
// update the bitreader structure
MOVQ R12, 80(R11)
MOVB R13, 88(R11)
ADDQ R9, R8

// br1002.fillFast32()
MOVQ 32(R11), R13
MOVBQZX 40(R11), R14
CMPQ R14, $0x20
JBE skip_fill1002
MOVQ 24(R11), R15
SUBQ $0x20, R14
SUBQ $0x04, R15
MOVQ (R11), BP
// br2.fillFast32()
MOVQ 128(R11), R12
MOVBQZX 136(R11), R13
CMPQ R13, $0x20
JBE skip_fill2
MOVQ 120(R11), R14
SUBQ $0x20, R13
SUBQ $0x04, R14
MOVQ 96(R11), R15

// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R15)(BP*1), BP
MOVQ R14, CX
SHLQ CL, BP
MOVQ R15, 24(R11)
ORQ BP, R13
MOVL (R14)(R15*1), R15
MOVQ R13, CX
SHLQ CL, R15
MOVQ R14, 120(R11)
ORQ R15, R12

// exhausted = exhausted || (br1002.off < 4)
CMPQ R15, $0x04
// exhausted = exhausted || (br2.off < 4)
CMPQ R14, $0x04
SETLT AL
ORB AL, DL

skip_fill1002:
skip_fill2:
// val0 := br2.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v0 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br2.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13

// val1 := br2.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v1 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br2.advance(uint8(v1.entry)
MOVB CH, AH
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13
BSWAPL AX

// val2 := br2.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v2 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br2.advance(uint8(v2.entry)
MOVB CH, AH
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13

// val3 := br2.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v3 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br2.advance(uint8(v3.entry)
MOVB CH, AL
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13
BSWAPL AX

// these four writes get coalesced
@@ -556,88 +543,88 @@ skip_fill1002:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (SI)
MOVL AX, (R8)

// update the bitreader reader structure
MOVQ R13, 32(R11)
MOVB R14, 40(R11)
ADDQ DI, SI
// update the bitreader structure
MOVQ R12, 128(R11)
MOVB R13, 136(R11)
ADDQ R9, R8

// br1003.fillFast32()
MOVQ 32(R12), R13
MOVBQZX 40(R12), R14
CMPQ R14, $0x20
JBE skip_fill1003
MOVQ 24(R12), R15
SUBQ $0x20, R14
SUBQ $0x04, R15
MOVQ (R12), BP
// br3.fillFast32()
MOVQ 176(R11), R12
MOVBQZX 184(R11), R13
CMPQ R13, $0x20
JBE skip_fill3
MOVQ 168(R11), R14
SUBQ $0x20, R13
SUBQ $0x04, R14
MOVQ 144(R11), R15

// b.value |= uint64(low) << (b.bitsRead & 63)
MOVL (R15)(BP*1), BP
MOVQ R14, CX
SHLQ CL, BP
MOVQ R15, 24(R12)
ORQ BP, R13
MOVL (R14)(R15*1), R15
MOVQ R13, CX
SHLQ CL, R15
MOVQ R14, 168(R11)
ORQ R15, R12

// exhausted = exhausted || (br1003.off < 4)
CMPQ R15, $0x04
// exhausted = exhausted || (br3.off < 4)
CMPQ R14, $0x04
SETLT AL
ORB AL, DL

skip_fill1003:
skip_fill3:
// val0 := br3.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v0 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br3.advance(uint8(v0.entry)
MOVB CH, AL
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13

// val1 := br3.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v1 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br3.advance(uint8(v1.entry)
MOVB CH, AH
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13
BSWAPL AX

// val2 := br3.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v2 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br3.advance(uint8(v2.entry)
MOVB CH, AH
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13

// val3 := br3.peekTopBits(peekBits)
MOVQ R13, R15
MOVQ BX, CX
SHRQ CL, R15
MOVQ R12, R14
MOVQ DI, CX
SHRQ CL, R14

// v3 := table[val0&mask]
MOVW (R8)(R15*2), CX
MOVW (R10)(R14*2), CX

// br3.advance(uint8(v3.entry)
MOVB CH, AL
SHLQ CL, R13
ADDB CL, R14
SHLQ CL, R12
ADDB CL, R13
BSWAPL AX

// these four writes get coalesced
@@ -645,20 +632,18 @@ skip_fill1003:
// out[id * dstEvery + 1] = uint8(v1.entry >> 8)
// out[id * dstEvery + 3] = uint8(v2.entry >> 8)
// out[id * dstEvery + 4] = uint8(v3.entry >> 8)
MOVL AX, (SI)
MOVL AX, (R8)

// update the bitreader reader structure
MOVQ R13, 32(R12)
MOVB R14, 40(R12)
ADDQ $0x04, (SP)
// update the bitreader structure
MOVQ R12, 176(R11)
MOVB R13, 184(R11)
ADDQ $0x04, BX
TESTB DL, DL
JZ main_loop
MOVQ ctx+0(FP), AX
MOVQ 40(AX), CX
MOVQ (SP), DX
SUBQ CX, DX
SHLQ $0x02, DX
MOVQ DX, 64(AX)
SUBQ 16(AX), BX
SHLQ $0x02, BX
MOVQ BX, 40(AX)
RET

// func decompress1x_main_loop_amd64(ctx *decompress1xContext)
@@ -750,10 +735,8 @@ loop_condition:

// Update ctx structure
MOVQ ctx+0(FP), AX
MOVQ DX, CX
MOVQ 16(AX), DX
SUBQ DX, CX
MOVQ CX, 40(AX)
SUBQ 16(AX), DX
MOVQ DX, 40(AX)
MOVQ (AX), AX
MOVQ R9, 24(AX)
MOVQ R10, 32(AX)
@@ -847,10 +830,8 @@ loop_condition:

// Update ctx structure
MOVQ ctx+0(FP), AX
MOVQ DX, CX
MOVQ 16(AX), DX
SUBQ DX, CX
MOVQ CX, 40(AX)
SUBQ 16(AX), DX
MOVQ DX, 40(AX)
MOVQ (AX), AX
MOVQ R9, 24(AX)
MOVQ R10, 32(AX)
18 vendor/github.com/klauspost/compress/huff0/decompress_generic.go generated vendored
@@ -122,17 +122,21 @@ func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
if len(out)-bufoff < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
//copy(out, buf[0][:])
//copy(out[dstEvery:], buf[1][:])
//copy(out[dstEvery*2:], buf[2][:])
//copy(out[dstEvery*3:], buf[3][:])
*(*[bufoff]byte)(out) = buf[0]
*(*[bufoff]byte)(out[dstEvery:]) = buf[1]
*(*[bufoff]byte)(out[dstEvery*2:]) = buf[2]
*(*[bufoff]byte)(out[dstEvery*3:]) = buf[3]
out = out[bufoff:]
decoded += bufoff * 4
}
}
if off > 0 {
6 vendor/github.com/klauspost/compress/internal/snapref/encode_other.go generated vendored
@@ -18,6 +18,7 @@ func load64(b []byte, i int) uint64 {
// emitLiteral writes a literal chunk and returns the number of bytes written.
//
// It assumes that:
//
// dst is long enough to hold the encoded bytes
// 1 <= len(lit) && len(lit) <= 65536
func emitLiteral(dst, lit []byte) int {
@@ -42,6 +43,7 @@ func emitLiteral(dst, lit []byte) int {
// emitCopy writes a copy chunk and returns the number of bytes written.
//
// It assumes that:
//
// dst is long enough to hold the encoded bytes
// 1 <= offset && offset <= 65535
// 4 <= length && length <= 65535
@@ -89,6 +91,7 @@ func emitCopy(dst []byte, offset, length int) int {
// src[i:i+k-j] and src[j:k] have the same contents.
//
// It assumes that:
//
// 0 <= i && i < j && j <= len(src)
func extendMatch(src []byte, i, j int) int {
for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 {
@@ -105,8 +108,9 @@ func hash(u, shift uint32) uint32 {
// been written.
//
// It also assumes that:
//
// len(dst) >= MaxEncodedLen(len(src)) &&
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize
func encodeBlock(dst, src []byte) (d int) {
// Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive.
// The table element type is uint16, as s < sLimit and sLimit < len(src)
2 vendor/github.com/klauspost/compress/zstd/README.md generated vendored
@@ -12,6 +12,8 @@ The `zstd` package is provided as open source software using a Go standard license.

Currently the package is heavily optimized for 64 bit processors and will be significantly slower on 32 bit processors.

For seekable zstd streams, see [this excellent package](https://github.com/SaveTheRbtz/zstd-seekable-format-go).

## Installation

Install using `go get -u github.com/klauspost/compress`. The package is located in `github.com/klauspost/compress/zstd`.
5 vendor/github.com/klauspost/compress/zstd/blockdec.go generated vendored
@@ -10,7 +10,6 @@ import (
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"sync"
@@ -233,7 +232,7 @@ func (b *blockDec) decodeBuf(hist *history) error {
if b.lowMem {
b.dst = make([]byte, b.RLESize)
} else {
b.dst = make([]byte, maxBlockSize)
b.dst = make([]byte, maxCompressedBlockSize)
}
}
b.dst = b.dst[:b.RLESize]
@@ -651,7 +650,7 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
ioutil.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}

return nil
16 vendor/github.com/klauspost/compress/zstd/bytebuf.go generated vendored
@@ -7,7 +7,6 @@ package zstd
import (
"fmt"
"io"
"io/ioutil"
)

type byteBuffer interface {
@@ -23,7 +22,7 @@ type byteBuffer interface {
readByte() (byte, error)

// Skip n bytes.
skipN(n int) error
skipN(n int64) error
}

// in-memory buffer
@@ -62,9 +61,12 @@ func (b *byteBuf) readByte() (byte, error) {
return r, nil
}

func (b *byteBuf) skipN(n int) error {
func (b *byteBuf) skipN(n int64) error {
bb := *b
if len(bb) < n {
if n < 0 {
return fmt.Errorf("negative skip (%d) requested", n)
}
if int64(len(bb)) < n {
return io.ErrUnexpectedEOF
}
*b = bb[n:]
@@ -120,9 +122,9 @@ func (r *readerWrapper) readByte() (byte, error) {
return r.tmp[0], nil
}

func (r *readerWrapper) skipN(n int) error {
n2, err := io.CopyN(ioutil.Discard, r.r, int64(n))
if n2 != int64(n) {
func (r *readerWrapper) skipN(n int64) error {
n2, err := io.CopyN(io.Discard, r.r, n)
if n2 != n {
err = io.ErrUnexpectedEOF
}
return err
47 vendor/github.com/klauspost/compress/zstd/decoder.go generated vendored
@@ -35,6 +35,7 @@ type Decoder struct {
br readerWrapper
enabled bool
inFrame bool
dstBuf []byte
}

frame *frameDec
@@ -187,21 +188,23 @@ func (d *Decoder) Reset(r io.Reader) error {
}

// If bytes buffer and < 5MB, do sync decoding anyway.
if bb, ok := r.(byter); ok && bb.Len() < 5<<20 {
if bb, ok := r.(byter); ok && bb.Len() < d.o.decodeBufsBelow && !d.o.limitToCap {
bb2 := bb
if debugDecoder {
println("*bytes.Buffer detected, doing sync decode, len:", bb.Len())
}
b := bb2.Bytes()
var dst []byte
if cap(d.current.b) > 0 {
dst = d.current.b
if cap(d.syncStream.dstBuf) > 0 {
dst = d.syncStream.dstBuf[:0]
}

dst, err := d.DecodeAll(b, dst[:0])
dst, err := d.DecodeAll(b, dst)
if err == nil {
err = io.EOF
}
// Save output buffer
d.syncStream.dstBuf = dst
d.current.b = dst
d.current.err = err
d.current.flushed = true
@@ -216,6 +219,7 @@ func (d *Decoder) Reset(r io.Reader) error {
d.current.err = nil
d.current.flushed = false
d.current.d = nil
d.syncStream.dstBuf = nil

// Ensure no-one else is still running...
d.streamWg.Wait()
@@ -312,6 +316,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
// Grab a block decoder and frame decoder.
block := <-d.decoders
frame := block.localFrame
initialSize := len(dst)
defer func() {
if debugDecoder {
printf("re-adding decoder: %p", block)
@@ -348,10 +353,22 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.history.setDict(&dict)
}
if frame.WindowSize > d.o.maxWindowSize {
if debugDecoder {
println("window size exceeded:", frame.WindowSize, ">", d.o.maxWindowSize)
}
return dst, ErrWindowSizeExceeded
}
if frame.FrameContentSize != fcsUnknown {
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)-initialSize) {
if debugDecoder {
println("decoder size exceeded; fcs:", frame.FrameContentSize, "> mcs:", d.o.maxDecodedSize-uint64(len(dst)-initialSize), "len:", len(dst))
}
return dst, ErrDecoderSizeExceeded
}
if d.o.limitToCap && frame.FrameContentSize > uint64(cap(dst)-len(dst)) {
if debugDecoder {
println("decoder size exceeded; fcs:", frame.FrameContentSize, "> (cap-len)", cap(dst)-len(dst))
}
return dst, ErrDecoderSizeExceeded
}
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
@@ -361,7 +378,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
}
}

if cap(dst) == 0 {
if cap(dst) == 0 && !d.o.limitToCap {
// Allocate len(input) * 2 by default if nothing is provided
// and we didn't get frame content size.
size := len(input) * 2
@@ -379,6 +396,9 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
if err != nil {
return dst, err
}
if uint64(len(dst)-initialSize) > d.o.maxDecodedSize {
return dst, ErrDecoderSizeExceeded
}
if len(frame.bBuf) == 0 {
if debugDecoder {
println("frame dbuf empty")
@@ -664,6 +684,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
if debugDecoder {
println("Async 1: new history, recent:", block.async.newHist.recentOffsets)
}
hist.reset()
hist.decoders = block.async.newHist.decoders
hist.recentOffsets = block.async.newHist.recentOffsets
hist.windowSize = block.async.newHist.windowSize
@@ -695,6 +716,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
seqExecute <- block
}
close(seqExecute)
hist.reset()
}()

var wg sync.WaitGroup
@@ -718,6 +740,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
if debugDecoder {
println("Async 2: new history")
}
hist.reset()
hist.windowSize = block.async.newHist.windowSize
hist.allocFrameBuffer = block.async.newHist.allocFrameBuffer
if block.async.newHist.dict != nil {
@@ -747,7 +770,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
if block.lowMem {
block.dst = make([]byte, block.RLESize)
} else {
block.dst = make([]byte, maxBlockSize)
block.dst = make([]byte, maxCompressedBlockSize)
}
}
block.dst = block.dst[:block.RLESize]
@@ -799,13 +822,14 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
if debugDecoder {
println("decoder goroutines finished")
}
hist.reset()
}()

var hist history
decodeStream:
for {
var hist history
var hasErr bool

hist.reset()
decodeBlock := func(block *blockDec) {
if hasErr {
if block != nil {
@@ -849,6 +873,10 @@ decodeStream:
}
}
if err == nil && d.frame.WindowSize > d.o.maxWindowSize {
if debugDecoder {
println("decoder size exceeded, fws:", d.frame.WindowSize, "> mws:", d.o.maxWindowSize)
}

err = ErrDecoderSizeExceeded
}
if err != nil {
@@ -917,5 +945,6 @@ decodeStream:
}
close(seqDecode)
wg.Wait()
hist.reset()
d.frame.history.b = frameHistCache
}
44 vendor/github.com/klauspost/compress/zstd/decoder_options.go generated vendored
@@ -14,20 +14,23 @@ type DOption func(*decoderOptions) error

// options retains accumulated state of multiple options.
type decoderOptions struct {
lowMem bool
concurrent int
maxDecodedSize uint64
maxWindowSize uint64
dicts []dict
ignoreChecksum bool
lowMem bool
concurrent int
maxDecodedSize uint64
maxWindowSize uint64
dicts []dict
ignoreChecksum bool
limitToCap bool
decodeBufsBelow int
}

func (o *decoderOptions) setDefault() {
*o = decoderOptions{
// use less ram: true for now, but may change.
lowMem: true,
concurrent: runtime.GOMAXPROCS(0),
maxWindowSize: MaxWindowSize,
lowMem: true,
concurrent: runtime.GOMAXPROCS(0),
maxWindowSize: MaxWindowSize,
decodeBufsBelow: 128 << 10,
}
if o.concurrent > 4 {
o.concurrent = 4
@@ -114,6 +117,29 @@ func WithDecoderMaxWindow(size uint64) DOption {
}
}

// WithDecodeAllCapLimit will limit DecodeAll to decoding cap(dst)-len(dst) bytes,
// or any size set in WithDecoderMaxMemory.
// This can be used to limit decoding to a specific maximum output size.
// Disabled by default.
func WithDecodeAllCapLimit(b bool) DOption {
return func(o *decoderOptions) error {
o.limitToCap = b
return nil
}
}

// WithDecodeBuffersBelow will fully decode readers that have a
// `Bytes() []byte` and `Len() int` interface similar to bytes.Buffer.
// This typically uses less allocations but will have the full decompressed object in memory.
// Note that DecodeAllCapLimit will disable this, as well as giving a size of 0 or less.
// Default is 128KiB.
func WithDecodeBuffersBelow(size int) DOption {
return func(o *decoderOptions) error {
o.decodeBufsBelow = size
return nil
}
}

// IgnoreChecksum allows to forcibly ignore checksum checking.
func IgnoreChecksum(b bool) DOption {
return func(o *decoderOptions) error {
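A minimal usage sketch for the new `WithDecodeAllCapLimit` option (the helper below is illustrative, not part of the diff): pre-size `dst`, and a frame that would decompress past its capacity fails with `ErrDecoderSizeExceeded` instead of growing the buffer:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func decodeAtMost(compressed []byte, limit int) ([]byte, error) {
	dec, err := zstd.NewReader(nil,
		zstd.WithDecodeAllCapLimit(true),
		zstd.WithDecoderConcurrency(1),
	)
	if err != nil {
		return nil, err
	}
	defer dec.Close()

	// cap(dst)-len(dst) == limit, so the decoder refuses output
	// beyond limit bytes rather than reallocating.
	dst := make([]byte, 0, limit)
	return dec.DecodeAll(compressed, dst)
}

func main() {
	enc, _ := zstd.NewWriter(nil)
	comp := enc.EncodeAll([]byte("hello zstd"), nil)
	enc.Close()

	out, err := decodeAtMost(comp, 1<<10)
	fmt.Println(string(out), err)
}
```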
1 vendor/github.com/klauspost/compress/zstd/enc_best.go generated vendored
@@ -32,6 +32,7 @@ type match struct {
length int32
rep int32
est int32
_ [12]byte // Aligned size to cache line: 4+4+4+4+4 bytes + 12 bytes padding = 32 bytes
}

const highScore = 25000

23 vendor/github.com/klauspost/compress/zstd/enc_better.go generated vendored
@@ -416,15 +416,23 @@ encodeLoop:

// Try to find a better match by searching for a long match at the end of the current best match
if s+matched < sLimit {
// Allow some bytes at the beginning to mismatch.
// Sweet spot is around 3 bytes, but depends on input.
// The skipped bytes are tested in Extend backwards,
// and still picked up as part of the match if they do.
const skipBeginning = 3

nextHashL := hashLen(load6432(src, s+matched), betterLongTableBits, betterLongLen)
cv := load3232(src, s)
s2 := s + skipBeginning
cv := load3232(src, s2)
candidateL := e.longTable[nextHashL]
coffsetL := candidateL.offset - e.cur - matched
if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
coffsetL := candidateL.offset - e.cur - matched + skipBeginning
if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
s = s2
matched = matchedNext
if debugMatches {
println("long match at end-of-match")
@@ -434,12 +442,13 @@ encodeLoop:

// Check prev long...
if true {
coffsetL = candidateL.prev - e.cur - matched
if coffsetL >= 0 && coffsetL < s && s-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
coffsetL = candidateL.prev - e.cur - matched + skipBeginning
if coffsetL >= 0 && coffsetL < s2 && s2-coffsetL < e.maxMatchOff && cv == load3232(src, coffsetL) {
// Found a long match, at least 4 bytes.
matchedNext := e.matchlen(s+4, coffsetL+4, src) + 4
matchedNext := e.matchlen(s2+4, coffsetL+4, src) + 4
if matchedNext > matched {
t = coffsetL
s = s2
matched = matchedNext
if debugMatches {
println("prev long match at end-of-match")
7 vendor/github.com/klauspost/compress/zstd/enc_dfast.go generated vendored
@@ -1103,7 +1103,8 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
}

if allDirty || dirtyShardCnt > dLongTableShardCnt/2 {
copy(e.longTable[:], e.dictLongTable)
//copy(e.longTable[:], e.dictLongTable)
e.longTable = *(*[dFastLongTableSize]tableEntry)(e.dictLongTable)
for i := range e.longTableShardDirty {
e.longTableShardDirty[i] = false
}
@@ -1114,7 +1115,9 @@ func (e *doubleFastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}

copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
// copy(e.longTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize], e.dictLongTable[i*dLongTableShardSize:(i+1)*dLongTableShardSize])
*(*[dLongTableShardSize]tableEntry)(e.longTable[i*dLongTableShardSize:]) = *(*[dLongTableShardSize]tableEntry)(e.dictLongTable[i*dLongTableShardSize:])

e.longTableShardDirty[i] = false
}
}

8 vendor/github.com/klauspost/compress/zstd/enc_fast.go generated vendored
@@ -304,7 +304,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
minNonLiteralBlockSize = 1 + 1 + inputMargin
)
if debugEncoder {
if len(src) > maxBlockSize {
if len(src) > maxCompressedBlockSize {
panic("src too big")
}
}
@@ -871,7 +871,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
const shardCnt = tableShardCnt
const shardSize = tableShardSize
if e.allDirty || dirtyShardCnt > shardCnt*4/6 {
copy(e.table[:], e.dictTable)
//copy(e.table[:], e.dictTable)
e.table = *(*[tableSize]tableEntry)(e.dictTable)
for i := range e.tableShardDirty {
e.tableShardDirty[i] = false
}
@@ -883,7 +884,8 @@ func (e *fastEncoderDict) Reset(d *dict, singleBlock bool) {
continue
}

copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
//copy(e.table[i*shardSize:(i+1)*shardSize], e.dictTable[i*shardSize:(i+1)*shardSize])
*(*[shardSize]tableEntry)(e.table[i*shardSize:]) = *(*[shardSize]tableEntry)(e.dictTable[i*shardSize:])
e.tableShardDirty[i] = false
}
e.allDirty = false
4 vendor/github.com/klauspost/compress/zstd/encoder.go generated vendored
@@ -528,8 +528,8 @@ func (e *Encoder) EncodeAll(src, dst []byte) []byte {
// If a non-single block is needed the encoder will reset again.
e.encoders <- enc
}()
// Use single segments when above minimum window and below 1MB.
single := len(src) < 1<<20 && len(src) > MinWindowSize
// Use single segments when above minimum window and below window size.
single := len(src) <= e.o.windowSize && len(src) > MinWindowSize
if e.o.single != nil {
single = *e.o.single
}

2 vendor/github.com/klauspost/compress/zstd/encoder_options.go generated vendored
@@ -283,7 +283,7 @@ func WithNoEntropyCompression(b bool) EOption {
// a decoder is allowed to reject a compressed frame which requests a memory size beyond decoder's authorized range.
// For broader compatibility, decoders are recommended to support memory sizes of at least 8 MB.
// This is only a recommendation, each decoder is free to support higher or lower limits, depending on local limitations.
// If this is not specified, block encodes will automatically choose this based on the input size.
// If this is not specified, block encodes will automatically choose this based on the input size and the window size.
// This setting has no effect on streamed encodes.
func WithSingleSegment(b bool) EOption {
return func(o *encoderOptions) error {
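As a usage sketch (not part of the diff), the size-based heuristic above can still be overridden per encoder with `WithSingleSegment`:

```go
package main

import (
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Force single-segment frames regardless of the input-size heuristic.
	enc, err := zstd.NewWriter(nil, zstd.WithSingleSegment(true))
	if err != nil {
		panic(err)
	}
	defer enc.Close()

	frame := enc.EncodeAll([]byte("payload"), nil)
	fmt.Println(len(frame))
}
```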
57 vendor/github.com/klauspost/compress/zstd/framedec.go generated vendored
@@ -106,7 +106,7 @@ func (d *frameDec) reset(br byteBuffer) error {
}
n := uint32(b[0]) | (uint32(b[1]) << 8) | (uint32(b[2]) << 16) | (uint32(b[3]) << 24)
println("Skipping frame with", n, "bytes.")
err = br.skipN(int(n))
err = br.skipN(int64(n))
if err != nil {
if debugDecoder {
println("Reading discarded frame", err)
@@ -231,20 +231,27 @@ func (d *frameDec) reset(br byteBuffer) error {
d.crc.Reset()
}

if d.WindowSize > d.o.maxWindowSize {
if debugDecoder {
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
}
return ErrWindowSizeExceeded
}

if d.WindowSize == 0 && d.SingleSegment {
// We may not need window in this case.
d.WindowSize = d.FrameContentSize
if d.WindowSize < MinWindowSize {
d.WindowSize = MinWindowSize
}
if d.WindowSize > d.o.maxDecodedSize {
if debugDecoder {
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
}
return ErrDecoderSizeExceeded
}
}

if d.WindowSize > uint64(d.o.maxWindowSize) {
if debugDecoder {
printf("window size %d > max %d\n", d.WindowSize, d.o.maxWindowSize)
}
return ErrWindowSizeExceeded
}
// The minimum Window_Size is 1 KB.
if d.WindowSize < MinWindowSize {
if debugDecoder {
@@ -254,11 +261,16 @@ func (d *frameDec) reset(br byteBuffer) error {
}
d.history.windowSize = int(d.WindowSize)
if !d.o.lowMem || d.history.windowSize < maxBlockSize {
// Alloc 2x window size if not low-mem, or very small window size.
// Alloc 2x window size if not low-mem, or window size below 2MB.
d.history.allocFrameBuffer = d.history.windowSize * 2
} else {
// Alloc with one additional block
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
if d.o.lowMem {
// Alloc with 1MB extra.
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize/2
} else {
// Alloc with 2MB extra.
d.history.allocFrameBuffer = d.history.windowSize + maxBlockSize
}
}

if debugDecoder {
@@ -336,7 +348,7 @@ func (d *frameDec) consumeCRC() error {
return nil
}

// runDecoder will create a sync decoder that will decode a block of data.
// runDecoder will run the decoder for the remainder of the frame.
func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
saved := d.history.b

@@ -346,12 +358,23 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
// Store input length, so we only check new data.
crcStart := len(dst)
d.history.decoders.maxSyncLen = 0
if d.o.limitToCap {
d.history.decoders.maxSyncLen = uint64(cap(dst) - len(dst))
}
if d.FrameContentSize != fcsUnknown {
d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
if !d.o.limitToCap || d.FrameContentSize+uint64(len(dst)) < d.history.decoders.maxSyncLen {
d.history.decoders.maxSyncLen = d.FrameContentSize + uint64(len(dst))
}
if d.history.decoders.maxSyncLen > d.o.maxDecodedSize {
if debugDecoder {
println("maxSyncLen:", d.history.decoders.maxSyncLen, "> maxDecodedSize:", d.o.maxDecodedSize)
}
return dst, ErrDecoderSizeExceeded
}
if uint64(cap(dst)) < d.history.decoders.maxSyncLen {
if debugDecoder {
println("maxSyncLen:", d.history.decoders.maxSyncLen)
}
if !d.o.limitToCap && uint64(cap(dst)) < d.history.decoders.maxSyncLen {
// Alloc for output
dst2 := make([]byte, len(dst), d.history.decoders.maxSyncLen+compressedBlockOverAlloc)
copy(dst2, dst)
@@ -371,7 +394,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
if err != nil {
break
}
if uint64(len(d.history.b)) > d.o.maxDecodedSize {
if uint64(len(d.history.b)-crcStart) > d.o.maxDecodedSize {
println("runDecoder: maxDecodedSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.o.maxDecodedSize)
err = ErrDecoderSizeExceeded
break
}
if d.o.limitToCap && len(d.history.b) > cap(dst) {
println("runDecoder: cap exceeded", uint64(len(d.history.b)), ">", cap(dst))
err = ErrDecoderSizeExceeded
break
}
7 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.go generated vendored
@@ -21,7 +21,8 @@ type buildDtableAsmContext struct {

// buildDtable_asm is an x86 assembly implementation of fseDecoder.buildDtable.
// Function returns non-zero exit code on error.
// go:noescape
//
//go:noescape
func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int

// please keep in sync with _generate/gen_fse.go
@@ -34,8 +35,8 @@ const (
// buildDtable will build the decoding table.
func (s *fseDecoder) buildDtable() error {
ctx := buildDtableAsmContext{
stateTable: (*uint16)(&s.stateTable[0]),
norm: (*int16)(&s.norm[0]),
stateTable: &s.stateTable[0],
norm: &s.norm[0],
dt: (*uint64)(&s.dt[0]),
}
code := buildDtable_asm(s, &ctx)

1 vendor/github.com/klauspost/compress/zstd/fse_decoder_amd64.s generated vendored
@@ -1,7 +1,6 @@
// Code generated by command: go run gen_fse.go -out ../fse_decoder_amd64.s -pkg=zstd. DO NOT EDIT.

//go:build !appengine && !noasm && gc && !noasm
// +build !appengine,!noasm,gc,!noasm

// func buildDtable_asm(s *fseDecoder, ctx *buildDtableAsmContext) int
TEXT ·buildDtable_asm(SB), $0-24
25 vendor/github.com/klauspost/compress/zstd/history.go generated vendored
@@ -37,26 +37,23 @@ func (h *history) reset() {
h.ignoreBuffer = 0
h.error = false
h.recentOffsets = [3]int{1, 4, 8}
if f := h.decoders.litLengths.fse; f != nil && !f.preDefined {
fseDecoderPool.Put(f)
}
if f := h.decoders.offsets.fse; f != nil && !f.preDefined {
fseDecoderPool.Put(f)
}
if f := h.decoders.matchLengths.fse; f != nil && !f.preDefined {
fseDecoderPool.Put(f)
}
h.decoders.freeDecoders()
h.decoders = sequenceDecs{br: h.decoders.br}
if h.huffTree != nil {
if h.dict == nil || h.dict.litEnc != h.huffTree {
huffDecoderPool.Put(h.huffTree)
}
}
h.freeHuffDecoder()
h.huffTree = nil
h.dict = nil
//printf("history created: %+v (l: %d, c: %d)", *h, len(h.b), cap(h.b))
}

func (h *history) freeHuffDecoder() {
if h.huffTree != nil {
if h.dict == nil || h.dict.litEnc != h.huffTree {
huffDecoderPool.Put(h.huffTree)
h.huffTree = nil
}
}
}

func (h *history) setDict(dict *dict) {
if dict == nil {
return
22 vendor/github.com/klauspost/compress/zstd/seqdec.go generated vendored
@ -99,6 +99,21 @@ func (s *sequenceDecs) initialize(br *bitReader, hist *history, out []byte) erro
return nil
}

func (s *sequenceDecs) freeDecoders() {
if f := s.litLengths.fse; f != nil && !f.preDefined {
fseDecoderPool.Put(f)
s.litLengths.fse = nil
}
if f := s.offsets.fse; f != nil && !f.preDefined {
fseDecoderPool.Put(f)
s.offsets.fse = nil
}
if f := s.matchLengths.fse; f != nil && !f.preDefined {
fseDecoderPool.Put(f)
s.matchLengths.fse = nil
}
}

// execute will execute the decoded sequence with the provided history.
// The sequence must be evaluated before being sent.
func (s *sequenceDecs) execute(seqs []seqVals, hist []byte) error {
@ -299,7 +314,10 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
}
size := ll + ml + len(out)
if size-startSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
if size-startSize == 424242 {
panic("here")
}
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
if size > cap(out) {
// Not enough size, which can happen under high volume block streaming conditions
@ -411,7 +429,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {

// Check if space for literals
if size := len(s.literals) + len(s.out) - startSize; size > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}

// Add final literals
43 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.go generated vendored
@ -32,18 +32,22 @@ type decodeSyncAsmContext struct {
// sequenceDecs_decodeSync_amd64 implements the main loop of sequenceDecs.decodeSync in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//
//go:noescape
func sequenceDecs_decodeSync_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_bmi2 implements the main loop of sequenceDecs.decodeSync in x86 asm with BMI2 extensions.
//
//go:noescape
func sequenceDecs_decodeSync_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_safe_amd64 does the same as above, but does not write more than output buffer.
//
//go:noescape
func sequenceDecs_decodeSync_safe_amd64(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

// sequenceDecs_decodeSync_safe_bmi2 does the same as above, but does not write more than output buffer.
//
//go:noescape
func sequenceDecs_decodeSync_safe_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeSyncAsmContext) int

@ -55,16 +59,22 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSize {
return false, nil
}
useSafe := false
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
useSafe = true
}
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
useSafe = true
}
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
useSafe = true
}

// FIXME: Using unsafe memory copies leads to rare, random crashes
// with fuzz testing. It is therefore disabled for now.
const useSafe = true
/*
useSafe := false
if s.maxSyncLen == 0 && cap(s.out)-len(s.out) < maxCompressedBlockSizeAlloc {
useSafe = true
}
if s.maxSyncLen > 0 && cap(s.out)-len(s.out)-compressedBlockOverAlloc < int(s.maxSyncLen) {
useSafe = true
}
if cap(s.literals) < len(s.literals)+compressedBlockOverAlloc {
useSafe = true
}
*/

br := s.br

@ -129,7 +139,7 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {
if debugDecoder {
println("msl:", s.maxSyncLen, "cap", cap(s.out), "bef:", startSize, "sz:", size-startSize, "mbs:", maxBlockSize, "outsz:", cap(s.out)-startSize)
}
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", size-startSize, maxBlockSize)
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)

default:
return true, fmt.Errorf("sequenceDecs_decode returned erronous code %d", errCode)
@ -137,7 +147,8 @@ func (s *sequenceDecs) decodeSyncSimple(hist []byte) (bool, error) {

s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return true, fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
return true, fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)

}
err := br.close()
if err != nil {
@ -195,20 +206,24 @@ const errorNotEnoughSpace = 5
// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//
//go:noescape
func sequenceDecs_decode_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm.
//
// Please refer to seqdec_generic.go for the reference implementation.
//
//go:noescape
func sequenceDecs_decode_56_amd64(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//
//go:noescape
func sequenceDecs_decode_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

// sequenceDecs_decode implements the main loop of sequenceDecs in x86 asm with BMI2 extensions.
//
//go:noescape
func sequenceDecs_decode_56_bmi2(s *sequenceDecs, br *bitReader, ctx *decodeAsmContext) int

@ -275,7 +290,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {

s.seqSize += ctx.litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
err := br.close()
if err != nil {
@ -302,10 +317,12 @@ type executeAsmContext struct {
// Returns false if a match offset is too big.
//
// Please refer to seqdec_generic.go for the reference implementation.
//
//go:noescape
func sequenceDecs_executeSimple_amd64(ctx *executeAsmContext) bool

// Same as above, but with safe memcopies
//
//go:noescape
func sequenceDecs_executeSimple_safe_amd64(ctx *executeAsmContext) bool
2384 vendor/github.com/klauspost/compress/zstd/seqdec_amd64.s generated vendored
File diff suppressed because it is too large
4 vendor/github.com/klauspost/compress/zstd/seqdec_generic.go generated vendored
@ -111,7 +111,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
s.seqSize += ll + ml
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
litRemain -= ll
if litRemain < 0 {
@ -149,7 +149,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
}
s.seqSize += litRemain
if s.seqSize > maxBlockSize {
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
return fmt.Errorf("output bigger than max block size (%d)", maxBlockSize)
}
err := br.close()
if err != nil {
24 vendor/github.com/moby/buildkit/client/build.go generated vendored
@ -4,6 +4,7 @@ import (
"context"

"github.com/moby/buildkit/client/buildid"
"github.com/moby/buildkit/frontend/attestations"
gateway "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/frontend/gateway/grpcclient"
gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
@ -20,17 +21,15 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
}
}()

if opt.Frontend != "" {
return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend")
}
feOpts := opt.FrontendAttrs

opt.Frontend = ""
opt.FrontendAttrs = attestations.Filter(opt.FrontendAttrs)

if product == "" {
product = apicaps.ExportedProduct
}

feOpts := opt.FrontendAttrs
opt.FrontendAttrs = nil

workers, err := c.ListWorkers(ctx)
if err != nil {
return nil, errors.Wrap(err, "listing workers for Build")
@ -113,6 +112,19 @@ func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.Sta
return g.gateway.StatFile(ctx, in, opts...)
}

func (g *gatewayClientForBuild) Evaluate(ctx context.Context, in *gatewayapi.EvaluateRequest, opts ...grpc.CallOption) (*gatewayapi.EvaluateResponse, error) {
if err := g.caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil {
if err2 := g.caps.Supports(gatewayapi.CapStatFile); err2 != nil {
return nil, err
}
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
_, err := g.gateway.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: in.Ref, Path: "."}, opts...)
return &gatewayapi.EvaluateResponse{}, err
}
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Evaluate(ctx, in, opts...)
}

func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Ping(ctx, in, opts...)
1 vendor/github.com/moby/buildkit/client/client.go generated vendored
@ -234,7 +234,6 @@ func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
return nil, errors.Wrap(err, "could not read certificate/key")
}
cfg.Certificates = []tls.Certificate{cert}
cfg.BuildNameToCertificate()
}

return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
1 vendor/github.com/moby/buildkit/client/llb/state.go generated vendored
@ -617,6 +617,7 @@ var (
LinuxArmel = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})
LinuxArm64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm64"})
LinuxS390x = Platform(ocispecs.Platform{OS: "linux", Architecture: "s390x"})
LinuxPpc64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64"})
LinuxPpc64le = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64le"})
Darwin = Platform(ocispecs.Platform{OS: "darwin", Architecture: "amd64"})
Windows = Platform(ocispecs.Platform{OS: "windows", Architecture: "amd64"})
184 vendor/github.com/moby/buildkit/client/solve.go generated vendored
@ -2,6 +2,7 @@ package client

import (
"context"
"encoding/base64"
"encoding/json"
"io"
"os"
@ -14,6 +15,7 @@ import (
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/ociindex"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
sessioncontent "github.com/moby/buildkit/session/content"
@ -124,6 +126,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
ex = opt.Exports[0]
}

indicesToUpdate := []string{}

if !opt.SessionPreInitialized {
if len(syncedDirs) > 0 {
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
@ -133,49 +137,64 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
s.Allow(a)
}

contentStores := map[string]content.Store{}
for key, store := range cacheOpt.contentStores {
contentStores[key] = store
}
for key, store := range opt.OCIStores {
key2 := "oci:" + key
if _, ok := contentStores[key2]; ok {
return nil, errors.Errorf("oci store key %q already exists", key)
}
contentStores[key2] = store
}

var supportFile bool
var supportDir bool
switch ex.Type {
case ExporterLocal:
if ex.Output != nil {
return nil, errors.New("output file writer is not supported by local exporter")
}
if ex.OutputDir == "" {
return nil, errors.New("output directory is required for local exporter")
}
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
case ExporterOCI, ExporterDocker, ExporterTar:
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
}
supportDir = true
case ExporterTar:
supportFile = true
case ExporterOCI, ExporterDocker:
supportDir = ex.OutputDir != ""
supportFile = ex.Output != nil
}

if supportFile && supportDir {
return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type)
}
if !supportFile && ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if !supportDir && ex.OutputDir != "" {
return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
}

if supportFile {
if ex.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
}
s.Allow(filesync.NewFSSyncTarget(ex.Output))
default:
if ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
}
}

// this is a new map that contains both cacheOpt stores and OCILayout stores
contentStores := make(map[string]content.Store, len(cacheOpt.contentStores)+len(opt.OCIStores))
// copy over the stores references from cacheOpt
for key, store := range cacheOpt.contentStores {
contentStores[key] = store
}
// copy over the stores references from ociLayout opts
for key, store := range opt.OCIStores {
// conflicts are not allowed
if _, ok := contentStores[key]; ok {
// we probably should check if the store is identical, but given that
// https://pkg.go.dev/github.com/containerd/containerd/content#Store
// is just an interface, composing 4 others, that is rather hard to do.
// For a future iteration.
return nil, errors.Errorf("contentStore key %s exists in both cache and OCI layouts", key)
if supportDir {
if ex.OutputDir == "" {
return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
}
switch ex.Type {
case ExporterOCI, ExporterDocker:
if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
return nil, err
}
cs, err := contentlocal.NewStore(ex.OutputDir)
if err != nil {
return nil, err
}
contentStores["export"] = cs
indicesToUpdate = append(indicesToUpdate, filepath.Join(ex.OutputDir, "index.json"))
default:
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
}
contentStores[key] = store
}

if len(contentStores) > 0 {
@ -352,10 +371,29 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
}
}
if manifestDescDt := res.ExporterResponse[exptypes.ExporterImageDescriptorKey]; manifestDescDt != "" {
manifestDescDt, err := base64.StdEncoding.DecodeString(manifestDescDt)
if err != nil {
return nil, err
}
var manifestDesc ocispecs.Descriptor
if err = json.Unmarshal([]byte(manifestDescDt), &manifestDesc); err != nil {
return nil, err
}
for _, indexJSONPath := range indicesToUpdate {
tag := "latest"
if t, ok := res.ExporterResponse["image.name"]; ok {
tag = t
}
if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil {
return nil, err
}
}
}
return res, nil
}

func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) {
for _, d := range localDirs {
fi, err := os.Stat(d)
if err != nil {
@ -371,10 +409,10 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
return fsutil.MapResultKeep
}

dirs := make([]filesync.SyncedDir, 0, len(localDirs))
dirs := make(filesync.StaticDirSource, len(localDirs))
if def == nil {
for name, d := range localDirs {
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
}
} else {
for _, dt := range def.Def {
@ -389,7 +427,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
if !ok {
return nil, errors.Errorf("local directory %s not enabled", name)
}
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
}
}
}
@ -416,14 +454,10 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
var (
cacheExports []*controlapi.CacheOptionsEntry
cacheImports []*controlapi.CacheOptionsEntry
// legacy API is used for registry caches, because the daemon might not support the new API
legacyExportRef string
legacyImportRefs []string
)
contentStores := make(map[string]content.Store)
indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag
frontendAttrs := make(map[string]string)
legacyExportAttrs := make(map[string]string)
for _, ex := range opt.CacheExports {
if ex.Type == "local" {
csDir := ex.Attrs["dest"]
@ -438,26 +472,27 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
return nil, err
}
contentStores["local:"+csDir] = cs

tag := "latest"
if t, ok := ex.Attrs["tag"]; ok {
tag = t
}
// TODO(AkihiroSuda): support custom index JSON path and tag
indexJSONPath := filepath.Join(csDir, "index.json")
indicesToUpdate[indexJSONPath] = "latest"
indicesToUpdate[indexJSONPath] = tag
}
if ex.Type == "registry" && legacyExportRef == "" {
legacyExportRef = ex.Attrs["ref"]
for k, v := range ex.Attrs {
if k != "ref" {
legacyExportAttrs[k] = v
}
if ex.Type == "registry" {
regRef := ex.Attrs["ref"]
if regRef == "" {
return nil, errors.New("registry cache exporter requires ref")
}
} else {
cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
Type: ex.Type,
Attrs: ex.Attrs,
})
}
cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
Type: ex.Type,
Attrs: ex.Attrs,
})
}
for _, im := range opt.CacheImports {
attrs := im.Attrs
if im.Type == "local" {
csDir := im.Attrs["src"]
if csDir == "" {
@ -469,40 +504,40 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
continue
}
// if digest is not specified, load from "latest" tag
if attrs["digest"] == "" {
if im.Attrs["digest"] == "" {
idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json"))
if err != nil {
bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
continue
}
for _, m := range idx.Manifests {
if (m.Annotations[ocispecs.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispecs.AnnotationRefName] == attrs["tag"]) {
attrs["digest"] = string(m.Digest)
tag := "latest"
if t, ok := im.Attrs["tag"]; ok {
tag = t
}
if m.Annotations[ocispecs.AnnotationRefName] == tag {
im.Attrs["digest"] = string(m.Digest)
break
}
}
if attrs["digest"] == "" {
if im.Attrs["digest"] == "" {
return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json")
}
}
contentStores["local:"+csDir] = cs
}
if im.Type == "registry" {
legacyImportRef := attrs["ref"]
legacyImportRefs = append(legacyImportRefs, legacyImportRef)
} else {
cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
Type: im.Type,
Attrs: attrs,
})
regRef := im.Attrs["ref"]
if regRef == "" {
return nil, errors.New("registry cache importer requires ref")
}
}
cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
Type: im.Type,
Attrs: im.Attrs,
})
}
if opt.Frontend != "" || isGateway {
// use legacy API for registry importers, because the frontend might not support the new API
if len(legacyImportRefs) > 0 {
frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",")
}
// use new API for other importers
if len(cacheImports) > 0 {
s, err := json.Marshal(cacheImports)
if err != nil {
@ -513,11 +548,6 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
}
res := cacheOptions{
options: controlapi.CacheOptions{
// old API (for registry caches, planned to be removed in early 2019)
ExportRefDeprecated: legacyExportRef,
ExportAttrsDeprecated: legacyExportAttrs,
ImportRefsDeprecated: legacyImportRefs,
// new API
Exports: cacheExports,
Imports: cacheImports,
},
7 vendor/github.com/moby/buildkit/cmd/buildkitd/config/config.go generated vendored
@ -53,6 +53,7 @@ type NetworkConfig struct {
Mode string `toml:"networkMode"`
CNIConfigPath string `toml:"cniConfigPath"`
CNIBinaryPath string `toml:"cniBinaryPath"`
CNIPoolSize int `toml:"cniPoolSize"`
}

type OCIConfig struct {
@ -81,6 +82,9 @@ type OCIConfig struct {
// The profile should already be loaded (by a higher level system) before creating a worker.
ApparmorProfile string `toml:"apparmor-profile"`

// SELinux enables applying SELinux labels.
SELinux bool `toml:"selinux"`

// MaxParallelism is the maximum number of parallel build steps that can be run at the same time.
MaxParallelism int `toml:"max-parallelism"`
}
@ -99,6 +103,9 @@ type ContainerdConfig struct {
// The profile should already be loaded (by a higher level system) before creating a worker.
ApparmorProfile string `toml:"apparmor-profile"`

// SELinux enables applying SELinux labels.
SELinux bool `toml:"selinux"`

MaxParallelism int `toml:"max-parallelism"`

Rootless bool `toml:"rootless"`
9 vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go generated vendored
@ -13,8 +13,17 @@ const (
ExporterInlineCache = "containerimage.inlinecache"
ExporterBuildInfo = "containerimage.buildinfo"
ExporterPlatformsKey = "refs.platforms"
ExporterEpochKey = "source.date.epoch"
)

// KnownRefMetadataKeys are the subset of exporter keys that can be suffixed by
// a platform to become platform specific
var KnownRefMetadataKeys = []string{
ExporterImageConfigKey,
ExporterInlineCache,
ExporterBuildInfo,
}

type Platforms struct {
Platforms []Platform
}
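As an illustration (an assumed convention, not shown in this diff): in multi-platform builds these ref-metadata keys may appear either bare or with a platform suffix, e.g. something of the form "containerimage.config/linux/amd64"; KnownRefMetadataKeys enumerates which base keys may carry such a suffix.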
52 vendor/github.com/moby/buildkit/exporter/containerimage/image/docker_image.go generated vendored Normal file
@ -0,0 +1,52 @@
package image

import (
"time"

"github.com/docker/docker/api/types/strslice"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`

// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.

// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}

// ImageConfig is a docker compatible config for an image
type ImageConfig struct {
ocispecs.ImageConfig

Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)

// NetworkDisabled bool `json:",omitempty"` // Is network disabled
// MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}

// Image is the JSON structure which describes some basic information about the image.
// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
type Image struct {
ocispecs.Image

// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`
}
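A minimal sketch (assumed example values, not from the diff) of populating the new ImageConfig with a healthcheck; per the field comments above, durations marshal to integer nanoseconds:

cfg := image.ImageConfig{}
cfg.Healthcheck = &image.HealthConfig{
	Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"}, // assumed command
	Interval: 30 * time.Second, // serialized as 30000000000 nanoseconds
	Timeout:  5 * time.Second,
	Retries:  3,
}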
82 vendor/github.com/moby/buildkit/frontend/attestations/parse.go generated vendored Normal file
@ -0,0 +1,82 @@
package attestations

import (
"encoding/csv"
"strings"

"github.com/pkg/errors"
)

const (
KeyTypeSbom = "sbom"
KeyTypeProvenance = "provenance"
)

const (
// TODO: update this before next buildkit release
defaultSBOMGenerator = "jedevc/buildkit-syft-scanner:master@sha256:de630f621eb0ab1bb1245cea76d01c5bddfe78af4f5b9adecde424cb7ec5605e"
)

func Filter(v map[string]string) map[string]string {
attests := make(map[string]string)
for k, v := range v {
if strings.HasPrefix(k, "attest:") {
attests[k] = v
continue
}
if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
attests[k] = v
continue
}
}
return attests
}

func Validate(values map[string]map[string]string) (map[string]map[string]string, error) {
for k := range values {
if k != KeyTypeSbom && k != KeyTypeProvenance {
return nil, errors.Errorf("unknown attestation type %q", k)
}
}
return values, nil
}

func Parse(values map[string]string) (map[string]map[string]string, error) {
attests := make(map[string]string)
for k, v := range values {
if strings.HasPrefix(k, "attest:") {
attests[strings.ToLower(strings.TrimPrefix(k, "attest:"))] = v
continue
}
if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
attests[strings.ToLower(strings.TrimPrefix(k, "build-arg:BUILDKIT_ATTEST_"))] = v
continue
}
}

out := make(map[string]map[string]string)
for k, v := range attests {
attrs := make(map[string]string)
out[k] = attrs
if k == KeyTypeSbom {
attrs["generator"] = defaultSBOMGenerator
}
if v == "" {
continue
}
csvReader := csv.NewReader(strings.NewReader(v))
fields, err := csvReader.Read()
if err != nil {
return nil, errors.Wrapf(err, "failed to parse %s", k)
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
parts = append(parts, "")
}
attrs[parts[0]] = parts[1]
}
}

return Validate(out)
}
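A short usage sketch of the parser above (the input keys and values are assumed examples): "attest:" keys and BUILDKIT_ATTEST_ build-args are folded together, values are parsed as CSV key=value lists, and the sbom entry gets the default generator injected:

attrs, err := attestations.Parse(map[string]string{
	"attest:sbom":                          "",         // assumed example input
	"build-arg:BUILDKIT_ATTEST_PROVENANCE": "mode=max", // assumed example input
})
if err != nil {
	panic(err)
}
_ = attrs["sbom"]["generator"]  // defaultSBOMGenerator, injected by Parse
_ = attrs["provenance"]["mode"] // "max"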
46 vendor/github.com/moby/buildkit/frontend/gateway/client/attestation.go generated vendored Normal file
@ -0,0 +1,46 @@
package client

import (
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/solver/result"
)

func AttestationToPB(a *result.Attestation) (*pb.Attestation, error) {
subjects := make([]*pb.InTotoSubject, len(a.InToto.Subjects))
for i, subject := range a.InToto.Subjects {
subjects[i] = &pb.InTotoSubject{
Kind: subject.Kind,
Name: subject.Name,
Digest: subject.Digest,
}
}

return &pb.Attestation{
Kind: a.Kind,
Path: a.Path,
Ref: a.Ref,
InTotoPredicateType: a.InToto.PredicateType,
InTotoSubjects: subjects,
}, nil
}

func AttestationFromPB(a *pb.Attestation) (*result.Attestation, error) {
subjects := make([]result.InTotoSubject, len(a.InTotoSubjects))
for i, subject := range a.InTotoSubjects {
subjects[i] = result.InTotoSubject{
Kind: subject.Kind,
Name: subject.Name,
Digest: subject.Digest,
}
}

return &result.Attestation{
Kind: a.Kind,
Path: a.Path,
Ref: a.Ref,
InToto: result.InTotoAttestation{
PredicateType: a.InTotoPredicateType,
Subjects: subjects,
},
}, nil
}
10 vendor/github.com/moby/buildkit/frontend/gateway/client/client.go generated vendored
@ -7,12 +7,21 @@ import (

"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/solver/result"
"github.com/moby/buildkit/util/apicaps"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
fstypes "github.com/tonistiigi/fsutil/types"
)

type Result = result.Result[Reference]

type BuildFunc func(context.Context, Client) (*Result, error)

func NewResult() *Result {
return &Result{}
}

type Client interface {
Solve(ctx context.Context, req SolveRequest) (*Result, error)
ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error)
@ -82,6 +91,7 @@ type ContainerProcess interface {

type Reference interface {
ToState() (llb.State, error)
Evaluate(ctx context.Context) error
ReadFile(ctx context.Context, req ReadRequest) ([]byte, error)
StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error)
ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error)
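A hedged sketch of driving the new Reference.Evaluate from inside a gateway BuildFunc (the surrounding scaffolding is assumed; only the Evaluate call reflects the interface change above):

func build(ctx context.Context, c client.Client) (*client.Result, error) {
	res, err := c.Solve(ctx, client.SolveRequest{Definition: def.ToPB()}) // def assumed in scope
	if err != nil {
		return nil, err
	}
	ref, err := res.SingleRef()
	if err != nil {
		return nil, err
	}
	// Force the ref to be solved; against older daemons the client
	// transparently falls back to Stat(".") as shown in build.go above.
	if err := ref.Evaluate(ctx); err != nil {
		return nil, err
	}
	return res, nil
}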
54 vendor/github.com/moby/buildkit/frontend/gateway/client/result.go generated vendored
@ -1,54 +0,0 @@
package client

import (
"context"
"sync"

"github.com/pkg/errors"
)

type BuildFunc func(context.Context, Client) (*Result, error)

type Result struct {
mu sync.Mutex
Ref Reference
Refs map[string]Reference
Metadata map[string][]byte
}

func NewResult() *Result {
return &Result{}
}

func (r *Result) AddMeta(k string, v []byte) {
r.mu.Lock()
if r.Metadata == nil {
r.Metadata = map[string][]byte{}
}
r.Metadata[k] = v
r.mu.Unlock()
}

func (r *Result) AddRef(k string, ref Reference) {
r.mu.Lock()
if r.Refs == nil {
r.Refs = map[string]Reference{}
}
r.Refs[k] = ref
r.mu.Unlock()
}

func (r *Result) SetRef(ref Reference) {
r.Ref = ref
}

func (r *Result) SingleRef() (Reference, error) {
r.mu.Lock()
defer r.mu.Unlock()

if r.Refs != nil && r.Ref == nil {
return nil, errors.Errorf("invalid map result")
}

return r.Ref, nil
}
69 vendor/github.com/moby/buildkit/frontend/gateway/grpcclient/client.go generated vendored
@ -115,7 +115,7 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
req := &pb.ReturnRequest{}
if retError == nil {
if res == nil {
res = &client.Result{}
res = client.NewResult()
}
pbRes := &pb.Result{
Metadata: res.Metadata,
@ -160,6 +160,25 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
}
}
}

if res.Attestations != nil && c.caps.Supports(pb.CapAttestations) == nil {
attestations := map[string]*pb.Attestations{}
for k, as := range res.Attestations {
for _, a := range as {
pbAtt, err := client.AttestationToPB(&a)
if err != nil {
retError = err
continue
}
if attestations[k] == nil {
attestations[k] = &pb.Attestations{}
}
attestations[k].Attestation = append(attestations[k].Attestation, pbAtt)
}
}
pbRes.Attestations = attestations
}

if retError == nil {
req.Result = pbRes
}
@ -368,30 +387,15 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
if c.caps.Supports(pb.CapGatewayEvaluateSolve) == nil {
req.Evaluate = creq.Evaluate
} else {
// If evaluate is not supported, fallback to running Stat(".") in order to
// trigger an evaluation of the result.
// If evaluate is not supported, fallback to running Stat(".") in
// order to trigger an evaluation of the result.
defer func() {
if res == nil {
return
}

var (
id string
ref client.Reference
)
ref, err = res.SingleRef()
if err != nil {
for refID := range res.Refs {
id = refID
break
}
} else {
id = ref.(*reference).id
}

_, err = c.client.StatFile(ctx, &pb.StatFileRequest{
Ref: id,
Path: ".",
err = res.EachRef(func(ref client.Reference) error {
_, err := ref.StatFile(ctx, client.StatRequest{Path: "."})
return err
})
}()
}
@ -402,7 +406,7 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
return nil, err
}

res = &client.Result{}
res = client.NewResult()
if resp.Result == nil {
if id := resp.Ref; id != "" {
c.requests[id] = req
@ -443,6 +447,18 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
res.AddRef(k, ref)
}
}

if resp.Result.Attestations != nil {
for p, as := range resp.Result.Attestations {
for _, a := range as.Attestation {
att, err := client.AttestationFromPB(a)
if err != nil {
return nil, err
}
res.AddAttestation(p, *att, nil)
}
}
}
}

return res, nil
@ -1023,6 +1039,15 @@ func (r *reference) ToState() (st llb.State, err error) {
return llb.NewState(defop), nil
}

func (r *reference) Evaluate(ctx context.Context) error {
req := &pb.EvaluateRequest{Ref: r.id}
_, err := r.c.client.Evaluate(ctx, req)
if err != nil {
return err
}
return nil
}

func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id}
if r := req.Range; r != nil {
20 vendor/github.com/moby/buildkit/frontend/gateway/pb/caps.go generated vendored
@ -56,8 +56,14 @@ const (
// errors.
CapGatewayEvaluateSolve apicaps.CapID = "gateway.solve.evaluate"

CapGatewayEvaluate apicaps.CapID = "gateway.evaluate"

// CapGatewayWarnings is the capability to log warnings from frontend
CapGatewayWarnings apicaps.CapID = "gateway.warnings"

// CapAttestations is the capability to indicate that attestation
// references will be attached to results
CapAttestations apicaps.CapID = "reference.attestations"
)

func init() {
@ -194,10 +200,24 @@ func init() {
Status: apicaps.CapStatusExperimental,
})

Caps.Init(apicaps.Cap{
ID: CapGatewayEvaluate,
Name: "gateway evaluate",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})

Caps.Init(apicaps.Cap{
ID: CapGatewayWarnings,
Name: "logging warnings",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})

Caps.Init(apicaps.Cap{
ID: CapAttestations,
Name: "reference attestations",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}
1791 vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.pb.go generated vendored
File diff suppressed because it is too large
42 vendor/github.com/moby/buildkit/frontend/gateway/pb/gateway.proto generated vendored
@ -25,6 +25,8 @@ service LLBBridge {
rpc ReadDir(ReadDirRequest) returns (ReadDirResponse);
// apicaps:CapStatFile
rpc StatFile(StatFileRequest) returns (StatFileResponse);
// apicaps:CapGatewayEvaluate
rpc Evaluate(EvaluateRequest) returns (EvaluateResponse);
rpc Ping(PingRequest) returns (PongResponse);
rpc Return(ReturnRequest) returns (ReturnResponse);
// apicaps:CapFrontendInputs
@ -48,6 +50,7 @@ message Result {
RefMap refs = 4;
}
map<string, bytes> metadata = 10;
map<string, Attestations> attestations = 11;
}

message RefMapDeprecated {
@ -63,6 +66,38 @@ message RefMap {
map<string, Ref> refs = 1;
}

message Attestations {
repeated Attestation attestation = 1;
}

message Attestation {
AttestationKind kind = 1;

string ref = 2;
string path = 3;
string inTotoPredicateType = 4;
repeated InTotoSubject inTotoSubjects = 5;
}

enum AttestationKind {
option (gogoproto.goproto_enum_prefix) = false;
InToto = 0 [(gogoproto.enumvalue_customname) = "AttestationKindInToto"];
Bundle = 1 [(gogoproto.enumvalue_customname) = "AttestationKindBundle"];
}

message InTotoSubject {
InTotoSubjectKind kind = 1;

repeated string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
string name = 3;
}

enum InTotoSubjectKind {
option (gogoproto.goproto_enum_prefix) = false;
Self = 0 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindSelf"];
Raw = 1 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindRaw"];
}

message ReturnRequest {
Result result = 1;
google.rpc.Status error = 2;
@ -163,6 +198,13 @@ message StatFileResponse {
fsutil.types.Stat stat = 1;
}

message EvaluateRequest {
string Ref = 1;
}

message EvaluateResponse {
}

message PingRequest{
}
message PongResponse{
2 vendor/github.com/moby/buildkit/session/auth/auth.go generated vendored
@ -2,8 +2,8 @@ package auth

import (
"context"
"crypto/rand"
"crypto/subtle"
"math/rand"
"sync"

"github.com/moby/buildkit/session"
28 vendor/github.com/moby/buildkit/session/filesync/filesync.go generated vendored
@ -27,27 +27,35 @@ const (
)

type fsSyncProvider struct {
dirs map[string]SyncedDir
dirs DirSource
p progressCb
doneCh chan error
}

type SyncedDir struct {
Name string
Dir string
Excludes []string
Map func(string, *fstypes.Stat) fsutil.MapResult
}

type DirSource interface {
LookupDir(string) (SyncedDir, bool)
}

type StaticDirSource map[string]SyncedDir

var _ DirSource = StaticDirSource{}

func (dirs StaticDirSource) LookupDir(name string) (SyncedDir, bool) {
dir, found := dirs[name]
return dir, found
}

// NewFSSyncProvider creates a new provider for sending files from client
func NewFSSyncProvider(dirs []SyncedDir) session.Attachable {
p := &fsSyncProvider{
dirs: map[string]SyncedDir{},
func NewFSSyncProvider(dirs DirSource) session.Attachable {
return &fsSyncProvider{
dirs: dirs,
}
for _, d := range dirs {
p.dirs[d.Name] = d
}
return p
}

func (sp *fsSyncProvider) Register(server *grpc.Server) {
@ -81,7 +89,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr
dirName = name[0]
}

dir, ok := sp.dirs[dirName]
dir, ok := sp.dirs.LookupDir(dirName)
if !ok {
return InvalidSessionError{status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName)}
}
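A minimal sketch of the new registration shape (the names and paths are assumed examples): callers now hand NewFSSyncProvider a DirSource, typically a StaticDirSource keyed by name, instead of a []SyncedDir with a Name field:

dirs := filesync.StaticDirSource{
	"context":    {Dir: "/path/to/build/context"},  // assumed path
	"dockerfile": {Dir: "/path/to/dockerfile/dir"}, // assumed path
}
s.Allow(filesync.NewFSSyncProvider(dirs)) // s is assumed to be a *session.Session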
44 vendor/github.com/moby/buildkit/session/grpc.go generated vendored
@ -2,6 +2,7 @@ package session

import (
"context"
"math"
"net"
"sync/atomic"
"time"
@ -10,6 +11,7 @@ import (
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/otel/trace"
"golang.org/x/net/http2"
@ -79,21 +81,55 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func())
defer cancelConn()
defer cc.Close()

ticker := time.NewTicker(1 * time.Second)
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
healthClient := grpc_health_v1.NewHealthClient(cc)

failedBefore := false
consecutiveSuccessful := 0
defaultHealthcheckDuration := 30 * time.Second
lastHealthcheckDuration := time.Duration(0)

for {
select {
case <-ctx.Done():
return
case <-ticker.C:
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
// This healthcheck can erroneously fail in some instances, such as receiving lots of data in a low-bandwidth scenario or too many concurrent builds.
// So, this healthcheck is purposely long, and can tolerate some failures on purpose.

healthcheckStart := time.Now()

timeout := time.Duration(math.Max(float64(defaultHealthcheckDuration), float64(lastHealthcheckDuration)*1.5))
ctx, cancel := context.WithTimeout(ctx, timeout)
_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
cancel()
if err != nil {
return

lastHealthcheckDuration = time.Since(healthcheckStart)
logFields := logrus.Fields{
"timeout": timeout,
"actualDuration": lastHealthcheckDuration,
}

if err != nil {
if failedBefore {
bklog.G(ctx).Error("healthcheck failed fatally")
return
}

failedBefore = true
consecutiveSuccessful = 0
bklog.G(ctx).WithFields(logFields).Warn("healthcheck failed")
} else {
consecutiveSuccessful++

if consecutiveSuccessful >= 5 && failedBefore {
failedBefore = false
bklog.G(ctx).WithFields(logFields).Debug("reset healthcheck failure")
}
}

bklog.G(ctx).WithFields(logFields).Debug("healthcheck completed")
}
}
}
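A standalone sketch of the adaptive timeout rule introduced above: each check is budgeted the larger of 30s and 1.5x the previous check's observed duration, so a slow-but-alive connection stretches its budget instead of being killed:

// E.g. a check that took 40s raises the next budget to 60s.
timeout := time.Duration(math.Max(
	float64(30*time.Second),                // defaultHealthcheckDuration
	float64(lastHealthcheckDuration)*1.5,   // measured via time.Since(healthcheckStart)
))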
1 vendor/github.com/moby/buildkit/solver/errdefs/errdefs.pb.go generated vendored
@ -186,6 +186,7 @@ type Solve struct {
MountIDs []string `protobuf:"bytes,2,rep,name=mountIDs,proto3" json:"mountIDs,omitempty"`
Op *pb.Op `protobuf:"bytes,3,opt,name=op,proto3" json:"op,omitempty"`
// Types that are valid to be assigned to Subject:
//
// *Solve_File
// *Solve_Cache
Subject isSolve_Subject `protobuf_oneof:"subject"`
Some files were not shown because too many files have changed in this diff.