vendor: update buildkit to master@ae9d0f5
Signed-off-by: Justin Chadwell <me@jedevc.com>
86 vendor/github.com/docker/docker/api/swagger.yaml (generated, vendored)
@ -24,7 +24,7 @@ info:
|
||||
title: "Docker Engine API"
|
||||
version: "1.42"
|
||||
x-logo:
|
||||
url: "https://docs.docker.com/images/logo-docker-main.png"
|
||||
url: "https://docs.docker.com/assets/images/logo-docker-main.png"
|
||||
description: |
|
||||
The Engine API is an HTTP API served by Docker Engine. It is the API the
|
||||
Docker client uses to communicate with the Engine, so everything the Docker
|
||||
@ -214,12 +214,14 @@ definitions:
|
||||
- `volume` a docker volume with the given `Name`.
|
||||
- `tmpfs` a `tmpfs`.
|
||||
- `npipe` a named pipe from the host into the container.
|
||||
- `cluster` a Swarm cluster volume
|
||||
type: "string"
|
||||
enum:
|
||||
- "bind"
|
||||
- "volume"
|
||||
- "tmpfs"
|
||||
- "npipe"
|
||||
- "cluster"
|
||||
example: "volume"
|
||||
Name:
|
||||
description: |
|
||||
@ -350,12 +352,14 @@ definitions:
|
||||
- `volume` Creates a volume with the given name and options (or uses a pre-existing volume with the same name and options). These are **not** removed when the container is removed.
|
||||
- `tmpfs` Create a tmpfs with the given options. The mount source cannot be specified for tmpfs.
|
||||
- `npipe` Mounts a named pipe from the host into the container. Must exist prior to creating the container.
|
||||
- `cluster` a Swarm cluster volume
|
||||
type: "string"
|
||||
enum:
|
||||
- "bind"
|
||||
- "volume"
|
||||
- "tmpfs"
|
||||
- "npipe"
|
||||
- "cluster"
|
||||
ReadOnly:
|
||||
description: "Whether the mount should be read-only."
|
||||
type: "boolean"
|
||||
@ -2247,23 +2251,63 @@ definitions:
|
||||
|
||||
BuildCache:
|
||||
type: "object"
|
||||
description: |
|
||||
BuildCache contains information about a build cache record.
|
||||
properties:
|
||||
ID:
|
||||
type: "string"
|
||||
description: |
|
||||
Unique ID of the build cache record.
|
||||
example: "ndlpt0hhvkqcdfkputsk4cq9c"
|
||||
Parent:
|
||||
description: |
|
||||
ID of the parent build cache record.
|
||||
|
||||
> **Deprecated**: This field is deprecated, and omitted if empty.
|
||||
type: "string"
|
||||
x-nullable: true
|
||||
example: ""
|
||||
Parents:
|
||||
description: |
|
||||
List of parent build cache record IDs.
|
||||
type: "array"
|
||||
items:
|
||||
type: "string"
|
||||
x-nullable: true
|
||||
example: ["hw53o5aio51xtltp5xjp8v7fx"]
|
||||
Type:
|
||||
type: "string"
|
||||
description: |
|
||||
Cache record type.
|
||||
example: "regular"
|
||||
# see https://github.com/moby/buildkit/blob/fce4a32258dc9d9664f71a4831d5de10f0670677/client/diskusage.go#L75-L84
|
||||
enum:
|
||||
- "internal"
|
||||
- "frontend"
|
||||
- "source.local"
|
||||
- "source.git.checkout"
|
||||
- "exec.cachemount"
|
||||
- "regular"
|
||||
Description:
|
||||
type: "string"
|
||||
description: |
|
||||
Description of the build-step that produced the build cache.
|
||||
example: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
|
||||
InUse:
|
||||
type: "boolean"
|
||||
description: |
|
||||
Indicates if the build cache is in use.
|
||||
example: false
|
||||
Shared:
|
||||
type: "boolean"
|
||||
description: |
|
||||
Indicates if the build cache is shared.
|
||||
example: true
|
||||
Size:
|
||||
description: |
|
||||
Amount of disk space used by the build cache (in bytes).
|
||||
type: "integer"
|
||||
example: 51
|
||||
CreatedAt:
|
||||
description: |
|
||||
Date and time at which the build cache was created in
|
||||
@ -2281,6 +2325,7 @@ definitions:
|
||||
example: "2017-08-09T07:09:37.632105588Z"
|
||||
UsageCount:
|
||||
type: "integer"
|
||||
example: 26
|
||||
|
||||
ImageID:
|
||||
type: "object"
|
||||
@ -6210,6 +6255,28 @@ paths:
|
||||
`/?[a-zA-Z0-9][a-zA-Z0-9_.-]+`.
|
||||
type: "string"
|
||||
pattern: "^/?[a-zA-Z0-9][a-zA-Z0-9_.-]+$"
|
||||
- name: "platform"
|
||||
in: "query"
|
||||
description: |
|
||||
Platform in the format `os[/arch[/variant]]` used for image lookup.
|
||||
|
||||
When specified, the daemon checks if the requested image is present
|
||||
in the local image cache with the given OS and Architecture, and
|
||||
otherwise returns a `404` status.
|
||||
|
||||
If the option is not set, the host's native OS and Architecture are
|
||||
used to look up the image in the image cache. However, if no platform
|
||||
is passed and the given image does exist in the local image cache,
|
||||
but its OS or architecture does not match, the container is created
|
||||
with the available image, and a warning is added to the `Warnings`
|
||||
field in the response, for example;
|
||||
|
||||
WARNING: The requested image's platform (linux/arm64/v8) does not
|
||||
match the detected host platform (linux/amd64) and no
|
||||
specific platform was requested
|
||||
|
||||
type: "string"
|
||||
default: ""
|
||||
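The `platform` query parameter described above is easiest to see with a raw request against the Engine API. A minimal sketch, assuming the default unix socket at /var/run/docker.sock and an image that is already pulled; the endpoint and field names follow the swagger definitions in this file.

    package main

    import (
        "bytes"
        "context"
        "fmt"
        "net"
        "net/http"
    )

    func main() {
        // Dial the local Engine API over its unix socket (assumed default path).
        tr := &http.Transport{
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                return (&net.Dialer{}).DialContext(ctx, "unix", "/var/run/docker.sock")
            },
        }
        client := &http.Client{Transport: tr}

        // Request a specific platform; a mismatch with the local image cache
        // surfaces in the "Warnings" field rather than failing the request.
        body := bytes.NewBufferString(`{"Image": "busybox"}`)
        resp, err := client.Post(
            "http://localhost/v1.42/containers/create?platform=linux/arm64",
            "application/json", body)
        if err != nil {
            panic(err)
        }
        defer resp.Body.Close()
        fmt.Println("status:", resp.Status)
    }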
- name: "body"
|
||||
in: "body"
|
||||
description: "Container to create"
|
||||
@ -8719,7 +8786,17 @@ paths:
|
||||
description: "Max API Version the server supports"
|
||||
Builder-Version:
|
||||
type: "string"
|
||||
description: "Default version of docker image builder"
|
||||
description: |
|
||||
Default version of docker image builder
|
||||
|
||||
The default on Linux is version "2" (BuildKit), but the daemon
|
||||
can be configured to recommend version "1" (classic Builder).
|
||||
Windows does not yet support BuildKit for native Windows images,
|
||||
and uses "1" (classic builder) as a default.
|
||||
|
||||
This value is a recommendation as advertised by the daemon, and
|
||||
it is up to the client to choose which builder to use.
|
||||
default: "2"
|
||||
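The Builder-Version header documented here is surfaced by the Go client's Ping call; a minimal sketch, assuming the standard github.com/docker/docker/client package from this vendor tree.

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/docker/client"
    )

    func main() {
        cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
        if err != nil {
            panic(err)
        }
        defer cli.Close()

        // Ping issues GET /_ping and exposes the Builder-Version response header.
        // The value is only a recommendation; the client chooses which builder to use.
        ping, err := cli.Ping(context.Background())
        if err != nil {
            panic(err)
        }
        fmt.Println("recommended builder version:", ping.BuilderVersion)
    }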
Docker-Experimental:
|
||||
type: "boolean"
|
||||
description: "If the server is running with experimental mode enabled"
|
||||
@ -9013,7 +9090,7 @@ paths:
|
||||
BuildCache:
|
||||
-
|
||||
ID: "hw53o5aio51xtltp5xjp8v7fx"
|
||||
Parent: ""
|
||||
Parents: []
|
||||
Type: "regular"
|
||||
Description: "pulled from docker.io/library/debian@sha256:234cb88d3020898631af0ccbbcca9a66ae7306ecd30c9720690858c1b007d2a0"
|
||||
InUse: false
|
||||
@ -9024,7 +9101,7 @@ paths:
|
||||
UsageCount: 26
|
||||
-
|
||||
ID: "ndlpt0hhvkqcdfkputsk4cq9c"
|
||||
Parent: "hw53o5aio51xtltp5xjp8v7fx"
|
||||
Parents: ["ndlpt0hhvkqcdfkputsk4cq9c"]
|
||||
Type: "regular"
|
||||
Description: "mount / from exec /bin/sh -c echo 'Binary::apt::APT::Keep-Downloaded-Packages \"true\";' > /etc/apt/apt.conf.d/keep-cache"
|
||||
InUse: false
|
||||
@ -9622,6 +9699,7 @@ paths:
|
||||
|
||||
Available filters:
|
||||
- `label` (`label=<key>`, `label=<key>=<value>`, `label!=<key>`, or `label!=<key>=<value>`) Prune volumes with (or without, in case `label!=...` is used) the specified labels.
|
||||
- `all` (`all=true`) - Consider all (local) volumes for pruning and not just anonymous volumes.
|
||||
type: "string"
|
||||
responses:
|
||||
200:
|
||||
|
2 vendor/github.com/docker/docker/api/types/mount/mount.go (generated, vendored)
@ -18,7 +18,7 @@ const (
|
||||
// TypeNamedPipe is the type for mounting Windows named pipes
|
||||
TypeNamedPipe Type = "npipe"
|
||||
// TypeCluster is the type for Swarm Cluster Volumes.
|
||||
TypeCluster = "csi"
|
||||
TypeCluster Type = "cluster"
|
||||
)
|
||||
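For reference, the corrected constant is used like any other mount type when building a container spec. A minimal sketch; "my-csi-volume" is a hypothetical Swarm cluster volume name.

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/mount"
    )

    func main() {
        // A cluster (CSI) volume mount using the renamed TypeCluster constant.
        m := mount.Mount{
            Type:   mount.TypeCluster,
            Source: "my-csi-volume",
            Target: "/data",
        }
        fmt.Println(m.Type, m.Source, m.Target)
    }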
|
||||
// Mount represents a mount (volume).
|
||||
|
33 vendor/github.com/docker/docker/api/types/types.go (generated, vendored)
@ -774,18 +774,31 @@ type BuildResult struct {
|
||||
ID string
|
||||
}
|
||||
|
||||
// BuildCache contains information about a build cache record
|
||||
// BuildCache contains information about a build cache record.
|
||||
type BuildCache struct {
|
||||
ID string
|
||||
Parent string
|
||||
Type string
|
||||
// ID is the unique ID of the build cache record.
|
||||
ID string
|
||||
// Parent is the ID of the parent build cache record.
|
||||
//
|
||||
// Deprecated: deprecated in API v1.42 and up, as it was deprecated in BuildKit; use Parents instead.
|
||||
Parent string `json:"Parent,omitempty"`
|
||||
// Parents is the list of parent build cache record IDs.
|
||||
Parents []string `json:"Parents,omitempty"`
|
||||
// Type is the cache record type.
|
||||
Type string
|
||||
// Description is a description of the build-step that produced the build cache.
|
||||
Description string
|
||||
InUse bool
|
||||
Shared bool
|
||||
Size int64
|
||||
CreatedAt time.Time
|
||||
LastUsedAt *time.Time
|
||||
UsageCount int
|
||||
// InUse indicates if the build cache is in use.
|
||||
InUse bool
|
||||
// Shared indicates if the build cache is shared.
|
||||
Shared bool
|
||||
// Size is the amount of disk space used by the build cache (in bytes).
|
||||
Size int64
|
||||
// CreatedAt is the date and time at which the build cache was created.
|
||||
CreatedAt time.Time
|
||||
// LastUsedAt is the date and time at which the build cache was last used.
|
||||
LastUsedAt *time.Time
|
||||
UsageCount int
|
||||
}
|
||||
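Since Parent is deprecated in favour of Parents in API v1.42, callers that still read it can fall back like this; a minimal sketch over a record obtained elsewhere (for example from the daemon's disk-usage endpoint).

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types"
    )

    // parentsOf prefers the new Parents field and falls back to the
    // deprecated Parent field for daemons older than API v1.42.
    func parentsOf(record types.BuildCache) []string {
        if len(record.Parents) > 0 {
            return record.Parents
        }
        if record.Parent != "" {
            return []string{record.Parent}
        }
        return nil
    }

    func main() {
        rec := types.BuildCache{ID: "ndlpt0hhvkqcdfkputsk4cq9c", Parents: []string{"hw53o5aio51xtltp5xjp8v7fx"}}
        fmt.Println(parentsOf(rec))
    }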
|
||||
// BuildCachePruneOptions hold parameters to prune the build cache
|
||||
|
4 vendor/github.com/docker/docker/api/types/volume/cluster_volume.go (generated, vendored)
@ -104,7 +104,7 @@ type AccessMode struct {
|
||||
BlockVolume *TypeBlock `json:",omitempty"`
|
||||
}
|
||||
|
||||
// Scope defines the Scope of a CSI Volume. This is how many nodes a
|
||||
// Scope defines the Scope of a Cluster Volume. This is how many nodes a
|
||||
// Volume can be accessed simultaneously on.
|
||||
type Scope string
|
||||
|
||||
@ -118,7 +118,7 @@ const (
|
||||
ScopeMultiNode Scope = "multi"
|
||||
)
|
||||
|
||||
// SharingMode defines the Sharing of a CSI Volume. This is how Tasks using a
|
||||
// SharingMode defines the Sharing of a Cluster Volume. This is how Tasks using a
|
||||
// Volume at the same time can use it.
|
||||
type SharingMode string
|
||||
|
||||
|
28 vendor/github.com/docker/docker/client/container_create.go (generated, vendored)
@ -26,23 +26,25 @@ func (cli *Client) ContainerCreate(ctx context.Context, config *container.Config
|
||||
if err := cli.NewVersionError("1.25", "stop timeout"); config != nil && config.StopTimeout != nil && err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
clientVersion := cli.ClientVersion()
|
||||
|
||||
// When using API 1.24 and under, the client is responsible for removing the container
|
||||
if hostConfig != nil && versions.LessThan(clientVersion, "1.25") {
|
||||
hostConfig.AutoRemove = false
|
||||
}
|
||||
|
||||
// When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
|
||||
if hostConfig != nil && platform != nil && platform.OS == "linux" && versions.LessThan(clientVersion, "1.42") {
|
||||
hostConfig.ConsoleSize = [2]uint{0, 0}
|
||||
}
|
||||
|
||||
if err := cli.NewVersionError("1.41", "specify container image platform"); platform != nil && err != nil {
|
||||
return response, err
|
||||
}
|
||||
|
||||
if hostConfig != nil {
|
||||
if versions.LessThan(cli.ClientVersion(), "1.25") {
|
||||
// When using API 1.24 and under, the client is responsible for removing the container
|
||||
hostConfig.AutoRemove = false
|
||||
}
|
||||
if versions.GreaterThanOrEqualTo(cli.ClientVersion(), "1.42") || versions.LessThan(cli.ClientVersion(), "1.40") {
|
||||
// KernelMemory was added in API 1.40, and deprecated in API 1.42
|
||||
hostConfig.KernelMemory = 0
|
||||
}
|
||||
if platform != nil && platform.OS == "linux" && versions.LessThan(cli.ClientVersion(), "1.42") {
|
||||
// When using API under 1.42, the Linux daemon doesn't respect the ConsoleSize
|
||||
hostConfig.ConsoleSize = [2]uint{0, 0}
|
||||
}
|
||||
}
|
||||
|
||||
query := url.Values{}
|
||||
if p := formatPlatform(platform); p != "" {
|
||||
query.Set("platform", p)
|
||||
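The refactored block above gates HostConfig fields on the negotiated API version; the same pattern works for any caller that talks to mixed-version daemons. A minimal sketch, assuming the vendored versions helper and a version string obtained from the client.

    package main

    import (
        "fmt"

        "github.com/docker/docker/api/types/container"
        "github.com/docker/docker/api/types/versions"
    )

    func main() {
        apiVersion := "1.41" // e.g. the value returned by cli.ClientVersion()

        hostConfig := &container.HostConfig{}
        hostConfig.KernelMemory = 1 << 30

        // KernelMemory was added in API 1.40 and deprecated in API 1.42, so it
        // is only meaningful for daemons in the [1.40, 1.42) range.
        if versions.GreaterThanOrEqualTo(apiVersion, "1.42") || versions.LessThan(apiVersion, "1.40") {
            hostConfig.KernelMemory = 0
        }
        fmt.Println("KernelMemory sent to daemon:", hostConfig.KernelMemory)
    }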
|
1 vendor/github.com/docker/docker/client/events.go (generated, vendored)
@ -17,7 +17,6 @@ import (
|
||||
// be sent over the error channel. If an error is sent all processing will be stopped. It's up
|
||||
// to the caller to reopen the stream in the event of an error by reinvoking this method.
|
||||
func (cli *Client) Events(ctx context.Context, options types.EventsOptions) (<-chan events.Message, <-chan error) {
|
||||
|
||||
messages := make(chan events.Message)
|
||||
errs := make(chan error, 1)
|
||||
|
||||
|
7 vendor/github.com/docker/docker/client/options.go (generated, vendored)
@ -44,13 +44,6 @@ func FromEnv(c *Client) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
// WithDialer applies the dialer.DialContext to the client transport. This can be
|
||||
// used to set the Timeout and KeepAlive settings of the client.
|
||||
// Deprecated: use WithDialContext
|
||||
func WithDialer(dialer *net.Dialer) Opt {
|
||||
return WithDialContext(dialer.DialContext)
|
||||
}
|
||||
|
||||
// WithDialContext applies the dialer to the client transport. This can be
|
||||
// used to set the Timeout and KeepAlive settings of the client.
|
||||
func WithDialContext(dialContext func(ctx context.Context, network, addr string) (net.Conn, error)) Opt {
|
||||
|
87 vendor/github.com/docker/docker/pkg/archive/archive.go (generated, vendored)
@ -19,17 +19,30 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/pkg/userns"
|
||||
"github.com/docker/docker/pkg/fileutils"
|
||||
"github.com/docker/docker/pkg/idtools"
|
||||
"github.com/docker/docker/pkg/ioutils"
|
||||
"github.com/docker/docker/pkg/pools"
|
||||
"github.com/docker/docker/pkg/system"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
"github.com/moby/patternmatcher"
|
||||
"github.com/moby/sys/sequential"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/sirupsen/logrus"
|
||||
exec "golang.org/x/sys/execabs"
|
||||
)
|
||||
|
||||
// ImpliedDirectoryMode represents the mode (Unix permissions) applied to directories that are implied by files in a
|
||||
// tar, but that do not have their own header entry.
|
||||
//
|
||||
// The permissions mask is stored in a constant instead of locally to ensure that magic numbers do not
|
||||
// proliferate in the codebase. The default value 0755 has been selected based on the default umask of 0022, and
|
||||
// a convention of mkdir(1) calling mkdir(2) with permissions of 0777, resulting in a final value of 0755.
|
||||
//
|
||||
// This value is currently implementation-defined, and not captured in any cross-runtime specification. Thus, it is
|
||||
// subject to change in Moby at any time -- image authors who require consistent or known directory permissions
|
||||
// should explicitly control them by ensuring that header entries exist for any applicable path.
|
||||
const ImpliedDirectoryMode = 0755
|
||||
|
||||
type (
|
||||
// Compression is the state represents if compressed or not.
|
||||
Compression int
|
||||
@ -382,7 +395,6 @@ func ReplaceFileTarWrapper(inputTarStream io.ReadCloser, mods map[string]TarModi
|
||||
}
|
||||
|
||||
pipeWriter.Close()
|
||||
|
||||
}()
|
||||
return pipeReader
|
||||
}
|
||||
@ -660,10 +672,9 @@ func (ta *tarAppender) addTarFile(path, name string) error {
|
||||
}
|
||||
|
||||
if hdr.Typeflag == tar.TypeReg && hdr.Size > 0 {
|
||||
// We use system.OpenSequential to ensure we use sequential file
|
||||
// access on Windows to avoid depleting the standby list.
|
||||
// On Linux, this equates to a regular os.Open.
|
||||
file, err := system.OpenSequential(path)
|
||||
// We use sequential file access to avoid depleting the standby list on
|
||||
// Windows. On Linux, this equates to a regular os.Open.
|
||||
file, err := sequential.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -701,10 +712,9 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||
}
|
||||
|
||||
case tar.TypeReg, tar.TypeRegA:
|
||||
// Source is regular file. We use system.OpenFileSequential to use sequential
|
||||
// file access to avoid depleting the standby list on Windows.
|
||||
// On Linux, this equates to a regular os.OpenFile
|
||||
file, err := system.OpenFileSequential(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
|
||||
// Source is regular file. We use sequential file access to avoid depleting
|
||||
// the standby list on Windows. On Linux, this equates to a regular os.OpenFile.
|
||||
file, err := sequential.OpenFile(path, os.O_CREATE|os.O_WRONLY, hdrInfo.Mode())
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -791,7 +801,6 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
if len(errors) > 0 {
|
||||
@ -841,12 +850,11 @@ func Tar(path string, compression Compression) (io.ReadCloser, error) {
|
||||
// TarWithOptions creates an archive from the directory at `path`, only including files whose relative
|
||||
// paths are included in `options.IncludeFiles` (if non-nil) or not in `options.ExcludePatterns`.
|
||||
func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error) {
|
||||
|
||||
// Fix the source path to work with long path names. This is a no-op
|
||||
// on platforms other than Windows.
|
||||
srcPath = fixVolumePathPrefix(srcPath)
|
||||
|
||||
pm, err := fileutils.NewPatternMatcher(options.ExcludePatterns)
|
||||
pm, err := patternmatcher.New(options.ExcludePatterns)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -921,7 +929,7 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||
rebaseName := options.RebaseNames[include]
|
||||
|
||||
var (
|
||||
parentMatchInfo []fileutils.MatchInfo
|
||||
parentMatchInfo []patternmatcher.MatchInfo
|
||||
parentDirs []string
|
||||
)
|
||||
|
||||
@ -960,11 +968,11 @@ func TarWithOptions(srcPath string, options *TarOptions) (io.ReadCloser, error)
|
||||
parentMatchInfo = parentMatchInfo[:len(parentMatchInfo)-1]
|
||||
}
|
||||
|
||||
var matchInfo fileutils.MatchInfo
|
||||
var matchInfo patternmatcher.MatchInfo
|
||||
if len(parentMatchInfo) != 0 {
|
||||
skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, parentMatchInfo[len(parentMatchInfo)-1])
|
||||
} else {
|
||||
skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, fileutils.MatchInfo{})
|
||||
skip, matchInfo, err = pm.MatchesUsingParentResults(relFilePath, patternmatcher.MatchInfo{})
|
||||
}
|
||||
if err != nil {
|
||||
logrus.Errorf("Error matching %s: %v", relFilePath, err)
|
||||
@ -1049,7 +1057,6 @@ func Unpack(decompressedArchive io.Reader, dest string, options *TarOptions) err
|
||||
defer pools.BufioReader32KPool.Put(trBuf)
|
||||
|
||||
var dirs []*tar.Header
|
||||
rootIDs := options.IDMap.RootPair()
|
||||
whiteoutConverter, err := getWhiteoutConverter(options.WhiteoutFormat, options.InUserNS)
|
||||
if err != nil {
|
||||
return err
|
||||
@ -1084,19 +1091,10 @@ loop:
|
||||
}
|
||||
}
|
||||
|
||||
// After calling filepath.Clean(hdr.Name) above, hdr.Name will now be in
|
||||
// the filepath format for the OS on which the daemon is running. Hence
|
||||
// the check for a slash-suffix MUST be done in an OS-agnostic way.
|
||||
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
|
||||
// Not the root directory, ensure that the parent directory exists
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = idtools.MkdirAllAndChownNew(parentPath, 0755, rootIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
// Ensure that the parent directory exists.
|
||||
err = createImpliedDirectories(dest, hdr, options)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
// #nosec G305 -- The joined path is checked for path traversal.
|
||||
@ -1174,6 +1172,35 @@ loop:
|
||||
return nil
|
||||
}
|
||||
|
||||
// createImpliedDirectories will create all parent directories of the current path with default permissions, if they do
|
||||
// not already exist. This is possible as the tar format supports 'implicit' directories, where their existence is
|
||||
// defined by the paths of files in the tar, but there are no header entries for the directories themselves, and thus
|
||||
// we must both create them and choose metadata like permissions.
|
||||
//
|
||||
// The caller should have performed filepath.Clean(hdr.Name), so hdr.Name will now be in the filepath format for the OS
|
||||
// on which the daemon is running. This precondition is required because this function assumes a OS-specific path
|
||||
// separator when checking that a path is not the root.
|
||||
func createImpliedDirectories(dest string, hdr *tar.Header, options *TarOptions) error {
|
||||
// Not the root directory, ensure that the parent directory exists
|
||||
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
// RootPair() is confined inside this loop as most cases will not require a call, so we can spend some
|
||||
// unneeded function calls in the uncommon case to encapsulate logic -- implied directories are a niche
|
||||
// usage that reduces the portability of an image.
|
||||
rootIDs := options.IDMap.RootPair()
|
||||
|
||||
err = idtools.MkdirAllAndChownNew(parentPath, ImpliedDirectoryMode, rootIDs)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
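Implied directories are easy to reproduce with the standard library: write a tar that contains only a file entry under a path whose parent has no header of its own. When such an archive is unpacked by the code above, the missing parents are created with ImpliedDirectoryMode (0755). A minimal sketch:

    package main

    import (
        "archive/tar"
        "bytes"
        "fmt"
    )

    func main() {
        var buf bytes.Buffer
        tw := tar.NewWriter(&buf)

        // Only a file entry is written; "a/" and "a/b/" have no headers and are
        // therefore implied directories from the unpacker's point of view.
        content := []byte("hello")
        if err := tw.WriteHeader(&tar.Header{Name: "a/b/file.txt", Mode: 0644, Size: int64(len(content))}); err != nil {
            panic(err)
        }
        if _, err := tw.Write(content); err != nil {
            panic(err)
        }
        if err := tw.Close(); err != nil {
            panic(err)
        }
        fmt.Println("archive size:", buf.Len())
    }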
|
||||
// Untar reads a stream of bytes from `archive`, parses it as a tar archive,
|
||||
// and unpacks it into the directory at `dest`.
|
||||
// The archive may be compressed with one of the following algorithms:
|
||||
|
2 vendor/github.com/docker/docker/pkg/archive/changes.go (generated, vendored)
@ -246,7 +246,6 @@ func (info *FileInfo) path() string {
|
||||
}
|
||||
|
||||
func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||
|
||||
sizeAtEntry := len(*changes)
|
||||
|
||||
if oldInfo == nil {
|
||||
@ -319,7 +318,6 @@ func (info *FileInfo) addChanges(oldInfo *FileInfo, changes *[]Change) {
|
||||
copy((*changes)[sizeAtEntry+1:], (*changes)[sizeAtEntry:])
|
||||
(*changes)[sizeAtEntry] = change
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Changes add changes to file information.
|
||||
|
1 vendor/github.com/docker/docker/pkg/archive/copy.go (generated, vendored)
@ -303,7 +303,6 @@ func PrepareArchiveCopy(srcContent io.Reader, srcInfo, dstInfo CopyInfo) (dstDir
|
||||
}
|
||||
return dstDir, RebaseArchiveEntries(srcContent, srcBase, dstBase), nil
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// RebaseArchiveEntries rewrites the given srcContent archive replacing
|
||||
|
18 vendor/github.com/docker/docker/pkg/archive/diff.go (generated, vendored)
@ -72,20 +72,10 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
|
||||
}
|
||||
}
|
||||
|
||||
// Note as these operations are platform specific, so must the slash be.
|
||||
if !strings.HasSuffix(hdr.Name, string(os.PathSeparator)) {
|
||||
// Not the root directory, ensure that the parent directory exists.
|
||||
// This happened in some tests where an image had a tarfile without any
|
||||
// parent directories.
|
||||
parent := filepath.Dir(hdr.Name)
|
||||
parentPath := filepath.Join(dest, parent)
|
||||
|
||||
if _, err := os.Lstat(parentPath); err != nil && os.IsNotExist(err) {
|
||||
err = system.MkdirAll(parentPath, 0600)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
}
|
||||
// Ensure that the parent directory exists.
|
||||
err = createImpliedDirectories(dest, hdr, options)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
|
||||
// Skip AUFS metadata dirs
|
||||
|
542 vendor/github.com/docker/docker/pkg/fileutils/fileutils.go (generated, vendored)
@ -1,542 +0,0 @@
|
||||
package fileutils // import "github.com/docker/docker/pkg/fileutils"
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
"text/scanner"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex.
|
||||
var escapeBytes [8]byte
|
||||
|
||||
// shouldEscape reports whether a rune should be escaped as part of the regex.
|
||||
//
|
||||
// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters.
|
||||
// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator
|
||||
// on Windows.
|
||||
//
|
||||
// Adapted from regexp::QuoteMeta in go stdlib.
|
||||
// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2
|
||||
func shouldEscape(b rune) bool {
|
||||
return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0
|
||||
}
|
||||
|
||||
func init() {
|
||||
for _, b := range []byte(`.+()|{}$`) {
|
||||
escapeBytes[b%8] |= 1 << (b / 8)
|
||||
}
|
||||
}
|
||||
|
||||
// PatternMatcher allows checking paths against a list of patterns
|
||||
type PatternMatcher struct {
|
||||
patterns []*Pattern
|
||||
exclusions bool
|
||||
}
|
||||
|
||||
// NewPatternMatcher creates a new matcher object for specific patterns that can
|
||||
// be used later to match against patterns against paths
|
||||
func NewPatternMatcher(patterns []string) (*PatternMatcher, error) {
|
||||
pm := &PatternMatcher{
|
||||
patterns: make([]*Pattern, 0, len(patterns)),
|
||||
}
|
||||
for _, p := range patterns {
|
||||
// Eliminate leading and trailing whitespace.
|
||||
p = strings.TrimSpace(p)
|
||||
if p == "" {
|
||||
continue
|
||||
}
|
||||
p = filepath.Clean(p)
|
||||
newp := &Pattern{}
|
||||
if p[0] == '!' {
|
||||
if len(p) == 1 {
|
||||
return nil, errors.New("illegal exclusion pattern: \"!\"")
|
||||
}
|
||||
newp.exclusion = true
|
||||
p = p[1:]
|
||||
pm.exclusions = true
|
||||
}
|
||||
// Do some syntax checking on the pattern.
|
||||
// filepath's Match() has some really weird rules that are inconsistent
|
||||
// so instead of trying to dup their logic, just call Match() for its
|
||||
// error state and if there is an error in the pattern return it.
|
||||
// If this becomes an issue we can remove this since its really only
|
||||
// needed in the error (syntax) case - which isn't really critical.
|
||||
if _, err := filepath.Match(p, "."); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
newp.cleanedPattern = p
|
||||
newp.dirs = strings.Split(p, string(os.PathSeparator))
|
||||
pm.patterns = append(pm.patterns, newp)
|
||||
}
|
||||
return pm, nil
|
||||
}
|
||||
|
||||
// Matches returns true if "file" matches any of the patterns
|
||||
// and isn't excluded by any of the subsequent patterns.
|
||||
//
|
||||
// The "file" argument should be a slash-delimited path.
|
||||
//
|
||||
// Matches is not safe to call concurrently.
|
||||
//
|
||||
// Deprecated: This implementation is buggy (it only checks a single parent dir
|
||||
// against the pattern) and will be removed soon. Use either
|
||||
// MatchesOrParentMatches or MatchesUsingParentResults instead.
|
||||
func (pm *PatternMatcher) Matches(file string) (bool, error) {
|
||||
matched := false
|
||||
file = filepath.FromSlash(file)
|
||||
parentPath := filepath.Dir(file)
|
||||
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
|
||||
|
||||
for _, pattern := range pm.patterns {
|
||||
// Skip evaluation if this is an inclusion and the filename
|
||||
// already matched the pattern, or it's an exclusion and it has
|
||||
// not matched the pattern yet.
|
||||
if pattern.exclusion != matched {
|
||||
continue
|
||||
}
|
||||
|
||||
match, err := pattern.match(file)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !match && parentPath != "." {
|
||||
// Check to see if the pattern matches one of our parent dirs.
|
||||
if len(pattern.dirs) <= len(parentPathDirs) {
|
||||
match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
|
||||
}
|
||||
}
|
||||
|
||||
if match {
|
||||
matched = !pattern.exclusion
|
||||
}
|
||||
}
|
||||
|
||||
return matched, nil
|
||||
}
|
||||
|
||||
// MatchesOrParentMatches returns true if "file" matches any of the patterns
|
||||
// and isn't excluded by any of the subsequent patterns.
|
||||
//
|
||||
// The "file" argument should be a slash-delimited path.
|
||||
//
|
||||
// Matches is not safe to call concurrently.
|
||||
func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) {
|
||||
matched := false
|
||||
file = filepath.FromSlash(file)
|
||||
parentPath := filepath.Dir(file)
|
||||
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
|
||||
|
||||
for _, pattern := range pm.patterns {
|
||||
// Skip evaluation if this is an inclusion and the filename
|
||||
// already matched the pattern, or it's an exclusion and it has
|
||||
// not matched the pattern yet.
|
||||
if pattern.exclusion != matched {
|
||||
continue
|
||||
}
|
||||
|
||||
match, err := pattern.match(file)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if !match && parentPath != "." {
|
||||
// Check to see if the pattern matches one of our parent dirs.
|
||||
for i := range parentPathDirs {
|
||||
match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator)))
|
||||
if match {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if match {
|
||||
matched = !pattern.exclusion
|
||||
}
|
||||
}
|
||||
|
||||
return matched, nil
|
||||
}
|
||||
|
||||
// MatchesUsingParentResult returns true if "file" matches any of the patterns
|
||||
// and isn't excluded by any of the subsequent patterns. The functionality is
|
||||
// the same as Matches, but as an optimization, the caller keeps track of
|
||||
// whether the parent directory matched.
|
||||
//
|
||||
// The "file" argument should be a slash-delimited path.
|
||||
//
|
||||
// MatchesUsingParentResult is not safe to call concurrently.
|
||||
//
|
||||
// Deprecated: this function does not behave correctly in some cases (see
|
||||
// https://github.com/docker/buildx/issues/850).
|
||||
//
|
||||
// Use MatchesUsingParentResults instead.
|
||||
func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) {
|
||||
matched := parentMatched
|
||||
file = filepath.FromSlash(file)
|
||||
|
||||
for _, pattern := range pm.patterns {
|
||||
// Skip evaluation if this is an inclusion and the filename
|
||||
// already matched the pattern, or it's an exclusion and it has
|
||||
// not matched the pattern yet.
|
||||
if pattern.exclusion != matched {
|
||||
continue
|
||||
}
|
||||
|
||||
match, err := pattern.match(file)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
if match {
|
||||
matched = !pattern.exclusion
|
||||
}
|
||||
}
|
||||
return matched, nil
|
||||
}
|
||||
|
||||
// MatchInfo tracks information about parent dir matches while traversing a
|
||||
// filesystem.
|
||||
type MatchInfo struct {
|
||||
parentMatched []bool
|
||||
}
|
||||
|
||||
// MatchesUsingParentResults returns true if "file" matches any of the patterns
|
||||
// and isn't excluded by any of the subsequent patterns. The functionality is
|
||||
// the same as Matches, but as an optimization, the caller passes in
|
||||
// intermediate results from matching the parent directory.
|
||||
//
|
||||
// The "file" argument should be a slash-delimited path.
|
||||
//
|
||||
// MatchesUsingParentResults is not safe to call concurrently.
|
||||
func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) {
|
||||
parentMatched := parentMatchInfo.parentMatched
|
||||
if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) {
|
||||
return false, MatchInfo{}, errors.New("wrong number of values in parentMatched")
|
||||
}
|
||||
|
||||
file = filepath.FromSlash(file)
|
||||
matched := false
|
||||
|
||||
matchInfo := MatchInfo{
|
||||
parentMatched: make([]bool, len(pm.patterns)),
|
||||
}
|
||||
for i, pattern := range pm.patterns {
|
||||
match := false
|
||||
// If the parent matched this pattern, we don't need to recheck.
|
||||
if len(parentMatched) != 0 {
|
||||
match = parentMatched[i]
|
||||
}
|
||||
|
||||
if !match {
|
||||
// Skip evaluation if this is an inclusion and the filename
|
||||
// already matched the pattern, or it's an exclusion and it has
|
||||
// not matched the pattern yet.
|
||||
if pattern.exclusion != matched {
|
||||
continue
|
||||
}
|
||||
|
||||
var err error
|
||||
match, err = pattern.match(file)
|
||||
if err != nil {
|
||||
return false, matchInfo, err
|
||||
}
|
||||
|
||||
// If the zero value of MatchInfo was passed in, we don't have
|
||||
// any information about the parent dir's match results, and we
|
||||
// apply the same logic as MatchesOrParentMatches.
|
||||
if !match && len(parentMatched) == 0 {
|
||||
if parentPath := filepath.Dir(file); parentPath != "." {
|
||||
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
|
||||
// Check to see if the pattern matches one of our parent dirs.
|
||||
for i := range parentPathDirs {
|
||||
match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator)))
|
||||
if match {
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
matchInfo.parentMatched[i] = match
|
||||
|
||||
if match {
|
||||
matched = !pattern.exclusion
|
||||
}
|
||||
}
|
||||
return matched, matchInfo, nil
|
||||
}
|
||||
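The matcher deleted above now lives in github.com/moby/patternmatcher, which is what the archive code imports after this change. A minimal sketch of the non-deprecated entry point, with hypothetical .dockerignore-style patterns:

    package main

    import (
        "fmt"

        "github.com/moby/patternmatcher"
    )

    func main() {
        // Same exclusion semantics as the removed fileutils matcher.
        pm, err := patternmatcher.New([]string{"docs/**", "!docs/README.md"})
        if err != nil {
            panic(err)
        }

        for _, f := range []string{"docs/guide.md", "docs/README.md", "main.go"} {
            ok, err := pm.MatchesOrParentMatches(f)
            if err != nil {
                panic(err)
            }
            fmt.Println(f, "excluded:", ok)
        }
    }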
|
||||
// Exclusions returns true if any of the patterns define exclusions
|
||||
func (pm *PatternMatcher) Exclusions() bool {
|
||||
return pm.exclusions
|
||||
}
|
||||
|
||||
// Patterns returns array of active patterns
|
||||
func (pm *PatternMatcher) Patterns() []*Pattern {
|
||||
return pm.patterns
|
||||
}
|
||||
|
||||
// Pattern defines a single regexp used to filter file paths.
|
||||
type Pattern struct {
|
||||
matchType matchType
|
||||
cleanedPattern string
|
||||
dirs []string
|
||||
regexp *regexp.Regexp
|
||||
exclusion bool
|
||||
}
|
||||
|
||||
type matchType int
|
||||
|
||||
const (
|
||||
unknownMatch matchType = iota
|
||||
exactMatch
|
||||
prefixMatch
|
||||
suffixMatch
|
||||
regexpMatch
|
||||
)
|
||||
|
||||
func (p *Pattern) String() string {
|
||||
return p.cleanedPattern
|
||||
}
|
||||
|
||||
// Exclusion returns true if this pattern defines exclusion
|
||||
func (p *Pattern) Exclusion() bool {
|
||||
return p.exclusion
|
||||
}
|
||||
|
||||
func (p *Pattern) match(path string) (bool, error) {
|
||||
if p.matchType == unknownMatch {
|
||||
if err := p.compile(string(os.PathSeparator)); err != nil {
|
||||
return false, filepath.ErrBadPattern
|
||||
}
|
||||
}
|
||||
|
||||
switch p.matchType {
|
||||
case exactMatch:
|
||||
return path == p.cleanedPattern, nil
|
||||
case prefixMatch:
|
||||
// strip trailing **
|
||||
return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil
|
||||
case suffixMatch:
|
||||
// strip leading **
|
||||
suffix := p.cleanedPattern[2:]
|
||||
if strings.HasSuffix(path, suffix) {
|
||||
return true, nil
|
||||
}
|
||||
// **/foo matches "foo"
|
||||
return suffix[0] == os.PathSeparator && path == suffix[1:], nil
|
||||
case regexpMatch:
|
||||
return p.regexp.MatchString(path), nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func (p *Pattern) compile(sl string) error {
|
||||
regStr := "^"
|
||||
pattern := p.cleanedPattern
|
||||
// Go through the pattern and convert it to a regexp.
|
||||
// We use a scanner so we can support utf-8 chars.
|
||||
var scan scanner.Scanner
|
||||
scan.Init(strings.NewReader(pattern))
|
||||
|
||||
escSL := sl
|
||||
if sl == `\` {
|
||||
escSL += `\`
|
||||
}
|
||||
|
||||
p.matchType = exactMatch
|
||||
for i := 0; scan.Peek() != scanner.EOF; i++ {
|
||||
ch := scan.Next()
|
||||
|
||||
if ch == '*' {
|
||||
if scan.Peek() == '*' {
|
||||
// is some flavor of "**"
|
||||
scan.Next()
|
||||
|
||||
// Treat **/ as ** so eat the "/"
|
||||
if string(scan.Peek()) == sl {
|
||||
scan.Next()
|
||||
}
|
||||
|
||||
if scan.Peek() == scanner.EOF {
|
||||
// is "**EOF" - to align with .gitignore just accept all
|
||||
if p.matchType == exactMatch {
|
||||
p.matchType = prefixMatch
|
||||
} else {
|
||||
regStr += ".*"
|
||||
p.matchType = regexpMatch
|
||||
}
|
||||
} else {
|
||||
// is "**"
|
||||
// Note that this allows for any # of /'s (even 0) because
|
||||
// the .* will eat everything, even /'s
|
||||
regStr += "(.*" + escSL + ")?"
|
||||
p.matchType = regexpMatch
|
||||
}
|
||||
|
||||
if i == 0 {
|
||||
p.matchType = suffixMatch
|
||||
}
|
||||
} else {
|
||||
// is "*" so map it to anything but "/"
|
||||
regStr += "[^" + escSL + "]*"
|
||||
p.matchType = regexpMatch
|
||||
}
|
||||
} else if ch == '?' {
|
||||
// "?" is any char except "/"
|
||||
regStr += "[^" + escSL + "]"
|
||||
p.matchType = regexpMatch
|
||||
} else if shouldEscape(ch) {
|
||||
// Escape some regexp special chars that have no meaning
|
||||
// in golang's filepath.Match
|
||||
regStr += `\` + string(ch)
|
||||
} else if ch == '\\' {
|
||||
// escape next char. Note that a trailing \ in the pattern
|
||||
// will be left alone (but need to escape it)
|
||||
if sl == `\` {
|
||||
// On windows map "\" to "\\", meaning an escaped backslash,
|
||||
// and then just continue because filepath.Match on
|
||||
// Windows doesn't allow escaping at all
|
||||
regStr += escSL
|
||||
continue
|
||||
}
|
||||
if scan.Peek() != scanner.EOF {
|
||||
regStr += `\` + string(scan.Next())
|
||||
p.matchType = regexpMatch
|
||||
} else {
|
||||
regStr += `\`
|
||||
}
|
||||
} else if ch == '[' || ch == ']' {
|
||||
regStr += string(ch)
|
||||
p.matchType = regexpMatch
|
||||
} else {
|
||||
regStr += string(ch)
|
||||
}
|
||||
}
|
||||
|
||||
if p.matchType != regexpMatch {
|
||||
return nil
|
||||
}
|
||||
|
||||
regStr += "$"
|
||||
|
||||
re, err := regexp.Compile(regStr)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
p.regexp = re
|
||||
p.matchType = regexpMatch
|
||||
return nil
|
||||
}
|
||||
|
||||
// Matches returns true if file matches any of the patterns
|
||||
// and isn't excluded by any of the subsequent patterns.
|
||||
//
|
||||
// This implementation is buggy (it only checks a single parent dir against the
|
||||
// pattern) and will be removed soon. Use MatchesOrParentMatches instead.
|
||||
func Matches(file string, patterns []string) (bool, error) {
|
||||
pm, err := NewPatternMatcher(patterns)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
file = filepath.Clean(file)
|
||||
|
||||
if file == "." {
|
||||
// Don't let them exclude everything, kind of silly.
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return pm.Matches(file)
|
||||
}
|
||||
|
||||
// MatchesOrParentMatches returns true if file matches any of the patterns
|
||||
// and isn't excluded by any of the subsequent patterns.
|
||||
func MatchesOrParentMatches(file string, patterns []string) (bool, error) {
|
||||
pm, err := NewPatternMatcher(patterns)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
file = filepath.Clean(file)
|
||||
|
||||
if file == "." {
|
||||
// Don't let them exclude everything, kind of silly.
|
||||
return false, nil
|
||||
}
|
||||
|
||||
return pm.MatchesOrParentMatches(file)
|
||||
}
|
||||
|
||||
// CopyFile copies from src to dst until either EOF is reached
|
||||
// on src or an error occurs. It verifies src exists and removes
|
||||
// the dst if it exists.
|
||||
func CopyFile(src, dst string) (int64, error) {
|
||||
cleanSrc := filepath.Clean(src)
|
||||
cleanDst := filepath.Clean(dst)
|
||||
if cleanSrc == cleanDst {
|
||||
return 0, nil
|
||||
}
|
||||
sf, err := os.Open(cleanSrc)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer sf.Close()
|
||||
if err := os.Remove(cleanDst); err != nil && !os.IsNotExist(err) {
|
||||
return 0, err
|
||||
}
|
||||
df, err := os.Create(cleanDst)
|
||||
if err != nil {
|
||||
return 0, err
|
||||
}
|
||||
defer df.Close()
|
||||
return io.Copy(df, sf)
|
||||
}
|
||||
|
||||
// ReadSymlinkedDirectory returns the target directory of a symlink.
|
||||
// The target of the symbolic link may not be a file.
|
||||
func ReadSymlinkedDirectory(path string) (string, error) {
|
||||
var realPath string
|
||||
var err error
|
||||
if realPath, err = filepath.Abs(path); err != nil {
|
||||
return "", fmt.Errorf("unable to get absolute path for %s: %s", path, err)
|
||||
}
|
||||
if realPath, err = filepath.EvalSymlinks(realPath); err != nil {
|
||||
return "", fmt.Errorf("failed to canonicalise path for %s: %s", path, err)
|
||||
}
|
||||
realPathInfo, err := os.Stat(realPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to stat target '%s' of '%s': %s", realPath, path, err)
|
||||
}
|
||||
if !realPathInfo.Mode().IsDir() {
|
||||
return "", fmt.Errorf("canonical path points to a file '%s'", realPath)
|
||||
}
|
||||
return realPath, nil
|
||||
}
|
||||
|
||||
// CreateIfNotExists creates a file or a directory only if it does not already exist.
|
||||
func CreateIfNotExists(path string, isDir bool) error {
|
||||
if _, err := os.Stat(path); err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
if isDir {
|
||||
return os.MkdirAll(path, 0755)
|
||||
}
|
||||
if err := os.MkdirAll(filepath.Dir(path), 0755); err != nil {
|
||||
return err
|
||||
}
|
||||
f, err := os.OpenFile(path, os.O_CREATE, 0755)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
f.Close()
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
27 vendor/github.com/docker/docker/pkg/fileutils/fileutils_darwin.go (generated, vendored)
@ -1,27 +0,0 @@
|
||||
package fileutils // import "github.com/docker/docker/pkg/fileutils"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// GetTotalUsedFds returns the number of used File Descriptors by
|
||||
// executing `lsof -p PID`
|
||||
func GetTotalUsedFds() int {
|
||||
pid := os.Getpid()
|
||||
|
||||
cmd := exec.Command("lsof", "-p", strconv.Itoa(pid))
|
||||
|
||||
output, err := cmd.CombinedOutput()
|
||||
if err != nil {
|
||||
return -1
|
||||
}
|
||||
|
||||
outputStr := strings.TrimSpace(string(output))
|
||||
|
||||
fds := strings.Split(outputStr, "\n")
|
||||
|
||||
return len(fds) - 1
|
||||
}
|
22 vendor/github.com/docker/docker/pkg/fileutils/fileutils_unix.go (generated, vendored)
@ -1,22 +0,0 @@
|
||||
//go:build linux || freebsd
|
||||
// +build linux freebsd
|
||||
|
||||
package fileutils // import "github.com/docker/docker/pkg/fileutils"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
// GetTotalUsedFds Returns the number of used File Descriptors by
|
||||
// reading it via /proc filesystem.
|
||||
func GetTotalUsedFds() int {
|
||||
if fds, err := os.ReadDir(fmt.Sprintf("/proc/%d/fd", os.Getpid())); err != nil {
|
||||
logrus.Errorf("Error opening /proc/%d/fd: %s", os.Getpid(), err)
|
||||
} else {
|
||||
return len(fds)
|
||||
}
|
||||
return -1
|
||||
}
|
7 vendor/github.com/docker/docker/pkg/fileutils/fileutils_windows.go (generated, vendored)
@ -1,7 +0,0 @@
|
||||
package fileutils // import "github.com/docker/docker/pkg/fileutils"
|
||||
|
||||
// GetTotalUsedFds Returns the number of used File Descriptors. Not supported
|
||||
// on Windows.
|
||||
func GetTotalUsedFds() int {
|
||||
return -1
|
||||
}
|
5 vendor/github.com/docker/docker/pkg/idtools/idtools_unix.go (generated, vendored)
@ -30,6 +30,10 @@ func mkdirAs(path string, mode os.FileMode, owner Identity, mkAll, chownExisting
|
||||
// chown the full directory path if it exists
|
||||
|
||||
var paths []string
|
||||
path, err := filepath.Abs(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
stat, err := system.Stat(path)
|
||||
if err == nil {
|
||||
@ -209,7 +213,6 @@ func callGetent(database, key string) (io.Reader, error) {
|
||||
default:
|
||||
return nil, err
|
||||
}
|
||||
|
||||
}
|
||||
return bytes.NewReader(out), nil
|
||||
}
|
||||
|
1 vendor/github.com/docker/docker/pkg/idtools/usergroupadd_linux.go (generated, vendored)
@ -88,7 +88,6 @@ func addUser(name string) error {
|
||||
}
|
||||
|
||||
func createSubordinateRanges(name string) error {
|
||||
|
||||
// first, we should verify that ranges weren't automatically created
|
||||
// by the distro tooling
|
||||
ranges, err := parseSubuid(name)
|
||||
|
3 vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go (generated, vendored)
@ -769,9 +769,6 @@ var (
|
||||
// Helen Brooke Taussig - American cardiologist and founder of the field of paediatric cardiology. https://en.wikipedia.org/wiki/Helen_B._Taussig
|
||||
"taussig",
|
||||
|
||||
// Valentina Tereshkova is a Russian engineer, cosmonaut and politician. She was the first woman to fly to space in 1963. In 2013, at the age of 76, she offered to go on a one-way mission to Mars. https://en.wikipedia.org/wiki/Valentina_Tereshkova
|
||||
"tereshkova",
|
||||
|
||||
// Nikola Tesla invented the AC electric system and every gadget ever used by a James Bond villain. https://en.wikipedia.org/wiki/Nikola_Tesla
|
||||
"tesla",
|
||||
|
||||
|
19 vendor/github.com/docker/docker/pkg/system/filesys.go (generated, vendored, new file)
@ -0,0 +1,19 @@
|
||||
package system
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// IsAbs is a platform-agnostic wrapper for filepath.IsAbs.
|
||||
//
|
||||
// On Windows, golang filepath.IsAbs does not consider a path \windows\system32
|
||||
// as absolute as it doesn't start with a drive-letter/colon combination. However,
|
||||
// in docker we need to verify things such as WORKDIR /windows/system32 in
|
||||
// a Dockerfile (which gets translated to \windows\system32 when being processed
|
||||
// by the daemon). This SHOULD be treated as absolute from a docker processing
|
||||
// perspective.
|
||||
func IsAbs(path string) bool {
|
||||
return filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator))
|
||||
}
|
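The point of hoisting IsAbs into a shared file is the Windows behaviour described in the comment above. A minimal sketch of the difference; the result of the second check only diverges from filepath.IsAbs when built for Windows.

    package main

    import (
        "fmt"
        "path/filepath"

        "github.com/docker/docker/pkg/system"
    )

    func main() {
        p := `\windows\system32`
        // On Windows filepath.IsAbs rejects this form (no drive letter), while
        // system.IsAbs accepts it, matching how WORKDIR /windows/system32 is treated.
        fmt.Println("filepath.IsAbs:", filepath.IsAbs(p))
        fmt.Println("system.IsAbs:  ", system.IsAbs(p))
    }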
35 vendor/github.com/docker/docker/pkg/system/filesys_deprecated.go (generated, vendored, new file)
@ -0,0 +1,35 @@
|
||||
package system
|
||||
|
||||
import (
|
||||
"os"
|
||||
|
||||
"github.com/moby/sys/sequential"
|
||||
)
|
||||
|
||||
// CreateSequential is deprecated.
|
||||
//
|
||||
// Deprecated: use os.Create or github.com/moby/sys/sequential.Create()
|
||||
func CreateSequential(name string) (*os.File, error) {
|
||||
return sequential.Create(name)
|
||||
}
|
||||
|
||||
// OpenSequential is deprecated.
|
||||
//
|
||||
// Deprecated: use os.Open or github.com/moby/sys/sequential.Open
|
||||
func OpenSequential(name string) (*os.File, error) {
|
||||
return sequential.Open(name)
|
||||
}
|
||||
|
||||
// OpenFileSequential is deprecated.
|
||||
//
|
||||
// Deprecated: use github.com/moby/sys/sequential.OpenFile()
|
||||
func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
|
||||
return sequential.OpenFile(name, flag, perm)
|
||||
}
|
||||
|
||||
// TempFileSequential is deprecated.
|
||||
//
|
||||
// Deprecated: use os.CreateTemp or github.com/moby/sys/sequential.CreateTemp
|
||||
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
|
||||
return sequential.CreateTemp(dir, prefix)
|
||||
}
|
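Callers of the old wrappers migrate one-for-one to github.com/moby/sys/sequential, which is what the archive changes earlier in this commit do. A minimal sketch; "layer.tar" is just an example filename.

    package main

    import (
        "fmt"

        "github.com/moby/sys/sequential"
    )

    func main() {
        // Replaces the deprecated system.CreateSequential; on Windows the file is
        // opened with FILE_FLAG_SEQUENTIAL_SCAN, on Linux this is a plain os.Create.
        f, err := sequential.Create("layer.tar")
        if err != nil {
            panic(err)
        }
        defer f.Close()
        fmt.Println("created:", f.Name())
    }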
52 vendor/github.com/docker/docker/pkg/system/filesys_unix.go (generated, vendored)
@ -3,10 +3,7 @@
|
||||
|
||||
package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
)
|
||||
import "os"
|
||||
|
||||
// MkdirAllWithACL is a wrapper for os.MkdirAll on unix systems.
|
||||
func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
|
||||
@ -18,50 +15,3 @@ func MkdirAllWithACL(path string, perm os.FileMode, sddl string) error {
|
||||
func MkdirAll(path string, perm os.FileMode) error {
|
||||
return os.MkdirAll(path, perm)
|
||||
}
|
||||
|
||||
// IsAbs is a platform-specific wrapper for filepath.IsAbs.
|
||||
func IsAbs(path string) bool {
|
||||
return filepath.IsAbs(path)
|
||||
}
|
||||
|
||||
// The functions below here are wrappers for the equivalents in the os and ioutils packages.
|
||||
// They are passthrough on Unix platforms, and only relevant on Windows.
|
||||
|
||||
// CreateSequential creates the named file with mode 0666 (before umask), truncating
|
||||
// it if it already exists. If successful, methods on the returned
|
||||
// File can be used for I/O; the associated file descriptor has mode
|
||||
// O_RDWR.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func CreateSequential(name string) (*os.File, error) {
|
||||
return os.Create(name)
|
||||
}
|
||||
|
||||
// OpenSequential opens the named file for reading. If successful, methods on
|
||||
// the returned file can be used for reading; the associated file
|
||||
// descriptor has mode O_RDONLY.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func OpenSequential(name string) (*os.File, error) {
|
||||
return os.Open(name)
|
||||
}
|
||||
|
||||
// OpenFileSequential is the generalized open call; most users will use Open
|
||||
// or Create instead. It opens the named file with specified flag
|
||||
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
|
||||
// methods on the returned File can be used for I/O.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func OpenFileSequential(name string, flag int, perm os.FileMode) (*os.File, error) {
|
||||
return os.OpenFile(name, flag, perm)
|
||||
}
|
||||
|
||||
// TempFileSequential creates a new temporary file in the directory dir
|
||||
// with a name beginning with prefix, opens the file for reading
|
||||
// and writing, and returns the resulting *os.File.
|
||||
// If dir is the empty string, TempFile uses the default directory
|
||||
// for temporary files (see os.TempDir).
|
||||
// Multiple programs calling TempFile simultaneously
|
||||
// will not choose the same file. The caller can use f.Name()
|
||||
// to find the pathname of the file. It is the caller's responsibility
|
||||
// to remove the file when no longer needed.
|
||||
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
|
||||
return os.CreateTemp(dir, prefix)
|
||||
}
|
||||
|
174 vendor/github.com/docker/docker/pkg/system/filesys_windows.go (generated, vendored)
@ -2,13 +2,8 @@ package system // import "github.com/docker/docker/pkg/system"
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
"time"
|
||||
"unsafe"
|
||||
|
||||
"golang.org/x/sys/windows"
|
||||
@ -121,172 +116,3 @@ func mkdirWithACL(name string, sddl string) error {
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// IsAbs is a platform-specific wrapper for filepath.IsAbs. On Windows,
|
||||
// golang filepath.IsAbs does not consider a path \windows\system32 as absolute
|
||||
// as it doesn't start with a drive-letter/colon combination. However, in
|
||||
// docker we need to verify things such as WORKDIR /windows/system32 in
|
||||
// a Dockerfile (which gets translated to \windows\system32 when being processed
|
||||
// by the daemon. This SHOULD be treated as absolute from a docker processing
|
||||
// perspective.
|
||||
func IsAbs(path string) bool {
|
||||
if filepath.IsAbs(path) || strings.HasPrefix(path, string(os.PathSeparator)) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// The origin of the functions below here are the golang OS and windows packages,
|
||||
// slightly modified to only cope with files, not directories due to the
|
||||
// specific use case.
|
||||
//
|
||||
// The alteration is to allow a file on Windows to be opened with
|
||||
// FILE_FLAG_SEQUENTIAL_SCAN (particular for docker load), to avoid eating
|
||||
// the standby list, particularly when accessing large files such as layer.tar.
|
||||
|
||||
// CreateSequential creates the named file with mode 0666 (before umask), truncating
|
||||
// it if it already exists. If successful, methods on the returned
|
||||
// File can be used for I/O; the associated file descriptor has mode
|
||||
// O_RDWR.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func CreateSequential(name string) (*os.File, error) {
|
||||
return OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
|
||||
}
|
||||
|
||||
// OpenSequential opens the named file for reading. If successful, methods on
|
||||
// the returned file can be used for reading; the associated file
|
||||
// descriptor has mode O_RDONLY.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func OpenSequential(name string) (*os.File, error) {
|
||||
return OpenFileSequential(name, os.O_RDONLY, 0)
|
||||
}
|
||||
|
||||
// OpenFileSequential is the generalized open call; most users will use Open
|
||||
// or Create instead.
|
||||
// If there is an error, it will be of type *PathError.
|
||||
func OpenFileSequential(name string, flag int, _ os.FileMode) (*os.File, error) {
|
||||
if name == "" {
|
||||
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
|
||||
}
|
||||
r, errf := windowsOpenFileSequential(name, flag, 0)
|
||||
if errf == nil {
|
||||
return r, nil
|
||||
}
|
||||
return nil, &os.PathError{Op: "open", Path: name, Err: errf}
|
||||
}
|
||||
|
||||
func windowsOpenFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
|
||||
r, e := windowsOpenSequential(name, flag|windows.O_CLOEXEC, 0)
|
||||
if e != nil {
|
||||
return nil, e
|
||||
}
|
||||
return os.NewFile(uintptr(r), name), nil
|
||||
}
|
||||
|
||||
func makeInheritSa() *windows.SecurityAttributes {
|
||||
var sa windows.SecurityAttributes
|
||||
sa.Length = uint32(unsafe.Sizeof(sa))
|
||||
sa.InheritHandle = 1
|
||||
return &sa
|
||||
}
|
||||
|
||||
func windowsOpenSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
|
||||
if len(path) == 0 {
|
||||
return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
|
||||
}
|
||||
pathp, err := windows.UTF16PtrFromString(path)
|
||||
if err != nil {
|
||||
return windows.InvalidHandle, err
|
||||
}
|
||||
var access uint32
|
||||
switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
|
||||
case windows.O_RDONLY:
|
||||
access = windows.GENERIC_READ
|
||||
case windows.O_WRONLY:
|
||||
access = windows.GENERIC_WRITE
|
||||
case windows.O_RDWR:
|
||||
access = windows.GENERIC_READ | windows.GENERIC_WRITE
|
||||
}
|
||||
if mode&windows.O_CREAT != 0 {
|
||||
access |= windows.GENERIC_WRITE
|
||||
}
|
||||
if mode&windows.O_APPEND != 0 {
|
||||
access &^= windows.GENERIC_WRITE
|
||||
access |= windows.FILE_APPEND_DATA
|
||||
}
|
||||
sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
|
||||
var sa *windows.SecurityAttributes
|
||||
if mode&windows.O_CLOEXEC == 0 {
|
||||
sa = makeInheritSa()
|
||||
}
|
||||
var createmode uint32
|
||||
switch {
|
||||
case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
|
||||
createmode = windows.CREATE_NEW
|
||||
case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
|
||||
createmode = windows.CREATE_ALWAYS
|
||||
case mode&windows.O_CREAT == windows.O_CREAT:
|
||||
createmode = windows.OPEN_ALWAYS
|
||||
case mode&windows.O_TRUNC == windows.O_TRUNC:
|
||||
createmode = windows.TRUNCATE_EXISTING
|
||||
default:
|
||||
createmode = windows.OPEN_EXISTING
|
||||
}
|
||||
// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
|
||||
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
|
||||
const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
|
||||
h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
|
||||
return h, e
|
||||
}
|
||||
|
||||
// Helpers for TempFileSequential
|
||||
var rand uint32
|
||||
var randmu sync.Mutex
|
||||
|
||||
func reseed() uint32 {
|
||||
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
|
||||
}
|
||||
func nextSuffix() string {
|
||||
randmu.Lock()
|
||||
r := rand
|
||||
if r == 0 {
|
||||
r = reseed()
|
||||
}
|
||||
r = r*1664525 + 1013904223 // constants from Numerical Recipes
|
||||
rand = r
|
||||
randmu.Unlock()
|
||||
return strconv.Itoa(int(1e9 + r%1e9))[1:]
|
||||
}
|
||||
|
||||
// TempFileSequential is a copy of os.CreateTemp, modified to use sequential
|
||||
// file access. Below is the original comment from golang:
|
||||
// TempFile creates a new temporary file in the directory dir
|
||||
// with a name beginning with prefix, opens the file for reading
|
||||
// and writing, and returns the resulting *os.File.
|
||||
// If dir is the empty string, TempFile uses the default directory
|
||||
// for temporary files (see os.TempDir).
|
||||
// Multiple programs calling TempFile simultaneously
|
||||
// will not choose the same file. The caller can use f.Name()
|
||||
// to find the pathname of the file. It is the caller's responsibility
|
||||
// to remove the file when no longer needed.
|
||||
func TempFileSequential(dir, prefix string) (f *os.File, err error) {
|
||||
if dir == "" {
|
||||
dir = os.TempDir()
|
||||
}
|
||||
|
||||
nconflict := 0
|
||||
for i := 0; i < 10000; i++ {
|
||||
name := filepath.Join(dir, prefix+nextSuffix())
|
||||
f, err = OpenFileSequential(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0600)
|
||||
if os.IsExist(err) {
|
||||
if nconflict++; nconflict > 10 {
|
||||
randmu.Lock()
|
||||
rand = reseed()
|
||||
randmu.Unlock()
|
||||
}
|
||||
continue
|
||||
}
|
||||
break
|
||||
}
|
||||
return
|
||||
}
|
||||
|
1 vendor/github.com/docker/docker/pkg/system/meminfo_linux.go (generated, vendored)
@ -55,7 +55,6 @@ func parseMemInfo(reader io.Reader) (*MemInfo, error) {
|
||||
case "SwapFree:":
|
||||
meminfo.SwapFree = bytes
|
||||
}
|
||||
|
||||
}
|
||||
if memAvailable != -1 {
|
||||
meminfo.MemFree = memAvailable
|
||||
|
1 vendor/github.com/docker/docker/pkg/system/path.go (generated, vendored)
@ -13,7 +13,6 @@ func DefaultPathEnv(os string) string {
|
||||
return ""
|
||||
}
|
||||
return defaultUnixPathEnv
|
||||
|
||||
}
|
||||
|
||||
// PathVerifier defines the subset of a PathDriver that CheckSystemDriveAndRemoveDriveLetter
|
||||
|