vendor: update buildkit to master@ae9d0f5

Signed-off-by: Justin Chadwell <me@jedevc.com>
Justin Chadwell committed 2022-11-22 14:39:36 +00:00
parent 6e9b743296
commit 36e663edda
375 changed files with 14834 additions and 13552 deletions


@ -4,6 +4,7 @@ import (
"context"
"github.com/moby/buildkit/client/buildid"
"github.com/moby/buildkit/frontend/attestations"
gateway "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/frontend/gateway/grpcclient"
gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
@ -20,17 +21,15 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
}
}()
if opt.Frontend != "" {
return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend")
}
feOpts := opt.FrontendAttrs
opt.Frontend = ""
opt.FrontendAttrs = attestations.Filter(opt.FrontendAttrs)
if product == "" {
product = apicaps.ExportedProduct
}
feOpts := opt.FrontendAttrs
opt.FrontendAttrs = nil
workers, err := c.ListWorkers(ctx)
if err != nil {
return nil, errors.Wrap(err, "listing workers for Build")
@ -113,6 +112,19 @@ func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.Sta
return g.gateway.StatFile(ctx, in, opts...)
}
func (g *gatewayClientForBuild) Evaluate(ctx context.Context, in *gatewayapi.EvaluateRequest, opts ...grpc.CallOption) (*gatewayapi.EvaluateResponse, error) {
if err := g.caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil {
if err2 := g.caps.Supports(gatewayapi.CapStatFile); err2 != nil {
return nil, err
}
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
_, err := g.gateway.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: in.Ref, Path: "."}, opts...)
return &gatewayapi.EvaluateResponse{}, err
}
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Evaluate(ctx, in, opts...)
}
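For reference, the fallback above is buildkit's usual apicaps negotiation: probe for the newer capability, and emulate it with an older RPC when the daemon predates it. A minimal sketch of the same pattern, with the two gRPC calls abstracted behind hypothetical callbacks:

package example

import (
	gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
	"github.com/moby/buildkit/util/apicaps"
)

// capsFallback mirrors the Evaluate method above: try the newer capability,
// and fall back to an older RPC when the daemon does not advertise it.
// evaluate and statDot are hypothetical stand-ins for the two gRPC calls.
func capsFallback(caps apicaps.CapSet, evaluate, statDot func() error) error {
	if err := caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil {
		// Older daemon: Stat(".") forces the result to be evaluated too.
		if err2 := caps.Supports(gatewayapi.CapStatFile); err2 != nil {
			return err // neither capability is available
		}
		return statDot()
	}
	return evaluate()
}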
func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Ping(ctx, in, opts...)


@ -234,7 +234,6 @@ func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
return nil, errors.Wrap(err, "could not read certificate/key")
}
cfg.Certificates = []tls.Certificate{cert}
cfg.BuildNameToCertificate()
}
return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil


@ -617,6 +617,7 @@ var (
LinuxArmel = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})
LinuxArm64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm64"})
LinuxS390x = Platform(ocispecs.Platform{OS: "linux", Architecture: "s390x"})
LinuxPpc64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64"})
LinuxPpc64le = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64le"})
Darwin = Platform(ocispecs.Platform{OS: "darwin", Architecture: "amd64"})
Windows = Platform(ocispecs.Platform{OS: "windows", Architecture: "amd64"})


@ -2,6 +2,7 @@ package client
import (
"context"
"encoding/base64"
"encoding/json"
"io"
"os"
@ -14,6 +15,7 @@ import (
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/ociindex"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
sessioncontent "github.com/moby/buildkit/session/content"
@ -124,6 +126,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
ex = opt.Exports[0]
}
indicesToUpdate := []string{}
if !opt.SessionPreInitialized {
if len(syncedDirs) > 0 {
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
@ -133,49 +137,64 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
s.Allow(a)
}
contentStores := map[string]content.Store{}
for key, store := range cacheOpt.contentStores {
contentStores[key] = store
}
for key, store := range opt.OCIStores {
key2 := "oci:" + key
if _, ok := contentStores[key2]; ok {
return nil, errors.Errorf("oci store key %q already exists", key)
}
contentStores[key2] = store
}
var supportFile bool
var supportDir bool
switch ex.Type {
case ExporterLocal:
if ex.Output != nil {
return nil, errors.New("output file writer is not supported by local exporter")
}
if ex.OutputDir == "" {
return nil, errors.New("output directory is required for local exporter")
}
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
case ExporterOCI, ExporterDocker, ExporterTar:
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
}
supportDir = true
case ExporterTar:
supportFile = true
case ExporterOCI, ExporterDocker:
supportDir = ex.OutputDir != ""
supportFile = ex.Output != nil
}
if supportFile && supportDir {
return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type)
}
if !supportFile && ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if !supportDir && ex.OutputDir != "" {
return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
}
if supportFile {
if ex.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
}
s.Allow(filesync.NewFSSyncTarget(ex.Output))
default:
if ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
}
}
// this is a new map that contains both cacheOpt stores and OCILayout stores
contentStores := make(map[string]content.Store, len(cacheOpt.contentStores)+len(opt.OCIStores))
// copy over the stores references from cacheOpt
for key, store := range cacheOpt.contentStores {
contentStores[key] = store
}
// copy over the stores references from ociLayout opts
for key, store := range opt.OCIStores {
// conflicts are not allowed
if _, ok := contentStores[key]; ok {
// we probably should check if the store is identical, but given that
// https://pkg.go.dev/github.com/containerd/containerd/content#Store
// is just an interface, composing 4 others, that is rather hard to do.
// For a future iteration.
return nil, errors.Errorf("contentStore key %s exists in both cache and OCI layouts", key)
if supportDir {
if ex.OutputDir == "" {
return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
}
switch ex.Type {
case ExporterOCI, ExporterDocker:
if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
return nil, err
}
cs, err := contentlocal.NewStore(ex.OutputDir)
if err != nil {
return nil, err
}
contentStores["export"] = cs
indicesToUpdate = append(indicesToUpdate, filepath.Join(ex.OutputDir, "index.json"))
default:
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
}
contentStores[key] = store
}
if len(contentStores) > 0 {
@ -352,10 +371,29 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
}
}
if manifestDescDt := res.ExporterResponse[exptypes.ExporterImageDescriptorKey]; manifestDescDt != "" {
manifestDescDt, err := base64.StdEncoding.DecodeString(manifestDescDt)
if err != nil {
return nil, err
}
var manifestDesc ocispecs.Descriptor
if err = json.Unmarshal([]byte(manifestDescDt), &manifestDesc); err != nil {
return nil, err
}
for _, indexJSONPath := range indicesToUpdate {
tag := "latest"
if t, ok := res.ExporterResponse["image.name"]; ok {
tag = t
}
if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil {
return nil, err
}
}
}
return res, nil
}
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) {
for _, d := range localDirs {
fi, err := os.Stat(d)
if err != nil {
@ -371,10 +409,10 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
return fsutil.MapResultKeep
}
dirs := make([]filesync.SyncedDir, 0, len(localDirs))
dirs := make(filesync.StaticDirSource, len(localDirs))
if def == nil {
for name, d := range localDirs {
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
}
} else {
for _, dt := range def.Def {
@ -389,7 +427,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
if !ok {
return nil, errors.Errorf("local directory %s not enabled", name)
}
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
}
}
}
@ -416,14 +454,10 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
var (
cacheExports []*controlapi.CacheOptionsEntry
cacheImports []*controlapi.CacheOptionsEntry
// legacy API is used for registry caches, because the daemon might not support the new API
legacyExportRef string
legacyImportRefs []string
)
contentStores := make(map[string]content.Store)
indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag
frontendAttrs := make(map[string]string)
legacyExportAttrs := make(map[string]string)
for _, ex := range opt.CacheExports {
if ex.Type == "local" {
csDir := ex.Attrs["dest"]
@ -438,26 +472,27 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
return nil, err
}
contentStores["local:"+csDir] = cs
tag := "latest"
if t, ok := ex.Attrs["tag"]; ok {
tag = t
}
// TODO(AkihiroSuda): support custom index JSON path and tag
indexJSONPath := filepath.Join(csDir, "index.json")
indicesToUpdate[indexJSONPath] = "latest"
indicesToUpdate[indexJSONPath] = tag
}
if ex.Type == "registry" && legacyExportRef == "" {
legacyExportRef = ex.Attrs["ref"]
for k, v := range ex.Attrs {
if k != "ref" {
legacyExportAttrs[k] = v
}
if ex.Type == "registry" {
regRef := ex.Attrs["ref"]
if regRef == "" {
return nil, errors.New("registry cache exporter requires ref")
}
} else {
cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
Type: ex.Type,
Attrs: ex.Attrs,
})
}
cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
Type: ex.Type,
Attrs: ex.Attrs,
})
}
for _, im := range opt.CacheImports {
attrs := im.Attrs
if im.Type == "local" {
csDir := im.Attrs["src"]
if csDir == "" {
@ -469,40 +504,40 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
continue
}
// if digest is not specified, load from "latest" tag
if attrs["digest"] == "" {
if im.Attrs["digest"] == "" {
idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json"))
if err != nil {
bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
continue
}
for _, m := range idx.Manifests {
if (m.Annotations[ocispecs.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispecs.AnnotationRefName] == attrs["tag"]) {
attrs["digest"] = string(m.Digest)
tag := "latest"
if t, ok := im.Attrs["tag"]; ok {
tag = t
}
if m.Annotations[ocispecs.AnnotationRefName] == tag {
im.Attrs["digest"] = string(m.Digest)
break
}
}
if attrs["digest"] == "" {
if im.Attrs["digest"] == "" {
return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json")
}
}
contentStores["local:"+csDir] = cs
}
if im.Type == "registry" {
legacyImportRef := attrs["ref"]
legacyImportRefs = append(legacyImportRefs, legacyImportRef)
} else {
cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
Type: im.Type,
Attrs: attrs,
})
regRef := im.Attrs["ref"]
if regRef == "" {
return nil, errors.New("registry cache importer requires ref")
}
}
cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
Type: im.Type,
Attrs: im.Attrs,
})
}
if opt.Frontend != "" || isGateway {
// use legacy API for registry importers, because the frontend might not support the new API
if len(legacyImportRefs) > 0 {
frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",")
}
// use new API for other importers
if len(cacheImports) > 0 {
s, err := json.Marshal(cacheImports)
if err != nil {
@ -513,11 +548,6 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
}
res := cacheOptions{
options: controlapi.CacheOptions{
// old API (for registry caches, planned to be removed in early 2019)
ExportRefDeprecated: legacyExportRef,
ExportAttrsDeprecated: legacyExportAttrs,
ImportRefsDeprecated: legacyImportRefs,
// new API
Exports: cacheExports,
Imports: cacheImports,
},
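For context, a minimal sketch (not part of this diff) of how the reworked cache options are driven from the client API: the new "tag" attribute on local caches replaces the hard-coded "latest", and registry refs are now validated up front. The directory paths below are hypothetical:

package example

import "github.com/moby/buildkit/client"

// localCacheOpt sketches a SolveOpt using the local cache exporter and
// importer with an explicit tag, matching the parsing logic above.
func localCacheOpt() client.SolveOpt {
	return client.SolveOpt{
		CacheExports: []client.CacheOptionsEntry{{
			Type:  "local",
			Attrs: map[string]string{"dest": "/tmp/buildcache", "tag": "v1"},
		}},
		CacheImports: []client.CacheOptionsEntry{{
			Type:  "local",
			Attrs: map[string]string{"src": "/tmp/buildcache", "tag": "v1"},
		}},
	}
}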


@ -53,6 +53,7 @@ type NetworkConfig struct {
Mode string `toml:"networkMode"`
CNIConfigPath string `toml:"cniConfigPath"`
CNIBinaryPath string `toml:"cniBinaryPath"`
CNIPoolSize int `toml:"cniPoolSize"`
}
type OCIConfig struct {
@ -81,6 +82,9 @@ type OCIConfig struct {
// The profile should already be loaded (by a higher level system) before creating a worker.
ApparmorProfile string `toml:"apparmor-profile"`
// SELinux enables applying SELinux labels.
SELinux bool `toml:"selinux"`
// MaxParallelism is the maximum number of parallel build steps that can be run at the same time.
MaxParallelism int `toml:"max-parallelism"`
}
@ -99,6 +103,9 @@ type ContainerdConfig struct {
// The profile should already be loaded (by a higher level system) before creating a worker.
ApparmorProfile string `toml:"apparmor-profile"`
// SELinux enables applying SELinux labels.
SELinux bool `toml:"selinux"`
MaxParallelism int `toml:"max-parallelism"`
Rootless bool `toml:"rootless"`


@ -13,8 +13,17 @@ const (
ExporterInlineCache = "containerimage.inlinecache"
ExporterBuildInfo = "containerimage.buildinfo"
ExporterPlatformsKey = "refs.platforms"
ExporterEpochKey = "source.date.epoch"
)
// KnownRefMetadataKeys are the subset of exporter keys that can be suffixed by
// a platform to become platform specific
var KnownRefMetadataKeys = []string{
ExporterImageConfigKey,
ExporterInlineCache,
ExporterBuildInfo,
}
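A short sketch of how these keys are typically made platform specific; the key+"/"+platformID convention is an assumption inferred from the comment, not something shown in this diff:

package example

import (
	"github.com/containerd/containerd/platforms"
	"github.com/moby/buildkit/exporter/containerimage/exptypes"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// platformKey suffixes one of the KnownRefMetadataKeys with a platform ID,
// e.g. "containerimage.config/linux/amd64" (assumed convention).
func platformKey(key string, p ocispecs.Platform) string {
	return key + "/" + platforms.Format(p)
}

var _ = platformKey(exptypes.ExporterImageConfigKey, ocispecs.Platform{OS: "linux", Architecture: "amd64"})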
type Platforms struct {
Platforms []Platform
}


@ -0,0 +1,52 @@
package image
import (
"time"
"github.com/docker/docker/api/types/strslice"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
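As an illustration (not part of this diff), a HealthConfig value roughly equivalent to a Dockerfile HEALTHCHECK instruction; the command and durations are example choices:

// exampleHealthcheck is roughly equivalent to:
//   HEALTHCHECK --interval=30s --timeout=3s --retries=3 \
//     CMD curl -f http://localhost/ || exit 1
func exampleHealthcheck() *HealthConfig {
	return &HealthConfig{
		Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
		Interval: 30 * time.Second,
		Timeout:  3 * time.Second,
		Retries:  3,
	}
}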
// ImageConfig is a docker compatible config for an image
type ImageConfig struct {
ocispecs.ImageConfig
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
// NetworkDisabled bool `json:",omitempty"` // Is network disabled
// MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
// Image is the JSON structure which describes some basic information about the image.
// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
type Image struct {
ocispecs.Image
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`
}


@ -0,0 +1,82 @@
package attestations
import (
"encoding/csv"
"strings"
"github.com/pkg/errors"
)
const (
KeyTypeSbom = "sbom"
KeyTypeProvenance = "provenance"
)
const (
// TODO: update this before next buildkit release
defaultSBOMGenerator = "jedevc/buildkit-syft-scanner:master@sha256:de630f621eb0ab1bb1245cea76d01c5bddfe78af4f5b9adecde424cb7ec5605e"
)
func Filter(v map[string]string) map[string]string {
attests := make(map[string]string)
for k, v := range v {
if strings.HasPrefix(k, "attest:") {
attests[k] = v
continue
}
if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
attests[k] = v
continue
}
}
return attests
}
func Validate(values map[string]map[string]string) (map[string]map[string]string, error) {
for k := range values {
if k != KeyTypeSbom && k != KeyTypeProvenance {
return nil, errors.Errorf("unknown attestation type %q", k)
}
}
return values, nil
}
func Parse(values map[string]string) (map[string]map[string]string, error) {
attests := make(map[string]string)
for k, v := range values {
if strings.HasPrefix(k, "attest:") {
attests[strings.ToLower(strings.TrimPrefix(k, "attest:"))] = v
continue
}
if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
attests[strings.ToLower(strings.TrimPrefix(k, "build-arg:BUILDKIT_ATTEST_"))] = v
continue
}
}
out := make(map[string]map[string]string)
for k, v := range attests {
attrs := make(map[string]string)
out[k] = attrs
if k == KeyTypeSbom {
attrs["generator"] = defaultSBOMGenerator
}
if v == "" {
continue
}
csvReader := csv.NewReader(strings.NewReader(v))
fields, err := csvReader.Read()
if err != nil {
return nil, errors.Wrapf(err, "failed to parse %s", k)
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
parts = append(parts, "")
}
attrs[parts[0]] = parts[1]
}
}
return Validate(out)
}
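A usage sketch of what Parse returns for a typical attestation flag set; the input keys mirror the two prefixes handled above, and the generator image is illustrative:

package example

import "github.com/moby/buildkit/frontend/attestations"

func exampleParse() (map[string]map[string]string, error) {
	in := map[string]string{
		"attest:sbom":       "generator=example/scanner:latest",
		"attest:provenance": "mode=max",
	}
	// Expected shape:
	//   out["sbom"]["generator"] == "example/scanner:latest" (default overridden)
	//   out["provenance"]["mode"] == "max"
	return attestations.Parse(in)
}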


@ -0,0 +1,46 @@
package client
import (
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/solver/result"
)
func AttestationToPB(a *result.Attestation) (*pb.Attestation, error) {
subjects := make([]*pb.InTotoSubject, len(a.InToto.Subjects))
for i, subject := range a.InToto.Subjects {
subjects[i] = &pb.InTotoSubject{
Kind: subject.Kind,
Name: subject.Name,
Digest: subject.Digest,
}
}
return &pb.Attestation{
Kind: a.Kind,
Path: a.Path,
Ref: a.Ref,
InTotoPredicateType: a.InToto.PredicateType,
InTotoSubjects: subjects,
}, nil
}
func AttestationFromPB(a *pb.Attestation) (*result.Attestation, error) {
subjects := make([]result.InTotoSubject, len(a.InTotoSubjects))
for i, subject := range a.InTotoSubjects {
subjects[i] = result.InTotoSubject{
Kind: subject.Kind,
Name: subject.Name,
Digest: subject.Digest,
}
}
return &result.Attestation{
Kind: a.Kind,
Path: a.Path,
Ref: a.Ref,
InToto: result.InTotoAttestation{
PredicateType: a.InTotoPredicateType,
Subjects: subjects,
},
}, nil
}
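The two helpers are inverses, so a round trip preserves the fields. A small sketch; the predicate type and path are example values:

package example

import (
	"github.com/moby/buildkit/frontend/gateway/client"
	pb "github.com/moby/buildkit/frontend/gateway/pb"
	"github.com/moby/buildkit/solver/result"
)

func roundTrip() (*result.Attestation, error) {
	att := &result.Attestation{
		Kind: pb.AttestationKindInToto,
		Path: "sbom.spdx.json",
		InToto: result.InTotoAttestation{
			PredicateType: "https://spdx.dev/Document",
		},
	}
	p, err := client.AttestationToPB(att)
	if err != nil {
		return nil, err
	}
	// The converted-back value should be equivalent to att.
	return client.AttestationFromPB(p)
}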


@ -7,12 +7,21 @@ import (
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/solver/result"
"github.com/moby/buildkit/util/apicaps"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
fstypes "github.com/tonistiigi/fsutil/types"
)
type Result = result.Result[Reference]
type BuildFunc func(context.Context, Client) (*Result, error)
func NewResult() *Result {
return &Result{}
}
type Client interface {
Solve(ctx context.Context, req SolveRequest) (*Result, error)
ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error)
@ -82,6 +91,7 @@ type ContainerProcess interface {
type Reference interface {
ToState() (llb.State, error)
Evaluate(ctx context.Context) error
ReadFile(ctx context.Context, req ReadRequest) ([]byte, error)
StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error)
ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error)


@ -1,54 +0,0 @@
package client
import (
"context"
"sync"
"github.com/pkg/errors"
)
type BuildFunc func(context.Context, Client) (*Result, error)
type Result struct {
mu sync.Mutex
Ref Reference
Refs map[string]Reference
Metadata map[string][]byte
}
func NewResult() *Result {
return &Result{}
}
func (r *Result) AddMeta(k string, v []byte) {
r.mu.Lock()
if r.Metadata == nil {
r.Metadata = map[string][]byte{}
}
r.Metadata[k] = v
r.mu.Unlock()
}
func (r *Result) AddRef(k string, ref Reference) {
r.mu.Lock()
if r.Refs == nil {
r.Refs = map[string]Reference{}
}
r.Refs[k] = ref
r.mu.Unlock()
}
func (r *Result) SetRef(ref Reference) {
r.Ref = ref
}
func (r *Result) SingleRef() (Reference, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.Refs != nil && r.Ref == nil {
return nil, errors.Errorf("invalid map result")
}
return r.Ref, nil
}


@ -115,7 +115,7 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
req := &pb.ReturnRequest{}
if retError == nil {
if res == nil {
res = &client.Result{}
res = client.NewResult()
}
pbRes := &pb.Result{
Metadata: res.Metadata,
@ -160,6 +160,25 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
}
}
}
if res.Attestations != nil && c.caps.Supports(pb.CapAttestations) == nil {
attestations := map[string]*pb.Attestations{}
for k, as := range res.Attestations {
for _, a := range as {
pbAtt, err := client.AttestationToPB(&a)
if err != nil {
retError = err
continue
}
if attestations[k] == nil {
attestations[k] = &pb.Attestations{}
}
attestations[k].Attestation = append(attestations[k].Attestation, pbAtt)
}
}
pbRes.Attestations = attestations
}
if retError == nil {
req.Result = pbRes
}
@ -368,30 +387,15 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
if c.caps.Supports(pb.CapGatewayEvaluateSolve) == nil {
req.Evaluate = creq.Evaluate
} else {
// If evaluate is not supported, fallback to running Stat(".") in order to
// trigger an evaluation of the result.
// If evaluate is not supported, fallback to running Stat(".") in
// order to trigger an evaluation of the result.
defer func() {
if res == nil {
return
}
var (
id string
ref client.Reference
)
ref, err = res.SingleRef()
if err != nil {
for refID := range res.Refs {
id = refID
break
}
} else {
id = ref.(*reference).id
}
_, err = c.client.StatFile(ctx, &pb.StatFileRequest{
Ref: id,
Path: ".",
err = res.EachRef(func(ref client.Reference) error {
_, err := ref.StatFile(ctx, client.StatRequest{Path: "."})
return err
})
}()
}
@ -402,7 +406,7 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
return nil, err
}
res = &client.Result{}
res = client.NewResult()
if resp.Result == nil {
if id := resp.Ref; id != "" {
c.requests[id] = req
@ -443,6 +447,18 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
res.AddRef(k, ref)
}
}
if resp.Result.Attestations != nil {
for p, as := range resp.Result.Attestations {
for _, a := range as.Attestation {
att, err := client.AttestationFromPB(a)
if err != nil {
return nil, err
}
res.AddAttestation(p, *att, nil)
}
}
}
}
return res, nil
@ -1023,6 +1039,15 @@ func (r *reference) ToState() (st llb.State, err error) {
return llb.NewState(defop), nil
}
func (r *reference) Evaluate(ctx context.Context) error {
req := &pb.EvaluateRequest{Ref: r.id}
_, err := r.c.client.Evaluate(ctx, req)
if err != nil {
return err
}
return nil
}
func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id}
if r := req.Range; r != nil {


@ -56,8 +56,14 @@ const (
// errors.
CapGatewayEvaluateSolve apicaps.CapID = "gateway.solve.evaluate"
CapGatewayEvaluate apicaps.CapID = "gateway.evaluate"
// CapGatewayWarnings is the capability to log warnings from frontend
CapGatewayWarnings apicaps.CapID = "gateway.warnings"
// CapAttestations is the capability to indicate that attestation
// references will be attached to results
CapAttestations apicaps.CapID = "reference.attestations"
)
func init() {
@ -194,10 +200,24 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapGatewayEvaluate,
Name: "gateway evaluate",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapGatewayWarnings,
Name: "logging warnings",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapAttestations,
Name: "reference attestations",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}

File diff suppressed because it is too large


@ -25,6 +25,8 @@ service LLBBridge {
rpc ReadDir(ReadDirRequest) returns (ReadDirResponse);
// apicaps:CapStatFile
rpc StatFile(StatFileRequest) returns (StatFileResponse);
// apicaps:CapGatewayEvaluate
rpc Evaluate(EvaluateRequest) returns (EvaluateResponse);
rpc Ping(PingRequest) returns (PongResponse);
rpc Return(ReturnRequest) returns (ReturnResponse);
// apicaps:CapFrontendInputs
@ -48,6 +50,7 @@ message Result {
RefMap refs = 4;
}
map<string, bytes> metadata = 10;
map<string, Attestations> attestations = 11;
}
message RefMapDeprecated {
@ -63,6 +66,38 @@ message RefMap {
map<string, Ref> refs = 1;
}
message Attestations {
repeated Attestation attestation = 1;
}
message Attestation {
AttestationKind kind = 1;
string ref = 2;
string path = 3;
string inTotoPredicateType = 4;
repeated InTotoSubject inTotoSubjects = 5;
}
enum AttestationKind {
option (gogoproto.goproto_enum_prefix) = false;
InToto = 0 [(gogoproto.enumvalue_customname) = "AttestationKindInToto"];
Bundle = 1 [(gogoproto.enumvalue_customname) = "AttestationKindBundle"];
}
message InTotoSubject {
InTotoSubjectKind kind = 1;
repeated string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
string name = 3;
}
enum InTotoSubjectKind {
option (gogoproto.goproto_enum_prefix) = false;
Self = 0 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindSelf"];
Raw = 1 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindRaw"];
}
message ReturnRequest {
Result result = 1;
google.rpc.Status error = 2;
@ -163,6 +198,13 @@ message StatFileResponse {
fsutil.types.Stat stat = 1;
}
message EvaluateRequest {
string Ref = 1;
}
message EvaluateResponse {
}
message PingRequest{
}
message PongResponse{


@ -2,8 +2,8 @@ package auth
import (
"context"
"crypto/rand"
"crypto/subtle"
"math/rand"
"sync"
"github.com/moby/buildkit/session"


@ -27,27 +27,35 @@ const (
)
type fsSyncProvider struct {
dirs map[string]SyncedDir
dirs DirSource
p progressCb
doneCh chan error
}
type SyncedDir struct {
Name string
Dir string
Excludes []string
Map func(string, *fstypes.Stat) fsutil.MapResult
}
type DirSource interface {
LookupDir(string) (SyncedDir, bool)
}
type StaticDirSource map[string]SyncedDir
var _ DirSource = StaticDirSource{}
func (dirs StaticDirSource) LookupDir(name string) (SyncedDir, bool) {
dir, found := dirs[name]
return dir, found
}
// NewFSSyncProvider creates a new provider for sending files from client
func NewFSSyncProvider(dirs []SyncedDir) session.Attachable {
p := &fsSyncProvider{
dirs: map[string]SyncedDir{},
func NewFSSyncProvider(dirs DirSource) session.Attachable {
return &fsSyncProvider{
dirs: dirs,
}
for _, d := range dirs {
p.dirs[d.Name] = d
}
return p
}
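The call-site change this implies, as a sketch: callers now build a StaticDirSource keyed by name instead of a []SyncedDir with Name fields. The directory paths are hypothetical:

package example

import (
	"github.com/moby/buildkit/session"
	"github.com/moby/buildkit/session/filesync"
)

func exampleProvider() session.Attachable {
	dirs := filesync.StaticDirSource{
		"context":    {Dir: "/path/to/build-context"},
		"dockerfile": {Dir: "/path/to/dockerfile-dir"},
	}
	return filesync.NewFSSyncProvider(dirs)
}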
func (sp *fsSyncProvider) Register(server *grpc.Server) {
@ -81,7 +89,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr
dirName = name[0]
}
dir, ok := sp.dirs[dirName]
dir, ok := sp.dirs.LookupDir(dirName)
if !ok {
return InvalidSessionError{status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName)}
}


@ -2,6 +2,7 @@ package session
import (
"context"
"math"
"net"
"sync/atomic"
"time"
@ -10,6 +11,7 @@ import (
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/otel/trace"
"golang.org/x/net/http2"
@ -79,21 +81,55 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func())
defer cancelConn()
defer cc.Close()
ticker := time.NewTicker(1 * time.Second)
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
healthClient := grpc_health_v1.NewHealthClient(cc)
failedBefore := false
consecutiveSuccessful := 0
defaultHealthcheckDuration := 30 * time.Second
lastHealthcheckDuration := time.Duration(0)
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
// This healthcheck can erroneously fail in some instances, such as when
// receiving lots of data in a low-bandwidth scenario or running too many
// concurrent builds. So the timeout is purposely long, and a limited number
// of failures is tolerated.
healthcheckStart := time.Now()
timeout := time.Duration(math.Max(float64(defaultHealthcheckDuration), float64(lastHealthcheckDuration)*1.5))
ctx, cancel := context.WithTimeout(ctx, timeout)
_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
cancel()
if err != nil {
return
lastHealthcheckDuration = time.Since(healthcheckStart)
logFields := logrus.Fields{
"timeout": timeout,
"actualDuration": lastHealthcheckDuration,
}
if err != nil {
if failedBefore {
bklog.G(ctx).Error("healthcheck failed fatally")
return
}
failedBefore = true
consecutiveSuccessful = 0
bklog.G(ctx).WithFields(logFields).Warn("healthcheck failed")
} else {
consecutiveSuccessful++
if consecutiveSuccessful >= 5 && failedBefore {
failedBefore = false
bklog.G(ctx).WithFields(logFields).Debug("reset healthcheck failure")
}
}
bklog.G(ctx).WithFields(logFields).Debug("healthcheck completed")
}
}
}
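To make the new backoff concrete: the deadline is the larger of the 30s default and 1.5x the previous check's duration. An illustrative restatement (not upstream code) of that rule:

// exampleTimeout restates the timeout rule used in the loop above.
func exampleTimeout(last time.Duration) time.Duration {
	const def = 30 * time.Second
	// last = 45s -> 67.5s; anything under 20s keeps the 30s default.
	return time.Duration(math.Max(float64(def), float64(last)*1.5))
}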


@ -186,6 +186,7 @@ type Solve struct {
MountIDs []string `protobuf:"bytes,2,rep,name=mountIDs,proto3" json:"mountIDs,omitempty"`
Op *pb.Op `protobuf:"bytes,3,opt,name=op,proto3" json:"op,omitempty"`
// Types that are valid to be assigned to Subject:
//
// *Solve_File
// *Solve_Cache
Subject isSolve_Subject `protobuf_oneof:"subject"`


@ -73,13 +73,18 @@ const (
CapMetaDescription apicaps.CapID = "meta.description"
CapMetaExportCache apicaps.CapID = "meta.exportcache"
CapRemoteCacheGHA apicaps.CapID = "cache.gha"
CapRemoteCacheS3 apicaps.CapID = "cache.s3"
CapRemoteCacheGHA apicaps.CapID = "cache.gha"
CapRemoteCacheS3 apicaps.CapID = "cache.s3"
CapRemoteCacheAzBlob apicaps.CapID = "cache.azblob"
CapMergeOp apicaps.CapID = "mergeop"
CapDiffOp apicaps.CapID = "diffop"
CapAnnotations apicaps.CapID = "exporter.image.annotations"
CapAnnotations apicaps.CapID = "exporter.image.annotations"
CapAttestations apicaps.CapID = "exporter.image.attestations"
// CapSourceDateEpoch is the capability to automatically handle the date epoch
CapSourceDateEpoch apicaps.CapID = "exporter.sourcedateepoch"
)
func init() {
@ -423,6 +428,12 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapRemoteCacheAzBlob,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapMergeOp,
Enabled: true,
@ -440,4 +451,17 @@ func init() {
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapAttestations,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceDateEpoch,
Name: "source date epoch",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}


@ -154,6 +154,7 @@ type Op struct {
// inputs is a set of input edges.
Inputs []*Input `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"`
// Types that are valid to be assigned to Op:
//
// *Op_Exec
// *Op_Source
// *Op_File
@ -1948,6 +1949,7 @@ type FileAction struct {
SecondaryInput InputIndex `protobuf:"varint,2,opt,name=secondaryInput,proto3,customtype=InputIndex" json:"secondaryInput"`
Output OutputIndex `protobuf:"varint,3,opt,name=output,proto3,customtype=OutputIndex" json:"output"`
// Types that are valid to be assigned to Action:
//
// *FileAction_Copy
// *FileAction_Mkfile
// *FileAction_Mkdir
@ -2465,6 +2467,7 @@ func (m *ChownOpt) GetGroup() *UserOpt {
type UserOpt struct {
// Types that are valid to be assigned to User:
//
// *UserOpt_ByName
// *UserOpt_ByID
User isUserOpt_User `protobuf_oneof:"user"`


@ -0,0 +1,36 @@
package result
import (
pb "github.com/moby/buildkit/frontend/gateway/pb"
digest "github.com/opencontainers/go-digest"
)
type Attestation struct {
Kind pb.AttestationKind
Ref string
Path string
InToto InTotoAttestation
ContentFunc func() ([]byte, error)
}
type InTotoAttestation struct {
PredicateType string
Subjects []InTotoSubject
}
type InTotoSubject struct {
Kind pb.InTotoSubjectKind
Name string
Digest []digest.Digest
}
func DigestMap(ds ...digest.Digest) map[string]string {
m := map[string]string{}
for _, d := range ds {
m[d.Algorithm().String()] = d.Encoded()
}
return m
}

vendor/github.com/moby/buildkit/solver/result/result.go

@ -0,0 +1,131 @@
package result
import (
"reflect"
"strings"
"sync"
"github.com/moby/buildkit/identity"
"github.com/pkg/errors"
)
const (
attestationRefPrefix = "attestation:"
)
type Result[T any] struct {
mu sync.Mutex
Ref T
Refs map[string]T
Metadata map[string][]byte
Attestations map[string][]Attestation
}
func (r *Result[T]) AddMeta(k string, v []byte) {
r.mu.Lock()
if r.Metadata == nil {
r.Metadata = map[string][]byte{}
}
r.Metadata[k] = v
r.mu.Unlock()
}
func (r *Result[T]) AddRef(k string, ref T) {
r.mu.Lock()
if r.Refs == nil {
r.Refs = map[string]T{}
}
r.Refs[k] = ref
r.mu.Unlock()
}
func (r *Result[T]) AddAttestation(k string, v Attestation, ref T) {
r.mu.Lock()
if r.Refs == nil {
r.Refs = map[string]T{}
}
if r.Attestations == nil {
r.Attestations = map[string][]Attestation{}
}
if v.ContentFunc == nil && !strings.HasPrefix(v.Ref, attestationRefPrefix) {
v.Ref = "attestation:" + identity.NewID()
r.Refs[v.Ref] = ref
}
r.Attestations[k] = append(r.Attestations[k], v)
r.mu.Unlock()
}
func (r *Result[T]) SetRef(ref T) {
r.Ref = ref
}
func (r *Result[T]) SingleRef() (T, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.Refs != nil && !reflect.ValueOf(r.Ref).IsValid() {
var t T
return t, errors.Errorf("invalid map result")
}
return r.Ref, nil
}
func (r *Result[T]) EachRef(fn func(T) error) (err error) {
if reflect.ValueOf(r.Ref).IsValid() {
err = fn(r.Ref)
}
for _, r := range r.Refs {
if reflect.ValueOf(r).IsValid() {
if err1 := fn(r); err1 != nil && err == nil {
err = err1
}
}
}
return err
}
// EachRef iterates over references in both a and b.
// a and b are assumed to be of the same size and map their references
// to the same set of keys
func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err error) {
if reflect.ValueOf(a.Ref).IsValid() && reflect.ValueOf(b.Ref).IsValid() {
err = fn(a.Ref, b.Ref)
}
for k, r := range a.Refs {
if reflect.ValueOf(r).IsValid() && reflect.ValueOf(b.Refs[k]).IsValid() {
if err1 := fn(r, b.Refs[k]); err1 != nil && err == nil {
err = err1
}
}
}
return err
}
func ConvertResult[U any, V any](r *Result[U], fn func(U) (V, error)) (*Result[V], error) {
r2 := &Result[V]{}
var err error
if reflect.ValueOf(r.Ref).IsValid() {
r2.Ref, err = fn(r.Ref)
if err != nil {
return nil, err
}
}
if r.Refs != nil {
r2.Refs = map[string]V{}
}
for k, r := range r.Refs {
if reflect.ValueOf(r).IsValid() {
r2.Refs[k], err = fn(r)
if err != nil {
return nil, err
}
}
}
r2.Attestations = r.Attestations
r2.Metadata = r.Metadata
return r2, nil
}
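A usage sketch of the generic Result. T is typically an interface (the gateway client instantiates it with Reference); the reflect-based zero checks in EachRef rely on that, since only a nil interface reports as invalid. The names below are illustrative:

package example

import (
	"fmt"

	"github.com/moby/buildkit/solver/result"
)

type Ref interface{ ID() string }

type ref string

func (r ref) ID() string { return string(r) }

func exampleResult() error {
	res := &result.Result[Ref]{}
	res.AddRef("linux/amd64", ref("sha256:aaa"))
	res.AddRef("linux/arm64", ref("sha256:bbb"))
	res.AddMeta("image.name", []byte("example/app:latest"))
	// The single Ref was never set, so EachRef visits only the Refs map.
	return res.EachRef(func(r Ref) error {
		fmt.Println(r.ID())
		return nil
	})
}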


@ -0,0 +1,11 @@
package attestation
const (
MediaTypeDockerSchema2AttestationType = "application/vnd.in-toto+json"
DockerAnnotationReferenceType = "vnd.docker.reference.type"
DockerAnnotationReferenceDigest = "vnd.docker.reference.digest"
DockerAnnotationReferenceDescription = "vnd.docker.reference.description"
DockerAnnotationReferenceTypeDefault = "attestation-manifest"
)


@ -4,6 +4,7 @@ import (
"bytes"
"context"
"io"
"strings"
"sync"
"time"
@ -18,12 +19,14 @@ import (
type Buffer interface {
content.Provider
content.Ingester
content.Manager
}
// NewBuffer returns a new buffer
func NewBuffer() Buffer {
return &buffer{
buffers: map[digest.Digest][]byte{},
infos: map[digest.Digest]content.Info{},
refs: map[string]struct{}{},
}
}
@ -31,9 +34,59 @@ func NewBuffer() Buffer {
type buffer struct {
mu sync.Mutex
buffers map[digest.Digest][]byte
infos map[digest.Digest]content.Info
refs map[string]struct{}
}
func (b *buffer) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
b.mu.Lock()
v, ok := b.infos[dgst]
b.mu.Unlock()
if !ok {
return content.Info{}, errdefs.ErrNotFound
}
return v, nil
}
func (b *buffer) Update(ctx context.Context, new content.Info, fieldpaths ...string) (content.Info, error) {
b.mu.Lock()
defer b.mu.Unlock()
updated, ok := b.infos[new.Digest]
if !ok {
return content.Info{}, errdefs.ErrNotFound
}
if len(fieldpaths) == 0 {
fieldpaths = []string{"labels"}
}
for _, path := range fieldpaths {
if strings.HasPrefix(path, "labels.") {
if updated.Labels == nil {
updated.Labels = map[string]string{}
}
key := strings.TrimPrefix(path, "labels.")
updated.Labels[key] = new.Labels[key]
continue
}
if path == "labels" {
updated.Labels = new.Labels
}
}
b.infos[new.Digest] = updated
return updated, nil
}
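A sketch of the new Update surface; the digest is assumed to have been written to the buffer already, and the label key is just an example:

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/moby/buildkit/util/contentutil"
	digest "github.com/opencontainers/go-digest"
)

func exampleUpdate(ctx context.Context, buf contentutil.Buffer, dgst digest.Digest) (content.Info, error) {
	// Only the "labels.<key>" fieldpath is applied; other Info fields are kept.
	return buf.Update(ctx, content.Info{
		Digest: dgst,
		Labels: map[string]string{"example.label/pinned": "true"},
	}, "labels.example.label/pinned")
}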
func (b *buffer) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
return nil // not implemented
}
func (b *buffer) Delete(ctx context.Context, dgst digest.Digest) error {
return nil // not implemented
}
func (b *buffer) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
var wOpts content.WriterOpts
for _, opt := range opts {
@ -82,6 +135,7 @@ func (b *buffer) addValue(k digest.Digest, dt []byte) {
b.mu.Lock()
defer b.mu.Unlock()
b.buffers[k] = dt
b.infos[k] = content.Info{Digest: k, Size: int64(len(dt))}
}
type bufferedWriter struct {


@ -0,0 +1,34 @@
package contentutil
import (
"net/url"
"strings"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/reference"
)
func HasSource(info content.Info, refspec reference.Spec) (bool, error) {
u, err := url.Parse("dummy://" + refspec.Locator)
if err != nil {
return false, err
}
if info.Labels == nil {
return false, nil
}
source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/")
repoLabel, ok := info.Labels["containerd.io/distribution.source."+source]
if !ok || repoLabel == "" {
return false, nil
}
for _, repo := range strings.Split(repoLabel, ",") {
// the label records the target repo as one of the content's sources
if repo == target {
return true, nil
}
}
return false, nil
}
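A sketch of the check in practice, using the containerd distribution-source label convention referenced above:

package example

import (
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/reference"
	"github.com/moby/buildkit/util/contentutil"
)

func exampleHasSource() (bool, error) {
	refspec, err := reference.Parse("docker.io/library/alpine:latest")
	if err != nil {
		return false, err
	}
	info := content.Info{
		Labels: map[string]string{
			"containerd.io/distribution.source.docker.io": "library/alpine",
		},
	}
	// true: the label records library/alpine as a source repo on docker.io.
	return contentutil.HasSource(info, refspec)
}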


@ -10,8 +10,8 @@ import (
// GitRef represents a git ref.
//
// Examples:
// - "https://github.com/foo/bar.git#baz/qux:quux/quuz" is parsed into:
// {Remote: "https://github.com/foo/bar.git", ShortName: "bar", Commit:"baz/qux", SubDir: "quux/quuz"}.
// - "https://github.com/foo/bar.git#baz/qux:quux/quuz" is parsed into:
// {Remote: "https://github.com/foo/bar.git", ShortName: "bar", Commit:"baz/qux", SubDir: "quux/quuz"}.
type GitRef struct {
// Remote is the remote repository path.
Remote string


@ -6,7 +6,7 @@ import (
"github.com/containerd/typeurl"
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/proto" // nolint:staticcheck
"github.com/golang/protobuf/proto" //nolint:staticcheck
"github.com/golang/protobuf/ptypes/any"
"github.com/moby/buildkit/util/stack"
"github.com/sirupsen/logrus"


@ -13,6 +13,8 @@ import (
"github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/moby/buildkit/util/attestation"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/buildkit/util/resolver/limited"
"github.com/moby/buildkit/util/resolver/retryhandler"
@ -24,6 +26,7 @@ import (
type ContentCache interface {
content.Ingester
content.Provider
content.Manager
}
var leasesMu sync.Mutex
@ -75,10 +78,15 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
if desc.Digest != "" {
ra, err := cache.ReaderAt(ctx, desc)
if err == nil {
desc.Size = ra.Size()
mt, err := DetectManifestMediaType(ra)
info, err := cache.Info(ctx, desc.Digest)
if err == nil {
desc.MediaType = mt
if ok, err := contentutil.HasSource(info, ref); err == nil && ok {
desc.Size = ra.Size()
mt, err := DetectManifestMediaType(ra)
if err == nil {
desc.MediaType = mt
}
}
}
}
}
@ -101,8 +109,14 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
children := childrenConfigHandler(cache, platform)
dslHandler, err := docker.AppendDistributionSourceLabel(cache, ref.String())
if err != nil {
return "", nil, err
}
handlers := []images.Handler{
retryhandler.New(limited.FetchHandler(cache, fetcher, str), func(_ []byte) {}),
dslHandler,
children,
}
if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
@ -159,7 +173,8 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo
} else {
descs = append(descs, index.Manifests...)
}
case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType:
case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType,
attestation.MediaTypeDockerSchema2AttestationType:
// childless data types.
return nil, nil
default:


@ -8,6 +8,7 @@ import (
"time"
"github.com/containerd/containerd/remotes"
"github.com/moby/buildkit/exporter/containerimage/image"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@ -44,7 +45,7 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) {
return nil, errors.Errorf("invalid schema1 manifest")
}
var img ocispecs.Image
var img image.Image
if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history")
}
@ -68,7 +69,7 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) {
}
}
dt, err := json.MarshalIndent(img, "", " ")
dt, err := json.MarshalIndent(img, "", " ")
if err != nil {
return nil, errors.Wrap(err, "failed to marshal schema1 config")
}


@ -38,14 +38,13 @@ func setUserDefinedTermColors(colorsEnv string) {
return
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 || strings.Contains(parts[1], "=") {
k, v, ok := strings.Cut(field, "=")
if !ok || strings.Contains(v, "=") {
err := errors.New("A valid entry must have exactly two fields")
logrus.WithError(err).Warnf("Could not parse BUILDKIT_COLORS component: %s", field)
continue
}
k := strings.ToLower(parts[0])
v := parts[1]
k = strings.ToLower(k)
if c, ok := termColorMap[strings.ToLower(v)]; ok {
parseKeys(k, c)
} else if strings.Contains(v, ",") {
@ -94,8 +93,7 @@ func readRGB(v string) aec.ANSI {
}
func parseKeys(k string, c aec.ANSI) {
key := strings.ToLower(k)
switch key {
switch strings.ToLower(k) {
case "run":
colorRun = c
case "cancel":


@ -170,10 +170,10 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
p.current = ""
v.count = 0
if v.logsPartial {
fmt.Fprintln(p.w, "")
}
if v.Error != "" {
if v.logsPartial {
fmt.Fprintln(p.w, "")
}
if strings.HasSuffix(v.Error, context.Canceled.Error()) {
fmt.Fprintf(p.w, "#%d CANCELED\n", v.index)
} else {


@ -151,7 +151,7 @@ func convertStack(s errors.StackTrace) *Stack {
if idx == -1 {
continue
}
line, err := strconv.Atoi(p[1][idx+1:])
line, err := strconv.ParseInt(p[1][idx+1:], 10, 32)
if err != nil {
continue
}


@ -119,9 +119,7 @@ func (c *Connection) indefiniteBackgroundConnection() {
connReattemptPeriod := defaultConnReattemptPeriod
// No strong seeding required, nano time can
// already help with pseudo uniqueness.
rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))
rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024))) //nolint:gosec // No strong seeding required, nano time can already help with pseudo uniqueness.
// maxJitterNanos: 70% of the connectionReattemptPeriod
maxJitterNanos := int64(0.7 * float64(connReattemptPeriod))

vendor/github.com/moby/patternmatcher/LICENSE

@ -0,0 +1,191 @@
Apache License
Version 2.0, January 2004
https://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
Copyright 2013-2018 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
https://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/moby/patternmatcher/NOTICE

@ -0,0 +1,16 @@
Docker
Copyright 2012-2017 Docker, Inc.
This product includes software developed at Docker, Inc. (https://www.docker.com).
The following is courtesy of our legal counsel:
Use and transfer of Docker may be subject to certain restrictions by the
United States and other governments.
It is your responsibility to ensure that your use and/or transfer does not
violate applicable laws.
For more information, please see https://www.bis.doc.gov
See also https://www.apache.org/dev/crypto.html and/or seek legal counsel.

vendor/github.com/moby/patternmatcher/patternmatcher.go

@ -0,0 +1,474 @@
package patternmatcher
import (
"errors"
"os"
"path/filepath"
"regexp"
"strings"
"text/scanner"
"unicode/utf8"
)
// escapeBytes is a bitmap used to check whether a character should be escaped when creating the regex.
var escapeBytes [8]byte
// shouldEscape reports whether a rune should be escaped as part of the regex.
//
// This only includes characters that require escaping in regex but are also NOT valid filepath pattern characters.
// Additionally, '\' is not excluded because there is specific logic to properly handle this, as it's a path separator
// on Windows.
//
// Adapted from regexp::QuoteMeta in go stdlib.
// See https://cs.opensource.google/go/go/+/refs/tags/go1.17.2:src/regexp/regexp.go;l=703-715;drc=refs%2Ftags%2Fgo1.17.2
func shouldEscape(b rune) bool {
return b < utf8.RuneSelf && escapeBytes[b%8]&(1<<(b/8)) != 0
}
func init() {
for _, b := range []byte(`.+()|{}$`) {
escapeBytes[b%8] |= 1 << (b / 8)
}
}
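A quick sanity check of the bitmap, as an illustrative in-package helper (not upstream code): regex metacharacters that are not also filepath pattern characters get escaped, while pattern wildcards and ordinary characters pass through.

func exampleShouldEscape() []bool {
	return []bool{
		shouldEscape('.'), // true: regex metacharacter, not a pattern char
		shouldEscape('*'), // false: '*' is a filepath pattern wildcard
		shouldEscape('a'), // false: ordinary character
	}
}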
// PatternMatcher allows checking paths against a list of patterns
type PatternMatcher struct {
patterns []*Pattern
exclusions bool
}
// New creates a new matcher object for specific patterns that can
// be used later to match against patterns against paths
func New(patterns []string) (*PatternMatcher, error) {
pm := &PatternMatcher{
patterns: make([]*Pattern, 0, len(patterns)),
}
for _, p := range patterns {
// Eliminate leading and trailing whitespace.
p = strings.TrimSpace(p)
if p == "" {
continue
}
p = filepath.Clean(p)
newp := &Pattern{}
if p[0] == '!' {
if len(p) == 1 {
return nil, errors.New("illegal exclusion pattern: \"!\"")
}
newp.exclusion = true
p = p[1:]
pm.exclusions = true
}
// Do some syntax checking on the pattern.
// filepath's Match() has some really weird rules that are inconsistent,
// so instead of trying to dup their logic, just call Match() for its
// error state and if there is an error in the pattern return it.
// If this becomes an issue we can remove this since it's really only
// needed in the error (syntax) case - which isn't really critical.
if _, err := filepath.Match(p, "."); err != nil {
return nil, err
}
newp.cleanedPattern = p
newp.dirs = strings.Split(p, string(os.PathSeparator))
pm.patterns = append(pm.patterns, newp)
}
return pm, nil
}
// Matches returns true if "file" matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
//
// The "file" argument should be a slash-delimited path.
//
// Matches is not safe to call concurrently.
//
// Deprecated: This implementation is buggy (it only checks a single parent dir
// against the pattern) and will be removed soon. Use either
// MatchesOrParentMatches or MatchesUsingParentResults instead.
func (pm *PatternMatcher) Matches(file string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
parentPath := filepath.Dir(file)
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
for _, pattern := range pm.patterns {
// Skip evaluation if this is an inclusion and the filename
// already matched the pattern, or it's an exclusion and it has
// not matched the pattern yet.
if pattern.exclusion != matched {
continue
}
match, err := pattern.match(file)
if err != nil {
return false, err
}
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
if len(pattern.dirs) <= len(parentPathDirs) {
match, _ = pattern.match(strings.Join(parentPathDirs[:len(pattern.dirs)], string(os.PathSeparator)))
}
}
if match {
matched = !pattern.exclusion
}
}
return matched, nil
}
// MatchesOrParentMatches returns true if "file" matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
//
// The "file" argument should be a slash-delimited path.
//
// MatchesOrParentMatches is not safe to call concurrently.
func (pm *PatternMatcher) MatchesOrParentMatches(file string) (bool, error) {
matched := false
file = filepath.FromSlash(file)
parentPath := filepath.Dir(file)
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
for _, pattern := range pm.patterns {
// Skip evaluation if this is an inclusion and the filename
// already matched the pattern, or it's an exclusion and it has
// not matched the pattern yet.
if pattern.exclusion != matched {
continue
}
match, err := pattern.match(file)
if err != nil {
return false, err
}
if !match && parentPath != "." {
// Check to see if the pattern matches one of our parent dirs.
for i := range parentPathDirs {
match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator)))
if match {
break
}
}
}
if match {
matched = !pattern.exclusion
}
}
return matched, nil
}
// MatchesUsingParentResult returns true if "file" matches any of the patterns
// and isn't excluded by any of the subsequent patterns. The functionality is
// the same as Matches, but as an optimization, the caller keeps track of
// whether the parent directory matched.
//
// The "file" argument should be a slash-delimited path.
//
// MatchesUsingParentResult is not safe to call concurrently.
//
// Deprecated: this function does not behave correctly in some cases (see
// https://github.com/docker/buildx/issues/850).
//
// Use MatchesUsingParentResults instead.
func (pm *PatternMatcher) MatchesUsingParentResult(file string, parentMatched bool) (bool, error) {
matched := parentMatched
file = filepath.FromSlash(file)
for _, pattern := range pm.patterns {
// Skip evaluation if this is an inclusion and the filename
// already matched the pattern, or it's an exclusion and it has
// not matched the pattern yet.
if pattern.exclusion != matched {
continue
}
match, err := pattern.match(file)
if err != nil {
return false, err
}
if match {
matched = !pattern.exclusion
}
}
return matched, nil
}
// MatchInfo tracks information about parent dir matches while traversing a
// filesystem.
type MatchInfo struct {
parentMatched []bool
}
// MatchesUsingParentResults returns true if "file" matches any of the patterns
// and isn't excluded by any of the subsequent patterns. The functionality is
// the same as Matches, but as an optimization, the caller passes in
// intermediate results from matching the parent directory.
//
// The "file" argument should be a slash-delimited path.
//
// MatchesUsingParentResults is not safe to call concurrently.
func (pm *PatternMatcher) MatchesUsingParentResults(file string, parentMatchInfo MatchInfo) (bool, MatchInfo, error) {
parentMatched := parentMatchInfo.parentMatched
if len(parentMatched) != 0 && len(parentMatched) != len(pm.patterns) {
return false, MatchInfo{}, errors.New("wrong number of values in parentMatched")
}
file = filepath.FromSlash(file)
matched := false
matchInfo := MatchInfo{
parentMatched: make([]bool, len(pm.patterns)),
}
for i, pattern := range pm.patterns {
match := false
// If the parent matched this pattern, we don't need to recheck.
if len(parentMatched) != 0 {
match = parentMatched[i]
}
if !match {
// Skip evaluation if this is an inclusion and the filename
// already matched the pattern, or it's an exclusion and it has
// not matched the pattern yet.
if pattern.exclusion != matched {
continue
}
var err error
match, err = pattern.match(file)
if err != nil {
return false, matchInfo, err
}
// If the zero value of MatchInfo was passed in, we don't have
// any information about the parent dir's match results, and we
// apply the same logic as MatchesOrParentMatches.
if !match && len(parentMatched) == 0 {
if parentPath := filepath.Dir(file); parentPath != "." {
parentPathDirs := strings.Split(parentPath, string(os.PathSeparator))
// Check to see if the pattern matches one of our parent dirs.
for i := range parentPathDirs {
match, _ = pattern.match(strings.Join(parentPathDirs[:i+1], string(os.PathSeparator)))
if match {
break
}
}
}
}
}
matchInfo.parentMatched[i] = match
if match {
matched = !pattern.exclusion
}
}
return matched, matchInfo, nil
}
// Exclusions returns true if any of the patterns define exclusions
func (pm *PatternMatcher) Exclusions() bool {
return pm.exclusions
}
// Patterns returns the list of active patterns
func (pm *PatternMatcher) Patterns() []*Pattern {
return pm.patterns
}
// Pattern defines a single regexp used to filter file paths.
type Pattern struct {
matchType matchType
cleanedPattern string
dirs []string
regexp *regexp.Regexp
exclusion bool
}
type matchType int
const (
unknownMatch matchType = iota
exactMatch
prefixMatch
suffixMatch
regexpMatch
)
func (p *Pattern) String() string {
return p.cleanedPattern
}
// Exclusion returns true if this pattern defines exclusion
func (p *Pattern) Exclusion() bool {
return p.exclusion
}
func (p *Pattern) match(path string) (bool, error) {
if p.matchType == unknownMatch {
if err := p.compile(string(os.PathSeparator)); err != nil {
return false, filepath.ErrBadPattern
}
}
switch p.matchType {
case exactMatch:
return path == p.cleanedPattern, nil
case prefixMatch:
// strip trailing **
return strings.HasPrefix(path, p.cleanedPattern[:len(p.cleanedPattern)-2]), nil
case suffixMatch:
// strip leading **
suffix := p.cleanedPattern[2:]
if strings.HasSuffix(path, suffix) {
return true, nil
}
// **/foo matches "foo"
return suffix[0] == os.PathSeparator && path == suffix[1:], nil
case regexpMatch:
return p.regexp.MatchString(path), nil
}
return false, nil
}
func (p *Pattern) compile(sl string) error {
regStr := "^"
pattern := p.cleanedPattern
// Go through the pattern and convert it to a regexp.
// We use a scanner so we can support utf-8 chars.
var scan scanner.Scanner
scan.Init(strings.NewReader(pattern))
escSL := sl
if sl == `\` {
escSL += `\`
}
p.matchType = exactMatch
for i := 0; scan.Peek() != scanner.EOF; i++ {
ch := scan.Next()
if ch == '*' {
if scan.Peek() == '*' {
// is some flavor of "**"
scan.Next()
// Treat **/ as ** so eat the "/"
if string(scan.Peek()) == sl {
scan.Next()
}
if scan.Peek() == scanner.EOF {
// is "**EOF" - to align with .gitignore just accept all
if p.matchType == exactMatch {
p.matchType = prefixMatch
} else {
regStr += ".*"
p.matchType = regexpMatch
}
} else {
// is "**"
// Note that this allows for any # of /'s (even 0) because
// the .* will eat everything, even /'s
regStr += "(.*" + escSL + ")?"
p.matchType = regexpMatch
}
if i == 0 {
p.matchType = suffixMatch
}
} else {
// is "*" so map it to anything but "/"
regStr += "[^" + escSL + "]*"
p.matchType = regexpMatch
}
} else if ch == '?' {
// "?" is any char except "/"
regStr += "[^" + escSL + "]"
p.matchType = regexpMatch
} else if shouldEscape(ch) {
// Escape some regexp special chars that have no meaning
// in golang's filepath.Match
regStr += `\` + string(ch)
} else if ch == '\\' {
// escape the next char. Note that a trailing \ in the pattern
// will be left alone (but it still needs escaping in the regex)
if sl == `\` {
// On windows map "\" to "\\", meaning an escaped backslash,
// and then just continue because filepath.Match on
// Windows doesn't allow escaping at all
regStr += escSL
continue
}
if scan.Peek() != scanner.EOF {
regStr += `\` + string(scan.Next())
p.matchType = regexpMatch
} else {
regStr += `\`
}
} else if ch == '[' || ch == ']' {
regStr += string(ch)
p.matchType = regexpMatch
} else {
regStr += string(ch)
}
}
if p.matchType != regexpMatch {
return nil
}
regStr += "$"
re, err := regexp.Compile(regStr)
if err != nil {
return err
}
p.regexp = re
p.matchType = regexpMatch
return nil
}
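// Worked example (illustrative): with "/" as the separator, "*.go"
// compiles to the regexp ^[^/]*\.go$ and is classified regexpMatch,
// whereas "vendor/**" never reaches the regexp: its trailing "**" makes
// it a prefixMatch, checked with strings.HasPrefix(path, "vendor/").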
// Matches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
//
// This implementation is buggy (it only checks a single parent dir against the
// pattern) and will be removed soon. Use MatchesOrParentMatches instead.
func Matches(file string, patterns []string) (bool, error) {
pm, err := New(patterns)
if err != nil {
return false, err
}
file = filepath.Clean(file)
if file == "." {
// Don't let them exclude everything, kind of silly.
return false, nil
}
return pm.Matches(file)
}
// MatchesOrParentMatches returns true if file matches any of the patterns
// and isn't excluded by any of the subsequent patterns.
func MatchesOrParentMatches(file string, patterns []string) (bool, error) {
pm, err := New(patterns)
if err != nil {
return false, err
}
file = filepath.Clean(file)
if file == "." {
// Don't let them exclude everything, kind of silly.
return false, nil
}
return pm.MatchesOrParentMatches(file)
}
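// A minimal usage sketch, not part of the vendored file (patterns and
// paths are illustrative): build a matcher once, check individual
// slash-delimited paths with MatchesOrParentMatches, and thread the
// MatchInfo returned for a directory into the calls for its children so
// MatchesUsingParentResults can reuse the parent's per-pattern results.
//
//	pm, err := New([]string{"**/*.go", "!vendor/**"})
//	if err != nil {
//		panic(err) // e.g. an illegal exclusion pattern such as "!"
//	}
//	ok, _ := pm.MatchesOrParentMatches("vendor/lib/util.go") // false: excluded
//	_, dirInfo, _ := pm.MatchesUsingParentResults("cmd", MatchInfo{})
//	ok, _, _ = pm.MatchesUsingParentResults("cmd/main.go", dirInfo) // true
//	_ = ok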

vendor/github.com/moby/sys/sequential/LICENSE generated vendored Normal file
View File

@ -0,0 +1,202 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

vendor/github.com/moby/sys/sequential/doc.go generated vendored Normal file
View File

@ -0,0 +1,15 @@
// Package sequential provides a set of functions for managing sequential
// files on Windows.
//
// These functions originate in the golang os and windows packages,
// slightly modified to only cope with files, not directories, due to the
// specific use case.
//
// The alteration is to allow a file on Windows to be opened with
// FILE_FLAG_SEQUENTIAL_SCAN (in particular for docker load), to avoid eating
// the standby list, particularly when accessing large files such as layer.tar.
//
// For non-Windows platforms, the package provides wrappers for the
// equivalents in the os package; they are passthroughs on Unix, and the
// sequential-scan behavior is only relevant on Windows.
package sequential
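// A caller-side sketch (illustrative; the io import and helper name are
// assumed): the API mirrors the os package, so the same code runs
// everywhere and only the Windows build changes how the file is opened.
//
//	func copyLayer(dst io.Writer, path string) error {
//		f, err := sequential.Open(path) // plain os.Open on Unix
//		if err != nil {
//			return err
//		}
//		defer f.Close()
//		_, err = io.Copy(dst, f)
//		return err
//	}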

View File

@ -0,0 +1,45 @@
//go:build !windows
// +build !windows
package sequential
import "os"
// Create creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func Create(name string) (*os.File, error) {
return os.Create(name)
}
// Open opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func Open(name string) (*os.File, error) {
return os.Open(name)
}
// OpenFile is the generalized open call; most users will use Open
// or Create instead. It opens the named file with specified flag
// (O_RDONLY etc.) and perm, (0666 etc.) if applicable. If successful,
// methods on the returned File can be used for I/O.
// If there is an error, it will be of type *PathError.
func OpenFile(name string, flag int, perm os.FileMode) (*os.File, error) {
return os.OpenFile(name, flag, perm)
}
// CreateTemp creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, CreateTemp uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling CreateTemp simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func CreateTemp(dir, prefix string) (f *os.File, err error) {
return os.CreateTemp(dir, prefix)
}

View File

@ -0,0 +1,161 @@
package sequential
import (
"os"
"path/filepath"
"strconv"
"sync"
"syscall"
"time"
"unsafe"
"golang.org/x/sys/windows"
)
// Create creates the named file with mode 0666 (before umask), truncating
// it if it already exists. If successful, methods on the returned
// File can be used for I/O; the associated file descriptor has mode
// O_RDWR.
// If there is an error, it will be of type *PathError.
func Create(name string) (*os.File, error) {
return OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0)
}
// Open opens the named file for reading. If successful, methods on
// the returned file can be used for reading; the associated file
// descriptor has mode O_RDONLY.
// If there is an error, it will be of type *PathError.
func Open(name string) (*os.File, error) {
return OpenFile(name, os.O_RDONLY, 0)
}
// OpenFile is the generalized open call; most users will use Open
// or Create instead.
// If there is an error, it will be of type *PathError.
func OpenFile(name string, flag int, _ os.FileMode) (*os.File, error) {
if name == "" {
return nil, &os.PathError{Op: "open", Path: name, Err: syscall.ENOENT}
}
r, err := openFileSequential(name, flag, 0)
if err == nil {
return r, nil
}
return nil, &os.PathError{Op: "open", Path: name, Err: err}
}
func openFileSequential(name string, flag int, _ os.FileMode) (file *os.File, err error) {
r, e := openSequential(name, flag|windows.O_CLOEXEC, 0)
if e != nil {
return nil, e
}
return os.NewFile(uintptr(r), name), nil
}
func makeInheritSa() *windows.SecurityAttributes {
var sa windows.SecurityAttributes
sa.Length = uint32(unsafe.Sizeof(sa))
sa.InheritHandle = 1
return &sa
}
func openSequential(path string, mode int, _ uint32) (fd windows.Handle, err error) {
if len(path) == 0 {
return windows.InvalidHandle, windows.ERROR_FILE_NOT_FOUND
}
pathp, err := windows.UTF16PtrFromString(path)
if err != nil {
return windows.InvalidHandle, err
}
var access uint32
switch mode & (windows.O_RDONLY | windows.O_WRONLY | windows.O_RDWR) {
case windows.O_RDONLY:
access = windows.GENERIC_READ
case windows.O_WRONLY:
access = windows.GENERIC_WRITE
case windows.O_RDWR:
access = windows.GENERIC_READ | windows.GENERIC_WRITE
}
if mode&windows.O_CREAT != 0 {
access |= windows.GENERIC_WRITE
}
if mode&windows.O_APPEND != 0 {
access &^= windows.GENERIC_WRITE
access |= windows.FILE_APPEND_DATA
}
sharemode := uint32(windows.FILE_SHARE_READ | windows.FILE_SHARE_WRITE)
var sa *windows.SecurityAttributes
if mode&windows.O_CLOEXEC == 0 {
sa = makeInheritSa()
}
var createmode uint32
switch {
case mode&(windows.O_CREAT|windows.O_EXCL) == (windows.O_CREAT | windows.O_EXCL):
createmode = windows.CREATE_NEW
case mode&(windows.O_CREAT|windows.O_TRUNC) == (windows.O_CREAT | windows.O_TRUNC):
createmode = windows.CREATE_ALWAYS
case mode&windows.O_CREAT == windows.O_CREAT:
createmode = windows.OPEN_ALWAYS
case mode&windows.O_TRUNC == windows.O_TRUNC:
createmode = windows.TRUNCATE_EXISTING
default:
createmode = windows.OPEN_EXISTING
}
// Use FILE_FLAG_SEQUENTIAL_SCAN rather than FILE_ATTRIBUTE_NORMAL as implemented in golang.
// https://msdn.microsoft.com/en-us/library/windows/desktop/aa363858(v=vs.85).aspx
const fileFlagSequentialScan = 0x08000000 // FILE_FLAG_SEQUENTIAL_SCAN
h, e := windows.CreateFile(pathp, access, sharemode, sa, createmode, fileFlagSequentialScan, 0)
return h, e
}
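// Worked example (illustrative): Create(name) reaches here with
// os.O_RDWR|os.O_CREATE|os.O_TRUNC plus windows.O_CLOEXEC, which yields
// access = GENERIC_READ|GENERIC_WRITE, sa = nil (no handle inheritance),
// createmode = CREATE_ALWAYS, and the FILE_FLAG_SEQUENTIAL_SCAN flag.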
// Helpers for CreateTemp
var rand uint32
var randmu sync.Mutex
func reseed() uint32 {
return uint32(time.Now().UnixNano() + int64(os.Getpid()))
}
func nextSuffix() string {
randmu.Lock()
r := rand
if r == 0 {
r = reseed()
}
r = r*1664525 + 1013904223 // constants from Numerical Recipes
rand = r
randmu.Unlock()
return strconv.Itoa(int(1e9 + r%1e9))[1:]
}
// CreateTemp is a copy of os.CreateTemp, modified to use sequential
// file access. Below is the original comment from golang:
// TempFile creates a new temporary file in the directory dir
// with a name beginning with prefix, opens the file for reading
// and writing, and returns the resulting *os.File.
// If dir is the empty string, TempFile uses the default directory
// for temporary files (see os.TempDir).
// Multiple programs calling TempFile simultaneously
// will not choose the same file. The caller can use f.Name()
// to find the pathname of the file. It is the caller's responsibility
// to remove the file when no longer needed.
func CreateTemp(dir, prefix string) (f *os.File, err error) {
if dir == "" {
dir = os.TempDir()
}
nconflict := 0
for i := 0; i < 10000; i++ {
name := filepath.Join(dir, prefix+nextSuffix())
f, err = OpenFile(name, os.O_RDWR|os.O_CREATE|os.O_EXCL, 0o600)
if os.IsExist(err) {
if nconflict++; nconflict > 10 {
randmu.Lock()
rand = reseed()
randmu.Unlock()
}
continue
}
break
}
return
}
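// Usage sketch (illustrative): CreateTemp behaves like os.CreateTemp, so
// callers close the handle and remove the file by name as usual.
//
//	tmp, err := CreateTemp("", "layer-")
//	if err != nil {
//		return err
//	}
//	defer os.Remove(tmp.Name())
//	defer tmp.Close()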

View File

@ -31,4 +31,27 @@ var SignalMap = map[string]syscall.Signal{
"SEGV": syscall.Signal(windows.SIGSEGV),
"TERM": syscall.Signal(windows.SIGTERM),
"TRAP": syscall.Signal(windows.SIGTRAP),
// additional linux signals supported for LCOW
"CHLD": syscall.Signal(0x11),
"CLD": syscall.Signal(0x11),
"CONT": syscall.Signal(0x12),
"IO": syscall.Signal(0x1d),
"IOT": syscall.Signal(0x6),
"POLL": syscall.Signal(0x1d),
"PROF": syscall.Signal(0x1b),
"PWR": syscall.Signal(0x1e),
"STKFLT": syscall.Signal(0x10),
"STOP": syscall.Signal(0x13),
"SYS": syscall.Signal(0x1f),
"TSTP": syscall.Signal(0x14),
"TTIN": syscall.Signal(0x15),
"TTOU": syscall.Signal(0x16),
"URG": syscall.Signal(0x17),
"USR1": syscall.Signal(0xa),
"USR2": syscall.Signal(0xc),
"VTALRM": syscall.Signal(0x1a),
"WINCH": syscall.Signal(0x1c),
"XCPU": syscall.Signal(0x18),
"XFSZ": syscall.Signal(0x19),
}
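// Lookup sketch (illustrative; the strings and fmt imports are assumed):
// callers resolve a user-supplied name such as "TERM" or "USR1" through
// the map and reject anything unknown.
//
//	sig, ok := SignalMap[strings.ToUpper(strings.TrimPrefix(name, "SIG"))]
//	if !ok {
//		return 0, fmt.Errorf("invalid signal: %s", name)
//	}
//	return sig, nil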