vendor: update buildkit to master@ae9d0f5

Signed-off-by: Justin Chadwell <me@jedevc.com>
Justin Chadwell committed 2022-11-22 14:39:36 +00:00
commit 36e663edda (parent 6e9b743296)
375 changed files with 14834 additions and 13552 deletions


@ -4,6 +4,7 @@ import (
"context"
"github.com/moby/buildkit/client/buildid"
"github.com/moby/buildkit/frontend/attestations"
gateway "github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/frontend/gateway/grpcclient"
gatewayapi "github.com/moby/buildkit/frontend/gateway/pb"
@ -20,17 +21,15 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
}
}()
if opt.Frontend != "" {
return nil, errors.New("invalid SolveOpt, Build interface cannot use Frontend")
}
feOpts := opt.FrontendAttrs
opt.Frontend = ""
opt.FrontendAttrs = attestations.Filter(opt.FrontendAttrs)
if product == "" {
product = apicaps.ExportedProduct
}
feOpts := opt.FrontendAttrs
opt.FrontendAttrs = nil
workers, err := c.ListWorkers(ctx)
if err != nil {
return nil, errors.Wrap(err, "listing workers for Build")
@ -113,6 +112,19 @@ func (g *gatewayClientForBuild) StatFile(ctx context.Context, in *gatewayapi.Sta
return g.gateway.StatFile(ctx, in, opts...)
}
func (g *gatewayClientForBuild) Evaluate(ctx context.Context, in *gatewayapi.EvaluateRequest, opts ...grpc.CallOption) (*gatewayapi.EvaluateResponse, error) {
if err := g.caps.Supports(gatewayapi.CapGatewayEvaluate); err != nil {
if err2 := g.caps.Supports(gatewayapi.CapStatFile); err2 != nil {
return nil, err
}
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
_, err := g.gateway.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: in.Ref, Path: "."}, opts...)
return &gatewayapi.EvaluateResponse{}, err
}
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Evaluate(ctx, in, opts...)
}
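The fallback above can be read as a small standalone helper; a minimal sketch under the assumption that only the gateway client and its capability set are in scope (the helper name is hypothetical):

func evaluateWithFallback(ctx context.Context, gw gatewayapi.LLBBridgeClient, caps apicaps.CapSet, ref string) error {
	if caps.Supports(gatewayapi.CapGatewayEvaluate) == nil {
		// the daemon understands Evaluate natively
		_, err := gw.Evaluate(ctx, &gatewayapi.EvaluateRequest{Ref: ref})
		return err
	}
	// older daemons: Stat(".") forces the ref to be evaluated
	_, err := gw.StatFile(ctx, &gatewayapi.StatFileRequest{Ref: ref, Path: "."})
	return err
}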
func (g *gatewayClientForBuild) Ping(ctx context.Context, in *gatewayapi.PingRequest, opts ...grpc.CallOption) (*gatewayapi.PongResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Ping(ctx, in, opts...)


@ -234,7 +234,6 @@ func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
return nil, errors.Wrap(err, "could not read certificate/key")
}
cfg.Certificates = []tls.Certificate{cert}
cfg.BuildNameToCertificate()
}
return grpc.WithTransportCredentials(credentials.NewTLS(cfg)), nil
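The deleted call is (*tls.Config).BuildNameToCertificate, deprecated since Go 1.14: crypto/tls now matches certificates to connections automatically, so populating Certificates is sufficient. A minimal sketch (file names hypothetical):

cert, err := tls.LoadX509KeyPair("client.pem", "client-key.pem")
if err != nil {
	return nil, errors.Wrap(err, "could not read certificate/key")
}
// Leaf certificates are parsed on demand during the handshake;
// no BuildNameToCertificate() call is required anymore.
cfg := &tls.Config{Certificates: []tls.Certificate{cert}}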


@ -617,6 +617,7 @@ var (
LinuxArmel = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm", Variant: "v6"})
LinuxArm64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "arm64"})
LinuxS390x = Platform(ocispecs.Platform{OS: "linux", Architecture: "s390x"})
LinuxPpc64 = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64"})
LinuxPpc64le = Platform(ocispecs.Platform{OS: "linux", Architecture: "ppc64le"})
Darwin = Platform(ocispecs.Platform{OS: "darwin", Architecture: "amd64"})
Windows = Platform(ocispecs.Platform{OS: "windows", Architecture: "amd64"})


@ -2,6 +2,7 @@ package client
import (
"context"
"encoding/base64"
"encoding/json"
"io"
"os"
@ -14,6 +15,7 @@ import (
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/ociindex"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/identity"
"github.com/moby/buildkit/session"
sessioncontent "github.com/moby/buildkit/session/content"
@ -124,6 +126,8 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
ex = opt.Exports[0]
}
indicesToUpdate := []string{}
if !opt.SessionPreInitialized {
if len(syncedDirs) > 0 {
s.Allow(filesync.NewFSSyncProvider(syncedDirs))
@ -133,49 +137,64 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
s.Allow(a)
}
contentStores := map[string]content.Store{}
for key, store := range cacheOpt.contentStores {
contentStores[key] = store
}
for key, store := range opt.OCIStores {
key2 := "oci:" + key
if _, ok := contentStores[key2]; ok {
return nil, errors.Errorf("oci store key %q already exists", key)
}
contentStores[key2] = store
}
var supportFile bool
var supportDir bool
switch ex.Type {
case ExporterLocal:
if ex.Output != nil {
return nil, errors.New("output file writer is not supported by local exporter")
}
if ex.OutputDir == "" {
return nil, errors.New("output directory is required for local exporter")
}
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
case ExporterOCI, ExporterDocker, ExporterTar:
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
}
supportDir = true
case ExporterTar:
supportFile = true
case ExporterOCI, ExporterDocker:
supportDir = ex.OutputDir != ""
supportFile = ex.Output != nil
}
if supportFile && supportDir {
return nil, errors.Errorf("both file and directory output is not support by %s exporter", ex.Type)
}
if !supportFile && ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if !supportDir && ex.OutputDir != "" {
return nil, errors.Errorf("output directory is not supported by %s exporter", ex.Type)
}
if supportFile {
if ex.Output == nil {
return nil, errors.Errorf("output file writer is required for %s exporter", ex.Type)
}
s.Allow(filesync.NewFSSyncTarget(ex.Output))
default:
if ex.Output != nil {
return nil, errors.Errorf("output file writer is not supported by %s exporter", ex.Type)
}
if ex.OutputDir != "" {
return nil, errors.Errorf("output directory %s is not supported by %s exporter", ex.OutputDir, ex.Type)
}
}
// this is a new map that contains both cacheOpt stores and OCILayout stores
contentStores := make(map[string]content.Store, len(cacheOpt.contentStores)+len(opt.OCIStores))
// copy over the stores references from cacheOpt
for key, store := range cacheOpt.contentStores {
contentStores[key] = store
}
// copy over the stores references from ociLayout opts
for key, store := range opt.OCIStores {
// conflicts are not allowed
if _, ok := contentStores[key]; ok {
// we probably should check if the store is identical, but given that
// https://pkg.go.dev/github.com/containerd/containerd/content#Store
// is just an interface, composing 4 others, that is rather hard to do.
// For a future iteration.
return nil, errors.Errorf("contentStore key %s exists in both cache and OCI layouts", key)
if supportDir {
if ex.OutputDir == "" {
return nil, errors.Errorf("output directory is required for %s exporter", ex.Type)
}
switch ex.Type {
case ExporterOCI, ExporterDocker:
if err := os.MkdirAll(ex.OutputDir, 0755); err != nil {
return nil, err
}
cs, err := contentlocal.NewStore(ex.OutputDir)
if err != nil {
return nil, err
}
contentStores["export"] = cs
indicesToUpdate = append(indicesToUpdate, filepath.Join(ex.OutputDir, "index.json"))
default:
s.Allow(filesync.NewFSSyncTargetDir(ex.OutputDir))
}
contentStores[key] = store
}
if len(contentStores) > 0 {
@ -352,10 +371,29 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
}
}
if manifestDescDt := res.ExporterResponse[exptypes.ExporterImageDescriptorKey]; manifestDescDt != "" {
manifestDescDt, err := base64.StdEncoding.DecodeString(manifestDescDt)
if err != nil {
return nil, err
}
var manifestDesc ocispecs.Descriptor
if err = json.Unmarshal([]byte(manifestDescDt), &manifestDesc); err != nil {
return nil, err
}
for _, indexJSONPath := range indicesToUpdate {
tag := "latest"
if t, ok := res.ExporterResponse["image.name"]; ok {
tag = t
}
if err = ociindex.PutDescToIndexJSONFileLocked(indexJSONPath, manifestDesc, tag); err != nil {
return nil, err
}
}
}
return res, nil
}
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]filesync.SyncedDir, error) {
func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) (filesync.StaticDirSource, error) {
for _, d := range localDirs {
fi, err := os.Stat(d)
if err != nil {
@ -371,10 +409,10 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
return fsutil.MapResultKeep
}
dirs := make([]filesync.SyncedDir, 0, len(localDirs))
dirs := make(filesync.StaticDirSource, len(localDirs))
if def == nil {
for name, d := range localDirs {
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
}
} else {
for _, dt := range def.Def {
@ -389,7 +427,7 @@ func prepareSyncedDirs(def *llb.Definition, localDirs map[string]string) ([]file
if !ok {
return nil, errors.Errorf("local directory %s not enabled", name)
}
dirs = append(dirs, filesync.SyncedDir{Name: name, Dir: d, Map: resetUIDAndGID})
dirs[name] = filesync.SyncedDir{Dir: d, Map: resetUIDAndGID}
}
}
}
@ -416,14 +454,10 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
var (
cacheExports []*controlapi.CacheOptionsEntry
cacheImports []*controlapi.CacheOptionsEntry
// legacy API is used for registry caches, because the daemon might not support the new API
legacyExportRef string
legacyImportRefs []string
)
contentStores := make(map[string]content.Store)
indicesToUpdate := make(map[string]string) // key: index.JSON file name, value: tag
frontendAttrs := make(map[string]string)
legacyExportAttrs := make(map[string]string)
for _, ex := range opt.CacheExports {
if ex.Type == "local" {
csDir := ex.Attrs["dest"]
@ -438,26 +472,27 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
return nil, err
}
contentStores["local:"+csDir] = cs
tag := "latest"
if t, ok := ex.Attrs["tag"]; ok {
tag = t
}
// TODO(AkihiroSuda): support custom index JSON path
indexJSONPath := filepath.Join(csDir, "index.json")
indicesToUpdate[indexJSONPath] = "latest"
indicesToUpdate[indexJSONPath] = tag
}
if ex.Type == "registry" && legacyExportRef == "" {
legacyExportRef = ex.Attrs["ref"]
for k, v := range ex.Attrs {
if k != "ref" {
legacyExportAttrs[k] = v
}
if ex.Type == "registry" {
regRef := ex.Attrs["ref"]
if regRef == "" {
return nil, errors.New("registry cache exporter requires ref")
}
} else {
cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
Type: ex.Type,
Attrs: ex.Attrs,
})
}
cacheExports = append(cacheExports, &controlapi.CacheOptionsEntry{
Type: ex.Type,
Attrs: ex.Attrs,
})
}
for _, im := range opt.CacheImports {
attrs := im.Attrs
if im.Type == "local" {
csDir := im.Attrs["src"]
if csDir == "" {
@ -469,40 +504,40 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
continue
}
// if digest is not specified, load from "latest" tag
if attrs["digest"] == "" {
if im.Attrs["digest"] == "" {
idx, err := ociindex.ReadIndexJSONFileLocked(filepath.Join(csDir, "index.json"))
if err != nil {
bklog.G(ctx).Warning("local cache import at " + csDir + " not found due to err: " + err.Error())
continue
}
for _, m := range idx.Manifests {
if (m.Annotations[ocispecs.AnnotationRefName] == "latest" && attrs["tag"] == "") || (attrs["tag"] != "" && m.Annotations[ocispecs.AnnotationRefName] == attrs["tag"]) {
attrs["digest"] = string(m.Digest)
tag := "latest"
if t, ok := im.Attrs["tag"]; ok {
tag = t
}
if m.Annotations[ocispecs.AnnotationRefName] == tag {
im.Attrs["digest"] = string(m.Digest)
break
}
}
if attrs["digest"] == "" {
if im.Attrs["digest"] == "" {
return nil, errors.New("local cache importer requires either explicit digest, \"latest\" tag or custom tag on index.json")
}
}
contentStores["local:"+csDir] = cs
}
if im.Type == "registry" {
legacyImportRef := attrs["ref"]
legacyImportRefs = append(legacyImportRefs, legacyImportRef)
} else {
cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
Type: im.Type,
Attrs: attrs,
})
regRef := im.Attrs["ref"]
if regRef == "" {
return nil, errors.New("registry cache importer requires ref")
}
}
cacheImports = append(cacheImports, &controlapi.CacheOptionsEntry{
Type: im.Type,
Attrs: im.Attrs,
})
}
if opt.Frontend != "" || isGateway {
// use legacy API for registry importers, because the frontend might not support the new API
if len(legacyImportRefs) > 0 {
frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",")
}
// use new API for other importers
if len(cacheImports) > 0 {
s, err := json.Marshal(cacheImports)
if err != nil {
@ -513,11 +548,6 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
}
res := cacheOptions{
options: controlapi.CacheOptions{
// old API (for registry caches, planned to be removed in early 2019)
ExportRefDeprecated: legacyExportRef,
ExportAttrsDeprecated: legacyExportAttrs,
ImportRefsDeprecated: legacyImportRefs,
// new API
Exports: cacheExports,
Imports: cacheImports,
},
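With the registry cache moved onto the unified API and the new tag attribute, a caller can pin local cache entries to a named tag instead of "latest"; a hedged sketch (path and tag hypothetical):

opt.CacheExports = []client.CacheOptionsEntry{{
	Type:  "local",
	Attrs: map[string]string{"dest": "/tmp/buildcache", "tag": "v1"},
}}
opt.CacheImports = []client.CacheOptionsEntry{{
	Type:  "local",
	Attrs: map[string]string{"src": "/tmp/buildcache", "tag": "v1"},
}}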


@ -53,6 +53,7 @@ type NetworkConfig struct {
Mode string `toml:"networkMode"`
CNIConfigPath string `toml:"cniConfigPath"`
CNIBinaryPath string `toml:"cniBinaryPath"`
CNIPoolSize int `toml:"cniPoolSize"`
}
type OCIConfig struct {
@ -81,6 +82,9 @@ type OCIConfig struct {
// The profile should already be loaded (by a higher level system) before creating a worker.
ApparmorProfile string `toml:"apparmor-profile"`
// SELinux enables applying SELinux labels.
SELinux bool `toml:"selinux"`
// MaxParallelism is the maximum number of parallel build steps that can be run at the same time.
MaxParallelism int `toml:"max-parallelism"`
}
@ -99,6 +103,9 @@ type ContainerdConfig struct {
// The profile should already be loaded (by a higher level system) before creating a worker.
ApparmorProfile string `toml:"apparmor-profile"`
// SELinux enables applying SELinux labels.
SELinux bool `toml:"selinux"`
MaxParallelism int `toml:"max-parallelism"`
Rootless bool `toml:"rootless"`
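A small sketch of the new worker knobs in Go form (values hypothetical); in buildkitd.toml these correspond to the cniPoolSize, selinux and max-parallelism keys declared above:

func exampleWorkerConfig() (config.NetworkConfig, config.OCIConfig) {
	net := config.NetworkConfig{Mode: "cni", CNIPoolSize: 16} // pre-allocate 16 CNI namespaces
	oci := config.OCIConfig{SELinux: true, MaxParallelism: 4} // SELinux labels on, at most 4 parallel steps
	return net, oci
}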


@ -13,8 +13,17 @@ const (
ExporterInlineCache = "containerimage.inlinecache"
ExporterBuildInfo = "containerimage.buildinfo"
ExporterPlatformsKey = "refs.platforms"
ExporterEpochKey = "source.date.epoch"
)
// KnownRefMetadataKeys are the subset of exporter keys that can be suffixed by
// a platform to become platform-specific
var KnownRefMetadataKeys = []string{
ExporterImageConfigKey,
ExporterInlineCache,
ExporterBuildInfo,
}
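In multi-platform results these keys gain a platform suffix; a hedged sketch of how such a key is typically assembled (assuming the suffix is the formatted platform string):

p := ocispecs.Platform{OS: "linux", Architecture: "amd64"}
key := fmt.Sprintf("%s/%s", exptypes.ExporterImageConfigKey, platforms.Format(p))
// key == "containerimage.config/linux/amd64"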
type Platforms struct {
Platforms []Platform
}


@ -0,0 +1,52 @@
package image
import (
"time"
"github.com/docker/docker/api/types/strslice"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries start to count down.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
// ImageConfig is a docker compatible config for an image
type ImageConfig struct {
ocispecs.ImageConfig
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
ArgsEscaped bool `json:",omitempty"` // True if command is already escaped (Windows specific)
// NetworkDisabled bool `json:",omitempty"` // Is network disabled
// MacAddress string `json:",omitempty"` // Mac Address of the container
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
StopTimeout *int `json:",omitempty"` // Timeout (in seconds) to stop a container
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
// Image is the JSON structure which describes some basic information about the image.
// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
type Image struct {
ocispecs.Image
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`
}
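A hedged sketch of filling in the Docker-compatible config, including a healthcheck (all values hypothetical):

img := image.Image{}
img.Config.Env = []string{"PATH=/usr/local/bin:/usr/bin"}
img.Config.Healthcheck = &image.HealthConfig{
	Test:     []string{"CMD-SHELL", "curl -f http://localhost/ || exit 1"},
	Interval: 30 * time.Second,
	Timeout:  5 * time.Second,
	Retries:  3,
}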


@ -0,0 +1,82 @@
package attestations
import (
"encoding/csv"
"strings"
"github.com/pkg/errors"
)
const (
KeyTypeSbom = "sbom"
KeyTypeProvenance = "provenance"
)
const (
// TODO: update this before next buildkit release
defaultSBOMGenerator = "jedevc/buildkit-syft-scanner:master@sha256:de630f621eb0ab1bb1245cea76d01c5bddfe78af4f5b9adecde424cb7ec5605e"
)
func Filter(v map[string]string) map[string]string {
attests := make(map[string]string)
for k, v := range v {
if strings.HasPrefix(k, "attest:") {
attests[k] = v
continue
}
if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
attests[k] = v
continue
}
}
return attests
}
func Validate(values map[string]map[string]string) (map[string]map[string]string, error) {
for k := range values {
if k != KeyTypeSbom && k != KeyTypeProvenance {
return nil, errors.Errorf("unknown attestation type %q", k)
}
}
return values, nil
}
func Parse(values map[string]string) (map[string]map[string]string, error) {
attests := make(map[string]string)
for k, v := range values {
if strings.HasPrefix(k, "attest:") {
attests[strings.ToLower(strings.TrimPrefix(k, "attest:"))] = v
continue
}
if strings.HasPrefix(k, "build-arg:BUILDKIT_ATTEST_") {
attests[strings.ToLower(strings.TrimPrefix(k, "build-arg:BUILDKIT_ATTEST_"))] = v
continue
}
}
out := make(map[string]map[string]string)
for k, v := range attests {
attrs := make(map[string]string)
out[k] = attrs
if k == KeyTypeSbom {
attrs["generator"] = defaultSBOMGenerator
}
if v == "" {
continue
}
csvReader := csv.NewReader(strings.NewReader(v))
fields, err := csvReader.Read()
if err != nil {
return nil, errors.Wrapf(err, "failed to parse %s", k)
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 {
parts = append(parts, "")
}
attrs[parts[0]] = parts[1]
}
}
return Validate(out)
}
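A hedged example of Parse in action (attribute values hypothetical):

out, err := attestations.Parse(map[string]string{
	"attest:sbom":                          "generator=example/scanner:latest",
	"build-arg:BUILDKIT_ATTEST_PROVENANCE": "mode=max",
})
// err == nil
// out["sbom"]["generator"] == "example/scanner:latest" (the default generator is overridden)
// out["provenance"]["mode"] == "max"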


@ -0,0 +1,46 @@
package client
import (
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/solver/result"
)
func AttestationToPB(a *result.Attestation) (*pb.Attestation, error) {
subjects := make([]*pb.InTotoSubject, len(a.InToto.Subjects))
for i, subject := range a.InToto.Subjects {
subjects[i] = &pb.InTotoSubject{
Kind: subject.Kind,
Name: subject.Name,
Digest: subject.Digest,
}
}
return &pb.Attestation{
Kind: a.Kind,
Path: a.Path,
Ref: a.Ref,
InTotoPredicateType: a.InToto.PredicateType,
InTotoSubjects: subjects,
}, nil
}
func AttestationFromPB(a *pb.Attestation) (*result.Attestation, error) {
subjects := make([]result.InTotoSubject, len(a.InTotoSubjects))
for i, subject := range a.InTotoSubjects {
subjects[i] = result.InTotoSubject{
Kind: subject.Kind,
Name: subject.Name,
Digest: subject.Digest,
}
}
return &result.Attestation{
Kind: a.Kind,
Path: a.Path,
Ref: a.Ref,
InToto: result.InTotoAttestation{
PredicateType: a.InTotoPredicateType,
Subjects: subjects,
},
}, nil
}
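The two converters are inverses of each other; a hedged round-trip sketch from an importing package's point of view (field values hypothetical):

att := &result.Attestation{
	Kind: pb.AttestationKindInToto,
	Ref:  "attestation:example",
	Path: "sbom.spdx.json",
	InToto: result.InTotoAttestation{
		PredicateType: "https://spdx.dev/Document",
	},
}
wire, _ := client.AttestationToPB(att)
back, _ := client.AttestationFromPB(wire)
// back is equivalent to att (ContentFunc is not carried over the wire)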


@ -7,12 +7,21 @@ import (
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/solver/result"
"github.com/moby/buildkit/util/apicaps"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
fstypes "github.com/tonistiigi/fsutil/types"
)
type Result = result.Result[Reference]
type BuildFunc func(context.Context, Client) (*Result, error)
func NewResult() *Result {
return &Result{}
}
type Client interface {
Solve(ctx context.Context, req SolveRequest) (*Result, error)
ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (digest.Digest, []byte, error)
@ -82,6 +91,7 @@ type ContainerProcess interface {
type Reference interface {
ToState() (llb.State, error)
Evaluate(ctx context.Context) error
ReadFile(ctx context.Context, req ReadRequest) ([]byte, error)
StatFile(ctx context.Context, req StatRequest) (*fstypes.Stat, error)
ReadDir(ctx context.Context, req ReadDirRequest) ([]*fstypes.Stat, error)


@ -1,54 +0,0 @@
package client
import (
"context"
"sync"
"github.com/pkg/errors"
)
type BuildFunc func(context.Context, Client) (*Result, error)
type Result struct {
mu sync.Mutex
Ref Reference
Refs map[string]Reference
Metadata map[string][]byte
}
func NewResult() *Result {
return &Result{}
}
func (r *Result) AddMeta(k string, v []byte) {
r.mu.Lock()
if r.Metadata == nil {
r.Metadata = map[string][]byte{}
}
r.Metadata[k] = v
r.mu.Unlock()
}
func (r *Result) AddRef(k string, ref Reference) {
r.mu.Lock()
if r.Refs == nil {
r.Refs = map[string]Reference{}
}
r.Refs[k] = ref
r.mu.Unlock()
}
func (r *Result) SetRef(ref Reference) {
r.Ref = ref
}
func (r *Result) SingleRef() (Reference, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.Refs != nil && r.Ref == nil {
return nil, errors.Errorf("invalid map result")
}
return r.Ref, nil
}


@ -115,7 +115,7 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
req := &pb.ReturnRequest{}
if retError == nil {
if res == nil {
res = &client.Result{}
res = client.NewResult()
}
pbRes := &pb.Result{
Metadata: res.Metadata,
@ -160,6 +160,25 @@ func (c *grpcClient) Run(ctx context.Context, f client.BuildFunc) (retError erro
}
}
}
if res.Attestations != nil && c.caps.Supports(pb.CapAttestations) == nil {
attestations := map[string]*pb.Attestations{}
for k, as := range res.Attestations {
for _, a := range as {
pbAtt, err := client.AttestationToPB(&a)
if err != nil {
retError = err
continue
}
if attestations[k] == nil {
attestations[k] = &pb.Attestations{}
}
attestations[k].Attestation = append(attestations[k].Attestation, pbAtt)
}
}
pbRes.Attestations = attestations
}
if retError == nil {
req.Result = pbRes
}
@ -368,30 +387,15 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
if c.caps.Supports(pb.CapGatewayEvaluateSolve) == nil {
req.Evaluate = creq.Evaluate
} else {
// If evaluate is not supported, fallback to running Stat(".") in order to
// trigger an evaluation of the result.
// If evaluate is not supported, fallback to running Stat(".") in
// order to trigger an evaluation of the result.
defer func() {
if res == nil {
return
}
var (
id string
ref client.Reference
)
ref, err = res.SingleRef()
if err != nil {
for refID := range res.Refs {
id = refID
break
}
} else {
id = ref.(*reference).id
}
_, err = c.client.StatFile(ctx, &pb.StatFileRequest{
Ref: id,
Path: ".",
err = res.EachRef(func(ref client.Reference) error {
_, err := ref.StatFile(ctx, client.StatRequest{Path: "."})
return err
})
}()
}
@ -402,7 +406,7 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
return nil, err
}
res = &client.Result{}
res = client.NewResult()
if resp.Result == nil {
if id := resp.Ref; id != "" {
c.requests[id] = req
@ -443,6 +447,18 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
res.AddRef(k, ref)
}
}
if resp.Result.Attestations != nil {
for p, as := range resp.Result.Attestations {
for _, a := range as.Attestation {
att, err := client.AttestationFromPB(a)
if err != nil {
return nil, err
}
res.AddAttestation(p, *att, nil)
}
}
}
}
return res, nil
@ -1023,6 +1039,15 @@ func (r *reference) ToState() (st llb.State, err error) {
return llb.NewState(defop), nil
}
func (r *reference) Evaluate(ctx context.Context) error {
req := &pb.EvaluateRequest{Ref: r.id}
_, err := r.c.client.Evaluate(ctx, req)
if err != nil {
return err
}
return nil
}
func (r *reference) ReadFile(ctx context.Context, req client.ReadRequest) ([]byte, error) {
rfr := &pb.ReadFileRequest{FilePath: req.Filename, Ref: r.id}
if r := req.Range; r != nil {


@ -56,8 +56,14 @@ const (
// errors.
CapGatewayEvaluateSolve apicaps.CapID = "gateway.solve.evaluate"
CapGatewayEvaluate apicaps.CapID = "gateway.evaluate"
// CapGatewayWarnings is the capability to log warnings from frontend
CapGatewayWarnings apicaps.CapID = "gateway.warnings"
// CapAttestations is the capability to indicate that attestation
// references will be attached to results
CapAttestations apicaps.CapID = "reference.attestations"
)
func init() {
@ -194,10 +200,24 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapGatewayEvaluate,
Name: "gateway evaluate",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapGatewayWarnings,
Name: "logging warnings",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapAttestations,
Name: "reference attestations",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}

File diff suppressed because it is too large


@ -25,6 +25,8 @@ service LLBBridge {
rpc ReadDir(ReadDirRequest) returns (ReadDirResponse);
// apicaps:CapStatFile
rpc StatFile(StatFileRequest) returns (StatFileResponse);
// apicaps:CapGatewayEvaluate
rpc Evaluate(EvaluateRequest) returns (EvaluateResponse);
rpc Ping(PingRequest) returns (PongResponse);
rpc Return(ReturnRequest) returns (ReturnResponse);
// apicaps:CapFrontendInputs
@ -48,6 +50,7 @@ message Result {
RefMap refs = 4;
}
map<string, bytes> metadata = 10;
map<string, Attestations> attestations = 11;
}
message RefMapDeprecated {
@ -63,6 +66,38 @@ message RefMap {
map<string, Ref> refs = 1;
}
message Attestations {
repeated Attestation attestation = 1;
}
message Attestation {
AttestationKind kind = 1;
string ref = 2;
string path = 3;
string inTotoPredicateType = 4;
repeated InTotoSubject inTotoSubjects = 5;
}
enum AttestationKind {
option (gogoproto.goproto_enum_prefix) = false;
InToto = 0 [(gogoproto.enumvalue_customname) = "AttestationKindInToto"];
Bundle = 1 [(gogoproto.enumvalue_customname) = "AttestationKindBundle"];
}
message InTotoSubject {
InTotoSubjectKind kind = 1;
repeated string digest = 2 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
string name = 3;
}
enum InTotoSubjectKind {
option (gogoproto.goproto_enum_prefix) = false;
Self = 0 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindSelf"];
Raw = 1 [(gogoproto.enumvalue_customname) = "InTotoSubjectKindRaw"];
}
message ReturnRequest {
Result result = 1;
google.rpc.Status error = 2;
@ -163,6 +198,13 @@ message StatFileResponse {
fsutil.types.Stat stat = 1;
}
message EvaluateRequest {
string Ref = 1;
}
message EvaluateResponse {
}
message PingRequest{
}
message PongResponse{


@ -2,8 +2,8 @@ package auth
import (
"context"
"crypto/rand"
"crypto/subtle"
"math/rand"
"sync"
"github.com/moby/buildkit/session"


@ -27,27 +27,35 @@ const (
)
type fsSyncProvider struct {
dirs map[string]SyncedDir
dirs DirSource
p progressCb
doneCh chan error
}
type SyncedDir struct {
Name string
Dir string
Excludes []string
Map func(string, *fstypes.Stat) fsutil.MapResult
}
type DirSource interface {
LookupDir(string) (SyncedDir, bool)
}
type StaticDirSource map[string]SyncedDir
var _ DirSource = StaticDirSource{}
func (dirs StaticDirSource) LookupDir(name string) (SyncedDir, bool) {
dir, found := dirs[name]
return dir, found
}
// NewFSSyncProvider creates a new provider for sending files from client
func NewFSSyncProvider(dirs []SyncedDir) session.Attachable {
p := &fsSyncProvider{
dirs: map[string]SyncedDir{},
func NewFSSyncProvider(dirs DirSource) session.Attachable {
return &fsSyncProvider{
dirs: dirs,
}
for _, d := range dirs {
p.dirs[d.Name] = d
}
return p
}
func (sp *fsSyncProvider) Register(server *grpc.Server) {
@ -81,7 +89,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr
dirName = name[0]
}
dir, ok := sp.dirs[dirName]
dir, ok := sp.dirs.LookupDir(dirName)
if !ok {
return InvalidSessionError{status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName)}
}
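On the client side, the former []SyncedDir slice becomes a map keyed by the directory name; a hedged sketch (paths hypothetical):

dirs := filesync.StaticDirSource{
	"context":    filesync.SyncedDir{Dir: "/tmp/build-context"},
	"dockerfile": filesync.SyncedDir{Dir: "/tmp/dockerfile"},
}
attachable := filesync.NewFSSyncProvider(dirs) // accepts any DirSource implementation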


@ -2,6 +2,7 @@ package session
import (
"context"
"math"
"net"
"sync/atomic"
"time"
@ -10,6 +11,7 @@ import (
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/otel/trace"
"golang.org/x/net/http2"
@ -79,21 +81,55 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func())
defer cancelConn()
defer cc.Close()
ticker := time.NewTicker(1 * time.Second)
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
healthClient := grpc_health_v1.NewHealthClient(cc)
failedBefore := false
consecutiveSuccessful := 0
defaultHealthcheckDuration := 30 * time.Second
lastHealthcheckDuration := time.Duration(0)
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
// This healthcheck can erroneously fail in some instances, such as receiving lots of data in a low-bandwidth scenario or too many concurrent builds.
// So the timeout is intentionally long, and the loop tolerates a bounded number of consecutive failures.
healthcheckStart := time.Now()
timeout := time.Duration(math.Max(float64(defaultHealthcheckDuration), float64(lastHealthcheckDuration)*1.5))
ctx, cancel := context.WithTimeout(ctx, timeout)
_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
cancel()
if err != nil {
return
lastHealthcheckDuration = time.Since(healthcheckStart)
logFields := logrus.Fields{
"timeout": timeout,
"actualDuration": lastHealthcheckDuration,
}
if err != nil {
if failedBefore {
bklog.G(ctx).Error("healthcheck failed fatally")
return
}
failedBefore = true
consecutiveSuccessful = 0
bklog.G(ctx).WithFields(logFields).Warn("healthcheck failed")
} else {
consecutiveSuccessful++
if consecutiveSuccessful >= 5 && failedBefore {
failedBefore = false
bklog.G(ctx).WithFields(logFields).Debug("reset healthcheck failure")
}
}
bklog.G(ctx).WithFields(logFields).Debug("healthcheck completed")
}
}
}
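The replacement timeout rule adapts to observed latency: each check may take up to max(30s, 1.5 × the previous check's duration) instead of the old fixed 10 seconds. A quick worked sketch:

last := 45 * time.Second
timeout := time.Duration(math.Max(float64(30*time.Second), float64(last)*1.5))
// timeout == 1m7.5s: a slow-but-alive daemon gets more headroom
// rather than being disconnected by a hard 10s deadline.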


@ -186,6 +186,7 @@ type Solve struct {
MountIDs []string `protobuf:"bytes,2,rep,name=mountIDs,proto3" json:"mountIDs,omitempty"`
Op *pb.Op `protobuf:"bytes,3,opt,name=op,proto3" json:"op,omitempty"`
// Types that are valid to be assigned to Subject:
//
// *Solve_File
// *Solve_Cache
Subject isSolve_Subject `protobuf_oneof:"subject"`


@ -73,13 +73,18 @@ const (
CapMetaDescription apicaps.CapID = "meta.description"
CapMetaExportCache apicaps.CapID = "meta.exportcache"
CapRemoteCacheGHA apicaps.CapID = "cache.gha"
CapRemoteCacheS3 apicaps.CapID = "cache.s3"
CapRemoteCacheGHA apicaps.CapID = "cache.gha"
CapRemoteCacheS3 apicaps.CapID = "cache.s3"
CapRemoteCacheAzBlob apicaps.CapID = "cache.azblob"
CapMergeOp apicaps.CapID = "mergeop"
CapDiffOp apicaps.CapID = "diffop"
CapAnnotations apicaps.CapID = "exporter.image.annotations"
CapAnnotations apicaps.CapID = "exporter.image.annotations"
CapAttestations apicaps.CapID = "exporter.image.attestations"
// CapSourceDateEpoch is the capability to automatically handle the date epoch
CapSourceDateEpoch apicaps.CapID = "exporter.sourcedateepoch"
)
func init() {
@ -423,6 +428,12 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapRemoteCacheAzBlob,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapMergeOp,
Enabled: true,
@ -440,4 +451,17 @@ func init() {
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapAttestations,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceDateEpoch,
Name: "source date epoch",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}


@ -154,6 +154,7 @@ type Op struct {
// inputs is a set of input edges.
Inputs []*Input `protobuf:"bytes,1,rep,name=inputs,proto3" json:"inputs,omitempty"`
// Types that are valid to be assigned to Op:
//
// *Op_Exec
// *Op_Source
// *Op_File
@ -1948,6 +1949,7 @@ type FileAction struct {
SecondaryInput InputIndex `protobuf:"varint,2,opt,name=secondaryInput,proto3,customtype=InputIndex" json:"secondaryInput"`
Output OutputIndex `protobuf:"varint,3,opt,name=output,proto3,customtype=OutputIndex" json:"output"`
// Types that are valid to be assigned to Action:
//
// *FileAction_Copy
// *FileAction_Mkfile
// *FileAction_Mkdir
@ -2465,6 +2467,7 @@ func (m *ChownOpt) GetGroup() *UserOpt {
type UserOpt struct {
// Types that are valid to be assigned to User:
//
// *UserOpt_ByName
// *UserOpt_ByID
User isUserOpt_User `protobuf_oneof:"user"`


@ -0,0 +1,36 @@
package result
import (
pb "github.com/moby/buildkit/frontend/gateway/pb"
digest "github.com/opencontainers/go-digest"
)
type Attestation struct {
Kind pb.AttestationKind
Ref string
Path string
InToto InTotoAttestation
ContentFunc func() ([]byte, error)
}
type InTotoAttestation struct {
PredicateType string
Subjects []InTotoSubject
}
type InTotoSubject struct {
Kind pb.InTotoSubjectKind
Name string
Digest []digest.Digest
}
func DigestMap(ds ...digest.Digest) map[string]string {
m := map[string]string{}
for _, d := range ds {
m[d.Algorithm().String()] = d.Encoded()
}
return m
}
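DigestMap groups digests by algorithm, the shape expected for in-toto subject digests; a quick sketch:

d := digest.FromString("hello")
m := result.DigestMap(d)
// m == map[string]string{"sha256": d.Encoded()}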

vendor/github.com/moby/buildkit/solver/result/result.go (new file, 131 lines, generated/vendored)

@ -0,0 +1,131 @@
package result
import (
"reflect"
"strings"
"sync"
"github.com/moby/buildkit/identity"
"github.com/pkg/errors"
)
const (
attestationRefPrefix = "attestation:"
)
type Result[T any] struct {
mu sync.Mutex
Ref T
Refs map[string]T
Metadata map[string][]byte
Attestations map[string][]Attestation
}
func (r *Result[T]) AddMeta(k string, v []byte) {
r.mu.Lock()
if r.Metadata == nil {
r.Metadata = map[string][]byte{}
}
r.Metadata[k] = v
r.mu.Unlock()
}
func (r *Result[T]) AddRef(k string, ref T) {
r.mu.Lock()
if r.Refs == nil {
r.Refs = map[string]T{}
}
r.Refs[k] = ref
r.mu.Unlock()
}
func (r *Result[T]) AddAttestation(k string, v Attestation, ref T) {
r.mu.Lock()
if r.Refs == nil {
r.Refs = map[string]T{}
}
if r.Attestations == nil {
r.Attestations = map[string][]Attestation{}
}
if v.ContentFunc == nil && !strings.HasPrefix(v.Ref, attestationRefPrefix) {
v.Ref = "attestation:" + identity.NewID()
r.Refs[v.Ref] = ref
}
r.Attestations[k] = append(r.Attestations[k], v)
r.mu.Unlock()
}
func (r *Result[T]) SetRef(ref T) {
r.Ref = ref
}
func (r *Result[T]) SingleRef() (T, error) {
r.mu.Lock()
defer r.mu.Unlock()
if r.Refs != nil && !reflect.ValueOf(r.Ref).IsValid() {
var t T
return t, errors.Errorf("invalid map result")
}
return r.Ref, nil
}
func (r *Result[T]) EachRef(fn func(T) error) (err error) {
if reflect.ValueOf(r.Ref).IsValid() {
err = fn(r.Ref)
}
for _, r := range r.Refs {
if reflect.ValueOf(r).IsValid() {
if err1 := fn(r); err1 != nil && err == nil {
err = err1
}
}
}
return err
}
// EachRef iterates over references in both a and b.
// a and b are assumed to be of the same size and map their references
// to the same set of keys
func EachRef[U any, V any](a *Result[U], b *Result[V], fn func(U, V) error) (err error) {
if reflect.ValueOf(a.Ref).IsValid() && reflect.ValueOf(b.Ref).IsValid() {
err = fn(a.Ref, b.Ref)
}
for k, r := range a.Refs {
if reflect.ValueOf(r).IsValid() && reflect.ValueOf(b.Refs[k]).IsValid() {
if err1 := fn(r, b.Refs[k]); err1 != nil && err == nil {
err = err1
}
}
}
return err
}
func ConvertResult[U any, V any](r *Result[U], fn func(U) (V, error)) (*Result[V], error) {
r2 := &Result[V]{}
var err error
if reflect.ValueOf(r.Ref).IsValid() {
r2.Ref, err = fn(r.Ref)
if err != nil {
return nil, err
}
}
if r.Refs != nil {
r2.Refs = map[string]V{}
}
for k, r := range r.Refs {
if reflect.ValueOf(r).IsValid() {
r2.Refs[k], err = fn(r)
if err != nil {
return nil, err
}
}
}
r2.Attestations = r.Attestations
r2.Metadata = r.Metadata
return r2, nil
}
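A hedged usage sketch of the generic Result. Note that the reflect.ValueOf(...).IsValid() guards only skip an unset Ref when T is an interface type (as in buildkit, where T is a reference interface); the types below are hypothetical:

type ref interface{ ID() string }

type namedRef string

func (r namedRef) ID() string { return string(r) }

func example() error {
	res := &result.Result[ref]{}
	res.AddRef("linux/amd64", namedRef("sha256:aaa"))
	res.AddRef("linux/arm64", namedRef("sha256:bbb"))
	// EachRef visits both platform refs; the nil top-level Ref is skipped.
	return res.EachRef(func(r ref) error {
		fmt.Println(r.ID())
		return nil
	})
}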


@ -0,0 +1,11 @@
package attestation
const (
MediaTypeDockerSchema2AttestationType = "application/vnd.in-toto+json"
DockerAnnotationReferenceType = "vnd.docker.reference.type"
DockerAnnotationReferenceDigest = "vnd.docker.reference.digest"
DockerAnnotationReferenceDescription = "vnd.docker.reference.description"
DockerAnnotationReferenceTypeDefault = "attestation-manifest"
)


@ -4,6 +4,7 @@ import (
"bytes"
"context"
"io"
"strings"
"sync"
"time"
@ -18,12 +19,14 @@ import (
type Buffer interface {
content.Provider
content.Ingester
content.Manager
}
// NewBuffer returns a new buffer
func NewBuffer() Buffer {
return &buffer{
buffers: map[digest.Digest][]byte{},
infos: map[digest.Digest]content.Info{},
refs: map[string]struct{}{},
}
}
@ -31,9 +34,59 @@ func NewBuffer() Buffer {
type buffer struct {
mu sync.Mutex
buffers map[digest.Digest][]byte
infos map[digest.Digest]content.Info
refs map[string]struct{}
}
func (b *buffer) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
b.mu.Lock()
v, ok := b.infos[dgst]
b.mu.Unlock()
if !ok {
return content.Info{}, errdefs.ErrNotFound
}
return v, nil
}
func (b *buffer) Update(ctx context.Context, new content.Info, fieldpaths ...string) (content.Info, error) {
b.mu.Lock()
defer b.mu.Unlock()
updated, ok := b.infos[new.Digest]
if !ok {
return content.Info{}, errdefs.ErrNotFound
}
if len(fieldpaths) == 0 {
fieldpaths = []string{"labels"}
}
for _, path := range fieldpaths {
if strings.HasPrefix(path, "labels.") {
if updated.Labels == nil {
updated.Labels = map[string]string{}
}
key := strings.TrimPrefix(path, "labels.")
updated.Labels[key] = new.Labels[key]
continue
}
if path == "labels" {
updated.Labels = new.Labels
}
}
b.infos[new.Digest] = updated
return updated, nil
}
func (b *buffer) Walk(ctx context.Context, fn content.WalkFunc, filters ...string) error {
return nil // not implemented
}
func (b *buffer) Delete(ctx context.Context, dgst digest.Digest) error {
return nil // not implemented
}
func (b *buffer) Writer(ctx context.Context, opts ...content.WriterOpt) (content.Writer, error) {
var wOpts content.WriterOpts
for _, opt := range opts {
@ -82,6 +135,7 @@ func (b *buffer) addValue(k digest.Digest, dt []byte) {
b.mu.Lock()
defer b.mu.Unlock()
b.buffers[k] = dt
b.infos[k] = content.Info{Digest: k, Size: int64(len(dt))}
}
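With Info bookkeeping added, the in-memory buffer now satisfies content.Manager, so callers can stat blobs after writing them; a hedged sketch using containerd's content.WriteBlob helper (ref name hypothetical):

buf := contentutil.NewBuffer()
dt := []byte("hello")
desc := ocispecs.Descriptor{Digest: digest.FromBytes(dt), Size: int64(len(dt))}
if err := content.WriteBlob(ctx, buf, "example-ref", bytes.NewReader(dt), desc); err != nil {
	return err
}
info, err := buf.Info(ctx, desc.Digest)
// info.Size == 5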
type bufferedWriter struct {


@ -0,0 +1,34 @@
package contentutil
import (
"net/url"
"strings"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/reference"
)
func HasSource(info content.Info, refspec reference.Spec) (bool, error) {
u, err := url.Parse("dummy://" + refspec.Locator)
if err != nil {
return false, err
}
if info.Labels == nil {
return false, nil
}
source, target := u.Hostname(), strings.TrimPrefix(u.Path, "/")
repoLabel, ok := info.Labels["containerd.io/distribution.source."+source]
if !ok || repoLabel == "" {
return false, nil
}
for _, repo := range strings.Split(repoLabel, ",") {
// a repo in the label matching the target means this content was fetched from it
if repo == target {
return true, nil
}
}
return false, nil
}
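A hedged example: a blob labeled as having been pulled from docker.io/library/alpine matches that ref, but not an unrelated one:

info := content.Info{Labels: map[string]string{
	"containerd.io/distribution.source.docker.io": "library/alpine,library/busybox",
}}
spec, _ := reference.Parse("docker.io/library/alpine:latest")
ok, _ := contentutil.HasSource(info, spec) // true
other, _ := reference.Parse("docker.io/library/nginx:latest")
ok2, _ := contentutil.HasSource(info, other) // false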


@ -10,8 +10,8 @@ import (
// GitRef represents a git ref.
//
// Examples:
// - "https://github.com/foo/bar.git#baz/qux:quux/quuz" is parsed into:
// {Remote: "https://github.com/foo/bar.git", ShortName: "bar", Commit:"baz/qux", SubDir: "quux/quuz"}.
// - "https://github.com/foo/bar.git#baz/qux:quux/quuz" is parsed into:
// {Remote: "https://github.com/foo/bar.git", ShortName: "bar", Commit:"baz/qux", SubDir: "quux/quuz"}.
type GitRef struct {
// Remote is the remote repository path.
Remote string


@ -6,7 +6,7 @@ import (
"github.com/containerd/typeurl"
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/proto" // nolint:staticcheck
"github.com/golang/protobuf/proto" //nolint:staticcheck
"github.com/golang/protobuf/ptypes/any"
"github.com/moby/buildkit/util/stack"
"github.com/sirupsen/logrus"


@ -13,6 +13,8 @@ import (
"github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
"github.com/moby/buildkit/util/attestation"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/buildkit/util/resolver/limited"
"github.com/moby/buildkit/util/resolver/retryhandler"
@ -24,6 +26,7 @@ import (
type ContentCache interface {
content.Ingester
content.Provider
content.Manager
}
var leasesMu sync.Mutex
@ -75,10 +78,15 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
if desc.Digest != "" {
ra, err := cache.ReaderAt(ctx, desc)
if err == nil {
desc.Size = ra.Size()
mt, err := DetectManifestMediaType(ra)
info, err := cache.Info(ctx, desc.Digest)
if err == nil {
desc.MediaType = mt
if ok, err := contentutil.HasSource(info, ref); err == nil && ok {
desc.Size = ra.Size()
mt, err := DetectManifestMediaType(ra)
if err == nil {
desc.MediaType = mt
}
}
}
}
}
@ -101,8 +109,14 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
children := childrenConfigHandler(cache, platform)
dslHandler, err := docker.AppendDistributionSourceLabel(cache, ref.String())
if err != nil {
return "", nil, err
}
handlers := []images.Handler{
retryhandler.New(limited.FetchHandler(cache, fetcher, str), func(_ []byte) {}),
dslHandler,
children,
}
if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
@ -159,7 +173,8 @@ func childrenConfigHandler(provider content.Provider, platform platforms.MatchCo
} else {
descs = append(descs, index.Manifests...)
}
case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType:
case images.MediaTypeDockerSchema2Config, ocispecs.MediaTypeImageConfig, docker.LegacyConfigMediaType,
attestation.MediaTypeDockerSchema2AttestationType:
// childless data types.
return nil, nil
default:


@ -8,6 +8,7 @@ import (
"time"
"github.com/containerd/containerd/remotes"
"github.com/moby/buildkit/exporter/containerimage/image"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@ -44,7 +45,7 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) {
return nil, errors.Errorf("invalid schema1 manifest")
}
var img ocispecs.Image
var img image.Image
if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history")
}
@ -68,7 +69,7 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) {
}
}
dt, err := json.MarshalIndent(img, "", " ")
dt, err := json.MarshalIndent(img, "", " ")
if err != nil {
return nil, errors.Wrap(err, "failed to marshal schema1 config")
}


@ -38,14 +38,13 @@ func setUserDefinedTermColors(colorsEnv string) {
return
}
for _, field := range fields {
parts := strings.SplitN(field, "=", 2)
if len(parts) != 2 || strings.Contains(parts[1], "=") {
k, v, ok := strings.Cut(field, "=")
if !ok || strings.Contains(v, "=") {
err := errors.New("A valid entry must have exactly two fields")
logrus.WithError(err).Warnf("Could not parse BUILDKIT_COLORS component: %s", field)
continue
}
k := strings.ToLower(parts[0])
v := parts[1]
k = strings.ToLower(k)
if c, ok := termColorMap[strings.ToLower(v)]; ok {
parseKeys(k, c)
} else if strings.Contains(v, ",") {
@ -94,8 +93,7 @@ func readRGB(v string) aec.ANSI {
}
func parseKeys(k string, c aec.ANSI) {
key := strings.ToLower(k)
switch key {
switch strings.ToLower(k) {
case "run":
colorRun = c
case "cancel":


@ -170,10 +170,10 @@ func (p *textMux) printVtx(t *trace, dgst digest.Digest) {
p.current = ""
v.count = 0
if v.logsPartial {
fmt.Fprintln(p.w, "")
}
if v.Error != "" {
if v.logsPartial {
fmt.Fprintln(p.w, "")
}
if strings.HasSuffix(v.Error, context.Canceled.Error()) {
fmt.Fprintf(p.w, "#%d CANCELED\n", v.index)
} else {


@ -151,7 +151,7 @@ func convertStack(s errors.StackTrace) *Stack {
if idx == -1 {
continue
}
line, err := strconv.Atoi(p[1][idx+1:])
line, err := strconv.ParseInt(p[1][idx+1:], 10, 32)
if err != nil {
continue
}


@ -119,9 +119,7 @@ func (c *Connection) indefiniteBackgroundConnection() {
connReattemptPeriod := defaultConnReattemptPeriod
// No strong seeding required, nano time can
// already help with pseudo uniqueness.
rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024)))
rng := rand.New(rand.NewSource(time.Now().UnixNano() + rand.Int63n(1024))) //nolint:gosec // No strong seeding required, nano time can already help with pseudo uniqueness.
// maxJitterNanos: 70% of the connectionReattemptPeriod
maxJitterNanos := int64(0.7 * float64(connReattemptPeriod))