vendor: update buildkit to b124b0c3

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Author: Tonis Tiigi
Date: 2022-02-23 12:15:16 -08:00
parent 10debb577e
commit 64ce211ba4
27 changed files with 415 additions and 180 deletions

View File

@@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"
// Version holds the complete version number. Filled in at linking time.
-Version = "1.6.0-rc.2+unknown"
+Version = "1.6.0+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.

View File

@@ -17,6 +17,14 @@ This package provides various compression algorithms.
# changelog
* Jan 25, 2022 (v1.14.2)
* zstd: improve header decoder by @dsnet [#476](https://github.com/klauspost/compress/pull/476)
* zstd: Add bigger default blocks [#469](https://github.com/klauspost/compress/pull/469)
* zstd: Remove unused decompression buffer [#470](https://github.com/klauspost/compress/pull/470)
* zstd: Fix logically dead code by @ningmingxiao [#472](https://github.com/klauspost/compress/pull/472)
* flate: Improve level 7-9 [#471](https://github.com/klauspost/compress/pull/471) [#473](https://github.com/klauspost/compress/pull/473)
* zstd: Add noasm tag for xxhash [#475](https://github.com/klauspost/compress/pull/475)
* Jan 11, 2022 (v1.14.1)
* s2: Add stream index in [#462](https://github.com/klauspost/compress/pull/462)
* flate: Speed and efficiency improvements in [#439](https://github.com/klauspost/compress/pull/439) [#461](https://github.com/klauspost/compress/pull/461) [#455](https://github.com/klauspost/compress/pull/455) [#452](https://github.com/klauspost/compress/pull/452) [#458](https://github.com/klauspost/compress/pull/458)

View File

@@ -85,7 +85,7 @@ func (e *fastEncoder) Encode(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
-const kSearchStrength = 7
+const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s
@@ -334,7 +334,7 @@ func (e *fastEncoder) EncodeNoHist(blk *blockEnc, src []byte) {
// TEMPLATE
const hashLog = tableBits
// seems global, but would be nice to tweak.
-const kSearchStrength = 8
+const kSearchStrength = 6
// nextEmit is where in src the next emitLiteral should start from.
nextEmit := s

View File

@@ -44,7 +44,13 @@ func (c *Client) Build(ctx context.Context, opt SolveOpt, product string, buildF
})
}
-cb := func(ref string, s *session.Session) error {
+cb := func(ref string, s *session.Session, opts map[string]string) error {
for k, v := range opts {
if feOpts == nil {
feOpts = map[string]string{}
}
feOpts[k] = v
}
gwClient := c.gatewayClientForBuild(ref)
g, err := grpcclient.New(ctx, feOpts, s.ID(), product, gwClient, gworkers)
if err != nil {

View File

@@ -75,7 +75,7 @@ func (c *Client) Solve(ctx context.Context, def *llb.Definition, opt SolveOpt, s
return c.solve(ctx, def, nil, opt, statusChan)
}
-type runGatewayCB func(ref string, s *session.Session) error
+type runGatewayCB func(ref string, s *session.Session, opts map[string]string) error
func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runGatewayCB, opt SolveOpt, statusChan chan *SolveStatus) (*SolveResponse, error) {
if def != nil && runGateway != nil {
@@ -109,7 +109,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
}
-cacheOpt, err := parseCacheOptions(ctx, opt)
+cacheOpt, err := parseCacheOptions(ctx, runGateway != nil, opt)
if err != nil {
return nil, err
}
@@ -171,6 +171,9 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
}
for k, v := range cacheOpt.frontendAttrs {
if opt.FrontendAttrs == nil {
opt.FrontendAttrs = map[string]string{}
}
opt.FrontendAttrs[k] = v
}
@@ -225,7 +228,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
if runGateway != nil {
eg.Go(func() error {
-err := runGateway(ref, s)
+err := runGateway(ref, s, opt.FrontendAttrs)
if err == nil {
return nil
}
@@ -386,7 +389,7 @@ type cacheOptions struct {
frontendAttrs map[string]string
}
-func parseCacheOptions(ctx context.Context, opt SolveOpt) (*cacheOptions, error) {
+func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cacheOptions, error) {
var (
cacheExports []*controlapi.CacheOptionsEntry
cacheImports []*controlapi.CacheOptionsEntry
@@ -471,7 +474,7 @@ func parseCacheOptions(ctx context.Context, opt SolveOpt) (*cacheOptions, error)
})
}
}
-if opt.Frontend != "" {
+if opt.Frontend != "" || isGateway {
// use legacy API for registry importers, because the frontend might not support the new API
if len(legacyImportRefs) > 0 {
frontendAttrs["cache-from"] = strings.Join(legacyImportRefs, ",")

View File

@@ -99,6 +99,8 @@ type ContainerdConfig struct {
ApparmorProfile string `toml:"apparmor-profile"`
MaxParallelism int `toml:"max-parallelism"`
Rootless bool `toml:"rootless"`
}
type GCPolicy struct {

View File

@@ -341,6 +341,24 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
}
}
// these options are added by the Go client in solve()
if _, ok := creq.FrontendOpt["cache-imports"]; !ok {
if v, ok := c.opts["cache-imports"]; ok {
if creq.FrontendOpt == nil {
creq.FrontendOpt = map[string]string{}
}
creq.FrontendOpt["cache-imports"] = v
}
}
if _, ok := creq.FrontendOpt["cache-from"]; !ok {
if v, ok := c.opts["cache-from"]; ok {
if creq.FrontendOpt == nil {
creq.FrontendOpt = map[string]string{}
}
creq.FrontendOpt["cache-from"] = v
}
}
req := &pb.SolveRequest{
Definition: creq.Definition,
Frontend: creq.Frontend,
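
As a hedged sketch of the consumer side (the frontend name below is only an example): a gateway build callback that issues its own Solve can leave FrontendOpt empty and still benefit from the forwarded cache options, because the code above injects "cache-from"/"cache-imports" from the client's options when the request does not set them.

package frontend

import (
	"context"

	gateway "github.com/moby/buildkit/frontend/gateway/client"
)

// buildWithDefaultCache is a hypothetical gateway callback. It does not set
// "cache-from"/"cache-imports" itself, so the defaults injected by the grpc
// client above (taken from the options forwarded by the outer solve()) apply.
func buildWithDefaultCache(ctx context.Context, gc gateway.Client) (*gateway.Result, error) {
	return gc.Solve(ctx, gateway.SolveRequest{
		Frontend: "dockerfile.v0",
	})
}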

View File

@@ -3,10 +3,17 @@ package bklog
import (
"context"
"github.com/containerd/containerd/log"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/otel/trace"
)
func init() {
// overwrites containerd/log
log.G = GetLogger
log.L = L
}
var (
G = GetLogger
L = logrus.NewEntry(logrus.StandardLogger())
@@ -37,6 +44,8 @@ func GetLogger(ctx context.Context) (l *logrus.Entry) {
if logger != nil {
l = logger.(*logrus.Entry)
} else if logger := log.GetLogger(ctx); logger != nil {
l = logger
} else {
l = L
}
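
A small usage sketch, assuming only the public bklog API shown here: the helper resolves a logger from the context, falls back to containerd's context logger, and finally to the package default, while the new init() keeps containerd's log package pointed at the same resolver.

package main

import (
	"context"

	"github.com/moby/buildkit/util/bklog"
)

func main() {
	ctx := context.Background()
	// G(ctx) returns a *logrus.Entry: a logger attached to ctx if present,
	// otherwise containerd/log's context logger, otherwise the default L.
	bklog.G(ctx).Info("context-aware logging via bklog")
}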

View File

@@ -7,6 +7,34 @@ import (
"golang.org/x/sys/unix"
)
// MountedFast is a method of detecting a mount point without reading
// mountinfo from procfs. A caller can only trust the result if no error
// and sure == true are returned. Otherwise, other methods (e.g. parsing
// /proc/mounts) have to be used. If unsure, use Mounted instead (which
// uses MountedFast, but falls back to parsing mountinfo if needed).
//
// If a non-existent path is specified, an appropriate error is returned.
// In case the caller is not interested in this particular error, it should
// be handled separately using e.g. errors.Is(err, os.ErrNotExist).
//
// This function is only available on Linux. When available (since kernel
// v5.6), openat2(2) syscall is used to reliably detect all mounts. Otherwise,
// the implementation falls back to using stat(2), which can reliably detect
// normal (but not bind) mounts.
func MountedFast(path string) (mounted, sure bool, err error) {
// Root is always mounted.
if path == string(os.PathSeparator) {
return true, true, nil
}
path, err = normalizePath(path)
if err != nil {
return false, false, err
}
mounted, sure, err = mountedFast(path)
return
}
// mountedByOpenat2 is a method of detecting a mount that works for all kinds
// of mounts (incl. bind mounts), but requires a recent (v5.6+) linux kernel.
func mountedByOpenat2(path string) (bool, error) {
@@ -34,24 +62,40 @@ func mountedByOpenat2(path string) (bool, error) {
return false, &os.PathError{Op: "openat2", Path: path, Err: err}
}
-func mounted(path string) (bool, error) {
-path, err := normalizePath(path)
-if err != nil {
-return false, err
// mountedFast is similar to MountedFast, except it expects a normalized path.
func mountedFast(path string) (mounted, sure bool, err error) {
// Root is always mounted.
if path == string(os.PathSeparator) {
return true, true, nil
}
// Try a fast path, using openat2() with RESOLVE_NO_XDEV.
-mounted, err := mountedByOpenat2(path)
+mounted, err = mountedByOpenat2(path)
if err == nil {
-return mounted, nil
+return mounted, true, nil
}
// Another fast path: compare st.st_dev fields.
mounted, err = mountedByStat(path)
// This does not work for bind mounts, so false negative
// is possible, therefore only trust if return is true.
if mounted && err == nil {
return true, true, nil
}
return
}
func mounted(path string) (bool, error) {
path, err := normalizePath(path)
if err != nil {
return false, err
}
mounted, sure, err := mountedFast(path)
if sure && err == nil {
return mounted, nil
}
-// Fallback to parsing mountinfo
+// Fallback to parsing mountinfo.
return mountedByMountinfo(path)
}
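
A hedged, Linux-only usage sketch of the new helper (the target path is just an example): MountedFast answers from the openat2/stat fast paths when it can, and the caller falls back to the slower mountinfo parse only when sure is false; a missing path surfaces as os.ErrNotExist, as the updated documentation describes.

package main

import (
	"errors"
	"fmt"
	"log"
	"os"

	"github.com/moby/sys/mountinfo"
)

func main() {
	target := "/var/lib/docker" // example path
	mounted, sure, err := mountinfo.MountedFast(target)
	if errors.Is(err, os.ErrNotExist) {
		fmt.Println("path does not exist, so it cannot be a mount point")
		return
	}
	if err != nil {
		log.Fatal(err)
	}
	if !sure {
		// Fast paths could not decide (e.g. a bind mount on a kernel
		// without openat2); fall back to parsing /proc/self/mountinfo.
		if mounted, err = mountinfo.Mounted(target); err != nil {
			log.Fatal(err)
		}
	}
	fmt.Println("mounted:", mounted)
}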

View File

@@ -13,9 +13,9 @@ func GetMounts(f FilterFunc) ([]*Info, error) {
// Mounted determines if a specified path is a mount point. In case of any
// error, false (and an error) is returned.
//
-// The non-existent path returns an error. If a caller is not interested
-// in this particular error, it should handle it separately using e.g.
-// errors.Is(err, os.ErrNotExist).
+// If a non-existent path is specified, an appropriate error is returned.
+// In case the caller is not interested in this particular error, it should
+// be handled separately using e.g. errors.Is(err, os.ErrNotExist).
func Mounted(path string) (bool, error) {
// root is always mounted
if path == string(os.PathSeparator) {

View File

@@ -118,3 +118,11 @@ func (c *selfCollector) Describe(ch chan<- *Desc) {
func (c *selfCollector) Collect(ch chan<- Metric) {
ch <- c.self
}
// collectorMetric is a metric that is also a collector.
// Because of selfCollector, most (if not all) Metrics in
// this package are also collectors.
type collectorMetric interface {
Metric
Collector
}
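
For context, a minimal sketch of why such a union interface is satisfiable (the type name below is illustrative, not part of the library): standard metric types such as Gauge embed selfCollector, so a single value implements both Metric and Collector.

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
)

// metricAndCollector mirrors the unexported collectorMetric interface above.
type metricAndCollector interface {
	prometheus.Metric
	prometheus.Collector
}

func main() {
	// A Gauge embeds selfCollector, so it satisfies both interfaces at once.
	var mc metricAndCollector = prometheus.NewGauge(prometheus.GaugeOpts{
		Name: "demo_gauge",
		Help: "illustrative gauge",
	})
	fmt.Println(mc.Desc())
}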

View File

@@ -20,6 +20,7 @@ import (
"math"
"runtime"
"runtime/metrics"
"strings"
"sync"
//nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
@@ -31,10 +32,14 @@ import (
type goCollector struct {
base baseGoCollector
// mu protects updates to all fields ensuring a consistent
// snapshot is always produced by Collect.
mu sync.Mutex
// rm... fields all pertain to the runtime/metrics package.
rmSampleBuf []metrics.Sample
rmSampleMap map[string]*metrics.Sample
-rmMetrics []Metric
+rmMetrics []collectorMetric
// With Go 1.17, the runtime/metrics package was introduced.
// From that point on, metric names produced by the runtime/metrics
@@ -52,13 +57,24 @@ type goCollector struct {
// Deprecated: Use collectors.NewGoCollector instead.
func NewGoCollector() Collector {
descriptions := metrics.All()
-descMap := make(map[string]*metrics.Description)
-for i := range descriptions {
-descMap[descriptions[i].Name] = &descriptions[i]
// Collect all histogram samples so that we can get their buckets.
// The API guarantees that the buckets are always fixed for the lifetime
// of the process.
var histograms []metrics.Sample
for _, d := range descriptions {
if d.Kind == metrics.KindFloat64Histogram {
histograms = append(histograms, metrics.Sample{Name: d.Name})
}
}
metrics.Read(histograms)
bucketsMap := make(map[string][]float64)
for i := range histograms {
bucketsMap[histograms[i].Name] = histograms[i].Value.Float64Histogram().Buckets
}
// Generate a Desc and ValueType for each runtime/metrics metric.
-metricSet := make([]Metric, 0, len(descriptions))
+metricSet := make([]collectorMetric, 0, len(descriptions))
sampleBuf := make([]metrics.Sample, 0, len(descriptions))
sampleMap := make(map[string]*metrics.Sample, len(descriptions))
for i := range descriptions {
@@ -76,9 +92,10 @@ func NewGoCollector() Collector {
sampleBuf = append(sampleBuf, metrics.Sample{Name: d.Name})
sampleMap[d.Name] = &sampleBuf[len(sampleBuf)-1]
-var m Metric
+var m collectorMetric
if d.Kind == metrics.KindFloat64Histogram {
_, hasSum := rmExactSumMap[d.Name]
unit := d.Name[strings.IndexRune(d.Name, ':')+1:]
m = newBatchHistogram(
NewDesc(
BuildFQName(namespace, subsystem, name),
@@ -86,6 +103,7 @@ func NewGoCollector() Collector {
nil,
nil,
),
internal.RuntimeMetricsBucketsForUnit(bucketsMap[d.Name], unit),
hasSum,
)
} else if d.Cumulative {
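
The snapshotting that NewGoCollector now performs can be reproduced with the standard runtime/metrics package (Go 1.17+); this standalone sketch reads one sample per histogram-kinded metric and prints how many bucket boundaries each carries, which is exactly the information cached in bucketsMap above.

package main

import (
	"fmt"
	"runtime/metrics"
)

func main() {
	// One sample per Float64Histogram metric; the runtime guarantees the
	// bucket boundaries stay fixed for the lifetime of the process.
	var samples []metrics.Sample
	for _, d := range metrics.All() {
		if d.Kind == metrics.KindFloat64Histogram {
			samples = append(samples, metrics.Sample{Name: d.Name})
		}
	}
	metrics.Read(samples)

	for _, s := range samples {
		h := s.Value.Float64Histogram()
		fmt.Printf("%s: %d boundaries\n", s.Name, len(h.Buckets))
	}
}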
@@ -130,9 +148,25 @@ func (c *goCollector) Collect(ch chan<- Metric) {
// Collect base non-memory metrics.
c.base.Collect(ch)
// Collect must be thread-safe, so prevent concurrent use of
// rmSampleBuf. Just read into rmSampleBuf but write all the data
// we get into our Metrics or MemStats.
//
// This lock also ensures that the Metrics we send out are all from
// the same updates, ensuring their mutual consistency insofar as
// is guaranteed by the runtime/metrics package.
//
// N.B. This locking is heavy-handed, but Collect is expected to be called
// relatively infrequently. Also the core operation here, metrics.Read,
// is fast (O(tens of microseconds)) so contention should certainly be
// low, though channel operations and any allocations may add to that.
c.mu.Lock()
defer c.mu.Unlock()
// Populate runtime/metrics sample buffer.
metrics.Read(c.rmSampleBuf)
// Update all our metrics from rmSampleBuf.
for i, sample := range c.rmSampleBuf {
// N.B. switch on concrete type because it's significantly more efficient
// than checking for the Counter and Gauge interface implementations. In
@@ -157,7 +191,6 @@ func (c *goCollector) Collect(ch chan<- Metric) {
panic("unexpected metric type")
}
}
// ms is a dummy MemStats that we populate ourselves so that we can
// populate the old metrics from it.
var ms runtime.MemStats
@@ -280,13 +313,27 @@ type batchHistogram struct {
// but Write calls may operate concurrently with updates.
// Contention between these two sources should be rare.
mu sync.Mutex
-buckets []float64 // Inclusive lower bounds.
+buckets []float64 // Inclusive lower bounds, like runtime/metrics.
counts []uint64
sum float64 // Used if hasSum is true.
}
-func newBatchHistogram(desc *Desc, hasSum bool) *batchHistogram {
-h := &batchHistogram{desc: desc, hasSum: hasSum}
// newBatchHistogram creates a new batch histogram value with the given
// Desc, buckets, and whether or not it has an exact sum available.
//
// buckets must always be from the runtime/metrics package, following
// the same conventions.
func newBatchHistogram(desc *Desc, buckets []float64, hasSum bool) *batchHistogram {
h := &batchHistogram{
desc: desc,
buckets: buckets,
// Because buckets follows runtime/metrics conventions, there's
// 1 more value in the buckets list than there are buckets represented,
// because in runtime/metrics, the bucket values represent *boundaries*,
// and non-Inf boundaries are inclusive lower bounds for that bucket.
counts: make([]uint64, len(buckets)-1),
hasSum: hasSum,
}
h.init(h)
return h
}
@@ -294,28 +341,25 @@ func newBatchHistogram(desc *Desc, hasSum bool) *batchHistogram {
// update updates the batchHistogram from a runtime/metrics histogram.
//
// sum must be provided if the batchHistogram was created to have an exact sum.
// h.buckets must be a strict subset of his.Buckets.
func (h *batchHistogram) update(his *metrics.Float64Histogram, sum float64) {
counts, buckets := his.Counts, his.Buckets
-// Skip a -Inf bucket altogether. It's not clear how to represent that.
-if math.IsInf(buckets[0], -1) {
-buckets = buckets[1:]
-counts = counts[1:]
-}
h.mu.Lock()
defer h.mu.Unlock()
-// Check if we're initialized.
-if h.buckets == nil {
-// Make copies of counts and buckets. It's really important
-// that we don't retain his.Counts or his.Buckets anywhere since
-// it's going to get reused.
-h.buckets = make([]float64, len(buckets))
-copy(h.buckets, buckets)
-h.counts = make([]uint64, len(counts))
// Clear buckets.
for i := range h.counts {
h.counts[i] = 0
}
// Copy and reduce buckets.
var j int
for i, count := range counts {
h.counts[j] += count
if buckets[i+1] == h.buckets[j+1] {
j++
}
}
-copy(h.counts, counts)
if h.hasSum {
h.sum = sum
}
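
To make the "copy and reduce" step concrete, here is a self-contained sketch on toy data (not library code) of folding counts taken over fine runtime/metrics boundaries into a coarser boundary set, the same merge the loop above performs against h.buckets.

package main

import "fmt"

// reduceCounts folds counts measured over fine bucket boundaries into a
// coarser set. coarse must be a subset of fine sharing its first and last
// boundaries, mirroring the assumption documented for update().
func reduceCounts(fine []float64, counts []uint64, coarse []float64) []uint64 {
	out := make([]uint64, len(coarse)-1)
	j := 0
	for i, c := range counts {
		out[j] += c
		if fine[i+1] == coarse[j+1] {
			j++
		}
	}
	return out
}

func main() {
	fine := []float64{1, 2, 4, 8, 16} // 4 fine buckets
	counts := []uint64{3, 1, 2, 5}    // one count per fine bucket
	coarse := []float64{1, 4, 16}     // keep every other boundary
	fmt.Println(reduceCounts(fine, counts, coarse)) // [4 7]
}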

View File

@@ -17,6 +17,7 @@
package internal
import (
"math"
"path"
"runtime/metrics"
"strings"
@@ -75,3 +76,67 @@ func RuntimeMetricsToProm(d *metrics.Description) (string, string, string, bool)
}
return namespace, subsystem, name, valid
}
// RuntimeMetricsBucketsForUnit takes a set of buckets obtained for a runtime/metrics histogram
// type (so, lower-bound inclusive) and a unit from a runtime/metrics name, and produces
// a reduced set of buckets. This function always removes any -Inf bucket as it's represented
// as the bottom-most upper-bound inclusive bucket in Prometheus.
func RuntimeMetricsBucketsForUnit(buckets []float64, unit string) []float64 {
switch unit {
case "bytes":
// Rebucket as powers of 2.
return rebucketExp(buckets, 2)
case "seconds":
// Rebucket as powers of 10 and then merge all buckets greater
// than 1 second into the +Inf bucket.
b := rebucketExp(buckets, 10)
for i := range b {
if b[i] <= 1 {
continue
}
b[i] = math.Inf(1)
b = b[:i+1]
break
}
return b
}
return buckets
}
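
The "seconds" branch is easiest to see on a toy boundary list; this standalone sketch (not the vendored function itself) applies the same cap-at-one-second rule that runs after rebucketing.

package main

import (
	"fmt"
	"math"
)

// capAtOneSecond mirrors the "seconds" branch above: boundaries above 1s
// collapse into a single +Inf boundary and the slice is truncated there.
func capAtOneSecond(b []float64) []float64 {
	for i := range b {
		if b[i] <= 1 {
			continue
		}
		b[i] = math.Inf(1)
		b = b[:i+1]
		break
	}
	return b
}

func main() {
	in := []float64{0.0001, 0.001, 0.01, 0.1, 1, 10, 100}
	fmt.Println(capAtOneSecond(in)) // [0.0001 0.001 0.01 0.1 1 +Inf]
}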
// rebucketExp takes a list of bucket boundaries (lower bound inclusive) and
// downsamples the buckets to those a multiple of base apart. The end result
// is a roughly exponential (in many cases, perfectly exponential) bucketing
// scheme.
func rebucketExp(buckets []float64, base float64) []float64 {
bucket := buckets[0]
var newBuckets []float64
// We may see a -Inf here, in which case, add it and skip it
// since we risk producing NaNs otherwise.
//
// We need to preserve -Inf values to maintain runtime/metrics
// conventions. We'll strip it out later.
if bucket == math.Inf(-1) {
newBuckets = append(newBuckets, bucket)
buckets = buckets[1:]
bucket = buckets[0]
}
// From now on, bucket should always have a non-Inf value because
// Infs are only ever at the ends of the bucket lists, so
// arithmetic operations on it are non-NaN.
for i := 1; i < len(buckets); i++ {
if bucket >= 0 && buckets[i] < bucket*base {
// The next bucket we want to include is at least bucket*base.
continue
} else if bucket < 0 && buckets[i] < bucket/base {
// In this case the bucket we're targeting is negative, and since
// we're ascending through buckets here, we need to divide to get
// closer to zero exponentially.
continue
}
// The +Inf bucket will always be the last one, and we'll always
// end up including it here because bucket
newBuckets = append(newBuckets, bucket)
bucket = buckets[i]
}
return append(newBuckets, bucket)
}