vendor: update buildkit to master@ae9d0f5

Signed-off-by: Justin Chadwell <me@jedevc.com>
This commit is contained in:
Justin Chadwell
2022-11-22 14:39:36 +00:00
parent 6e9b743296
commit 36e663edda
375 changed files with 14834 additions and 13552 deletions

View File

@ -2,8 +2,8 @@ package auth
import (
"context"
"crypto/rand"
"crypto/subtle"
"math/rand"
"sync"
"github.com/moby/buildkit/session"

View File

@ -27,27 +27,35 @@ const (
)
type fsSyncProvider struct {
dirs map[string]SyncedDir
dirs DirSource
p progressCb
doneCh chan error
}
// SyncedDir describes one local directory made available for file syncing.
type SyncedDir struct {
	// Name is the identifier under which this directory is looked up
	// (see DirSource.LookupDir / fsSyncProvider.handle).
	Name string
	// Dir is the local filesystem path of the directory.
	Dir string
	// Excludes holds exclusion patterns — presumably fsutil-style match
	// patterns applied during transfer; confirm against fsutil docs.
	Excludes []string
	// Map, if non-nil, can rewrite or filter each entry's stat before it
	// is sent (semantics defined by fsutil.MapResult).
	Map func(string, *fstypes.Stat) fsutil.MapResult
}
// DirSource resolves a directory name requested by the remote side to a
// SyncedDir, reporting whether the name is known.
type DirSource interface {
	LookupDir(string) (SyncedDir, bool)
}
// StaticDirSource is a fixed, name-keyed set of SyncedDirs implementing
// DirSource with a plain map lookup.
type StaticDirSource map[string]SyncedDir

// Compile-time assertion that StaticDirSource satisfies DirSource.
var _ DirSource = StaticDirSource{}
// LookupDir returns the SyncedDir registered under name and reports
// whether an entry with that name exists.
func (dirs StaticDirSource) LookupDir(name string) (SyncedDir, bool) {
	if d, ok := dirs[name]; ok {
		return d, true
	}
	return SyncedDir{}, false
}
// NewFSSyncProvider creates a new provider for sending files from client
func NewFSSyncProvider(dirs []SyncedDir) session.Attachable {
p := &fsSyncProvider{
dirs: map[string]SyncedDir{},
func NewFSSyncProvider(dirs DirSource) session.Attachable {
return &fsSyncProvider{
dirs: dirs,
}
for _, d := range dirs {
p.dirs[d.Name] = d
}
return p
}
func (sp *fsSyncProvider) Register(server *grpc.Server) {
@ -81,7 +89,7 @@ func (sp *fsSyncProvider) handle(method string, stream grpc.ServerStream) (retEr
dirName = name[0]
}
dir, ok := sp.dirs[dirName]
dir, ok := sp.dirs.LookupDir(dirName)
if !ok {
return InvalidSessionError{status.Errorf(codes.NotFound, "no access allowed to dir %q", dirName)}
}

View File

@ -2,6 +2,7 @@ package session
import (
"context"
"math"
"net"
"sync/atomic"
"time"
@ -10,6 +11,7 @@ import (
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"
"go.opentelemetry.io/otel/trace"
"golang.org/x/net/http2"
@ -79,21 +81,55 @@ func monitorHealth(ctx context.Context, cc *grpc.ClientConn, cancelConn func())
defer cancelConn()
defer cc.Close()
ticker := time.NewTicker(1 * time.Second)
ticker := time.NewTicker(5 * time.Second)
defer ticker.Stop()
healthClient := grpc_health_v1.NewHealthClient(cc)
failedBefore := false
consecutiveSuccessful := 0
defaultHealthcheckDuration := 30 * time.Second
lastHealthcheckDuration := time.Duration(0)
for {
select {
case <-ctx.Done():
return
case <-ticker.C:
ctx, cancel := context.WithTimeout(ctx, 10*time.Second)
// This healthcheck can erroneously fail in some instances, such as receiving lots of data in a low-bandwidth scenario or too many concurrent builds.
// So, this healthcheck is purposely long, and can tolerate some failures on purpose.
healthcheckStart := time.Now()
timeout := time.Duration(math.Max(float64(defaultHealthcheckDuration), float64(lastHealthcheckDuration)*1.5))
ctx, cancel := context.WithTimeout(ctx, timeout)
_, err := healthClient.Check(ctx, &grpc_health_v1.HealthCheckRequest{})
cancel()
if err != nil {
return
lastHealthcheckDuration = time.Since(healthcheckStart)
logFields := logrus.Fields{
"timeout": timeout,
"actualDuration": lastHealthcheckDuration,
}
if err != nil {
if failedBefore {
bklog.G(ctx).Error("healthcheck failed fatally")
return
}
failedBefore = true
consecutiveSuccessful = 0
bklog.G(ctx).WithFields(logFields).Warn("healthcheck failed")
} else {
consecutiveSuccessful++
if consecutiveSuccessful >= 5 && failedBefore {
failedBefore = false
bklog.G(ctx).WithFields(logFields).Debug("reset healthcheck failure")
}
}
bklog.G(ctx).WithFields(logFields).Debug("healthcheck completed")
}
}
}