vendor: update buildkit to v0.14.0-rc1
Update buildkit dependency to v0.14.0-rc1. Update the tracing infrastructure to use the new detect API, which changes how the delegated exporter is configured.

Signed-off-by: Jonathan A. Sternberg <jonathan.sternberg@docker.com>
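For context on the tracing note above: a "delegated" exporter is one that only forwards spans to whatever concrete exporter gets configured later. The sketch below shows that pattern in plain OpenTelemetry SDK terms; the type and method names are illustrative and are not buildkit's actual detect API.

package tracing

import (
    "context"
    "sync"

    sdktrace "go.opentelemetry.io/otel/sdk/trace"
)

// delegatedExporter forwards spans to whichever exporter has been installed.
// Until SetDelegate is called, exported spans are silently dropped.
type delegatedExporter struct {
    mu       sync.RWMutex
    delegate sdktrace.SpanExporter
}

// SetDelegate installs the real exporter once it is known (for example, after
// environment-based detection has run).
func (d *delegatedExporter) SetDelegate(exp sdktrace.SpanExporter) {
    d.mu.Lock()
    defer d.mu.Unlock()
    d.delegate = exp
}

func (d *delegatedExporter) ExportSpans(ctx context.Context, spans []sdktrace.ReadOnlySpan) error {
    d.mu.RLock()
    defer d.mu.RUnlock()
    if d.delegate == nil {
        return nil // nothing configured yet
    }
    return d.delegate.ExportSpans(ctx, spans)
}

func (d *delegatedExporter) Shutdown(ctx context.Context) error {
    d.mu.RLock()
    defer d.mu.RUnlock()
    if d.delegate == nil {
        return nil
    }
    return d.delegate.Shutdown(ctx)
}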
vendor/github.com/containerd/containerd/images/diffid.go (generated, vendored): 2 changes

@@ -36,7 +36,7 @@ func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (
         MediaTypeDockerSchema2Layer,
         ocispec.MediaTypeImageLayer,
         MediaTypeDockerSchema2LayerForeign,
-        ocispec.MediaTypeImageLayerNonDistributable:
+        ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // deprecated
         return desc.Digest, nil
     }
     info, err := cs.Info(ctx, desc.Digest)
vendor/github.com/containerd/containerd/images/mediatypes.go (generated, vendored): 2 changes

@@ -81,7 +81,7 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) {
             return "", nil
         }
         return "gzip", nil
-    case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable:
+    case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // Non-distributable layers are deprecated
         if len(ext) > 0 {
             switch ext[len(ext)-1] {
             case "gzip":
vendor/github.com/containerd/containerd/remotes/docker/authorizer.go (generated, vendored): 18 changes

@@ -148,9 +148,11 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
     defer a.mu.Unlock()
     for _, c := range auth.ParseAuthHeader(last.Header) {
         if c.Scheme == auth.BearerAuth {
-            if err := invalidAuthorization(c, responses); err != nil {
+            if retry, err := invalidAuthorization(ctx, c, responses); err != nil {
                 delete(a.handlers, host)
                 return err
+            } else if retry {
+                delete(a.handlers, host)
             }

             // reuse existing handler

@@ -328,18 +330,24 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st
     return resp.Token, resp.RefreshToken, nil
 }

-func invalidAuthorization(c auth.Challenge, responses []*http.Response) error {
+func invalidAuthorization(ctx context.Context, c auth.Challenge, responses []*http.Response) (retry bool, _ error) {
     errStr := c.Parameters["error"]
     if errStr == "" {
-        return nil
+        return retry, nil
     }

     n := len(responses)
     if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) {
-        return nil
+        limitedErr := errStr
+        errLenghLimit := 64
+        if len(limitedErr) > errLenghLimit {
+            limitedErr = limitedErr[:errLenghLimit] + "..."
+        }
+        log.G(ctx).WithField("error", limitedErr).Debug("authorization error using bearer token, retrying")
+        return true, nil
     }

-    return fmt.Errorf("server message: %s: %w", errStr, ErrInvalidAuthorization)
+    return retry, fmt.Errorf("server message: %s: %w", errStr, ErrInvalidAuthorization)
 }

 func sameRequest(r1, r2 *http.Request) bool {
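The authorizer change above means a bearer challenge that carries an "error" parameter now triggers one retry with a freshly fetched token (plus a debug log) instead of failing immediately, unless the same request already failed that way on the previous attempt. For illustration only, a hypothetical helper using the same containerd auth types that appear in the diff to inspect that parameter:

package example

import (
    "net/http"

    "github.com/containerd/containerd/remotes/docker/auth"
)

// bearerErrors collects the "error" parameter of each bearer challenge on a
// response; this is the value AddResponses uses to decide whether to drop the
// cached handler and retry.
func bearerErrors(resp *http.Response) []string {
    var out []string
    for _, c := range auth.ParseAuthHeader(resp.Header) {
        if c.Scheme == auth.BearerAuth {
            out = append(out, c.Parameters["error"])
        }
    }
    return out
}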
vendor/github.com/containerd/containerd/remotes/docker/resolver.go (generated, vendored): 70 changes

@@ -704,9 +704,71 @@ func IsLocalhost(host string) bool {
     return ip.IsLoopback()
 }

+// NewHTTPFallback returns http.RoundTripper which allows fallback from https to
+// http for registry endpoints with configurations for both http and TLS,
+// such as defaulted localhost endpoints.
+func NewHTTPFallback(transport http.RoundTripper) http.RoundTripper {
+    return &httpFallback{
+        super: transport,
+    }
+}
+
+type httpFallback struct {
+    super http.RoundTripper
+    host  string
+}
+
+func (f *httpFallback) RoundTrip(r *http.Request) (*http.Response, error) {
+    // only fall back if the same host had previously fell back
+    if f.host != r.URL.Host {
+        resp, err := f.super.RoundTrip(r)
+        if !isTLSError(err) {
+            return resp, err
+        }
+    }
+
+    plainHTTPUrl := *r.URL
+    plainHTTPUrl.Scheme = "http"
+
+    plainHTTPRequest := *r
+    plainHTTPRequest.URL = &plainHTTPUrl
+
+    if f.host != r.URL.Host {
+        f.host = r.URL.Host
+
+        // update body on the second attempt
+        if r.Body != nil && r.GetBody != nil {
+            body, err := r.GetBody()
+            if err != nil {
+                return nil, err
+            }
+            plainHTTPRequest.Body = body
+        }
+    }
+
+    return f.super.RoundTrip(&plainHTTPRequest)
+}
+
+func isTLSError(err error) bool {
+    if err == nil {
+        return false
+    }
+    var tlsErr tls.RecordHeaderError
+    if errors.As(err, &tlsErr) && string(tlsErr.RecordHeader[:]) == "HTTP/" {
+        return true
+    }
+    if strings.Contains(err.Error(), "TLS handshake timeout") {
+        return true
+    }
+
+    return false
+}
+
 // HTTPFallback is an http.RoundTripper which allows fallback from https to http
 // for registry endpoints with configurations for both http and TLS, such as
 // defaulted localhost endpoints.
+//
+// Deprecated: Use NewHTTPFallback instead.
 type HTTPFallback struct {
     http.RoundTripper
 }

@@ -722,6 +784,14 @@ func (f HTTPFallback) RoundTrip(r *http.Request) (*http.Response, error) {
     plainHTTPRequest := *r
     plainHTTPRequest.URL = &plainHTTPUrl

+    if r.Body != nil && r.GetBody != nil {
+        body, err := r.GetBody()
+        if err != nil {
+            return nil, err
+        }
+        plainHTTPRequest.Body = body
+    }
+
     return f.RoundTripper.RoundTrip(&plainHTTPRequest)
 }

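A rough usage sketch for the new fallback transport (hypothetical example, not code from this commit): wrapping a standard transport with NewHTTPFallback lets a client reach registries, such as defaulted localhost endpoints, that are configured for HTTPS but actually answer plain HTTP.

package main

import (
    "net/http"

    "github.com/containerd/containerd/remotes/docker"
)

func main() {
    // On a TLS record-header error the fallback transport retries the
    // request over plain HTTP and remembers the host, so later requests
    // to that host skip the failed HTTPS attempt.
    client := &http.Client{
        Transport: docker.NewHTTPFallback(http.DefaultTransport),
    }
    _ = client // hand this client to whatever registry code issues the requests
}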
vendor/github.com/containerd/containerd/version/version.go (generated, vendored): 2 changes

@@ -23,7 +23,7 @@ var (
     Package = "github.com/containerd/containerd"

     // Version holds the complete version number. Filled in at linking time.
-    Version = "1.7.15+unknown"
+    Version = "1.7.17+unknown"

     // Revision is filled with the VCS (e.g. git) revision being used to build
     // the program at linking time.
vendor/github.com/containerd/ttrpc/client.go (generated, vendored): 37 changes

@@ -386,25 +386,44 @@ func (c *Client) receiveLoop() error {
 // createStream creates a new stream and registers it with the client
 // Introduce stream types for multiple or single response
 func (c *Client) createStream(flags uint8, b []byte) (*stream, error) {
-    c.streamLock.Lock()
+    // sendLock must be held across both allocation of the stream ID and sending it across the wire.
+    // This ensures that new stream IDs sent on the wire are always increasing, which is a
+    // requirement of the TTRPC protocol.
+    // This use of sendLock could be split into another mutex that covers stream creation + first send,
+    // and just use sendLock to guard writing to the wire, but for now it seems simpler to have fewer mutexes.
+    c.sendLock.Lock()
+    defer c.sendLock.Unlock()

     // Check if closed since lock acquired to prevent adding
     // anything after cleanup completes
     select {
     case <-c.ctx.Done():
-        c.streamLock.Unlock()
         return nil, ErrClosed
     default:
     }

-    // Stream ID should be allocated at same time
-    s := newStream(c.nextStreamID, c)
-    c.streams[s.id] = s
-    c.nextStreamID = c.nextStreamID + 2
+    var s *stream
+    if err := func() error {
+        // In the future this could be replaced with a sync.Map instead of streamLock+map.
+        c.streamLock.Lock()
+        defer c.streamLock.Unlock()

-    c.sendLock.Lock()
-    defer c.sendLock.Unlock()
-    c.streamLock.Unlock()
+        // Check if closed since lock acquired to prevent adding
+        // anything after cleanup completes
+        select {
+        case <-c.ctx.Done():
+            return ErrClosed
+        default:
+        }
+
+        s = newStream(c.nextStreamID, c)
+        c.streams[s.id] = s
+        c.nextStreamID = c.nextStreamID + 2
+
+        return nil
+    }(); err != nil {
+        return nil, err
+    }

     if err := c.channel.send(uint32(s.id), messageTypeRequest, flags, b); err != nil {
         return s, filterCloseErr(err)
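The new comment in createStream states the invariant this change enforces: the stream ID must be allocated and written to the wire under the same lock, so IDs observed by the server are strictly increasing. A standalone sketch of that pattern (illustrative names, not the ttrpc implementation):

package example

import (
    "fmt"
    "io"
    "sync"
)

// conn is a toy connection: the same mutex guards both ID allocation and the
// write, so a later-allocated ID can never reach the wire before an earlier one.
type conn struct {
    mu     sync.Mutex
    nextID uint32
}

func (c *conn) send(w io.Writer, payload []byte) (uint32, error) {
    c.mu.Lock()
    defer c.mu.Unlock()

    id := c.nextID
    c.nextID += 2 // mirror ttrpc's client-side stride of two

    if _, err := fmt.Fprintf(w, "%d:%s\n", id, payload); err != nil {
        return 0, err
    }
    return id, nil
}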