Mirror of https://gitea.com/Lydanne/buildx.git, synced 2025-07-09 21:17:09 +08:00
vendor: update buildkit to v0.19.0-rc1
Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
vendor/github.com/containerd/containerd/api/services/content/v1/content_grpc.pb.go (generated, vendored, 2 changes)
@ -1,3 +1,5 @@
//go:build !no_grpc

// Code generated by protoc-gen-go-grpc. DO NOT EDIT.
// versions:
// - protoc-gen-go-grpc v1.2.0
vendor/github.com/containerd/containerd/api/services/content/v1/content_ttrpc.pb.go (generated, vendored, new file, 311 lines)
@ -0,0 +1,311 @@
// Code generated by protoc-gen-go-ttrpc. DO NOT EDIT.
// source: github.com/containerd/containerd/api/services/content/v1/content.proto
package content

import (
    context "context"
    ttrpc "github.com/containerd/ttrpc"
    emptypb "google.golang.org/protobuf/types/known/emptypb"
)

type TTRPCContentService interface {
    Info(context.Context, *InfoRequest) (*InfoResponse, error)
    Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
    List(context.Context, *ListContentRequest, TTRPCContent_ListServer) error
    Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error)
    Read(context.Context, *ReadContentRequest, TTRPCContent_ReadServer) error
    Status(context.Context, *StatusRequest) (*StatusResponse, error)
    ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error)
    Write(context.Context, TTRPCContent_WriteServer) error
    Abort(context.Context, *AbortRequest) (*emptypb.Empty, error)
}

type TTRPCContent_ListServer interface {
    Send(*ListContentResponse) error
    ttrpc.StreamServer
}

type ttrpccontentListServer struct {
    ttrpc.StreamServer
}

func (x *ttrpccontentListServer) Send(m *ListContentResponse) error {
    return x.StreamServer.SendMsg(m)
}

type TTRPCContent_ReadServer interface {
    Send(*ReadContentResponse) error
    ttrpc.StreamServer
}

type ttrpccontentReadServer struct {
    ttrpc.StreamServer
}

func (x *ttrpccontentReadServer) Send(m *ReadContentResponse) error {
    return x.StreamServer.SendMsg(m)
}

type TTRPCContent_WriteServer interface {
    Send(*WriteContentResponse) error
    Recv() (*WriteContentRequest, error)
    ttrpc.StreamServer
}

type ttrpccontentWriteServer struct {
    ttrpc.StreamServer
}

func (x *ttrpccontentWriteServer) Send(m *WriteContentResponse) error {
    return x.StreamServer.SendMsg(m)
}

func (x *ttrpccontentWriteServer) Recv() (*WriteContentRequest, error) {
    m := new(WriteContentRequest)
    if err := x.StreamServer.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}

func RegisterTTRPCContentService(srv *ttrpc.Server, svc TTRPCContentService) {
    srv.RegisterService("containerd.services.content.v1.Content", &ttrpc.ServiceDesc{
        Methods: map[string]ttrpc.Method{
            "Info": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
                var req InfoRequest
                if err := unmarshal(&req); err != nil {
                    return nil, err
                }
                return svc.Info(ctx, &req)
            },
            "Update": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
                var req UpdateRequest
                if err := unmarshal(&req); err != nil {
                    return nil, err
                }
                return svc.Update(ctx, &req)
            },
            "Delete": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
                var req DeleteContentRequest
                if err := unmarshal(&req); err != nil {
                    return nil, err
                }
                return svc.Delete(ctx, &req)
            },
            "Status": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
                var req StatusRequest
                if err := unmarshal(&req); err != nil {
                    return nil, err
                }
                return svc.Status(ctx, &req)
            },
            "ListStatuses": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
                var req ListStatusesRequest
                if err := unmarshal(&req); err != nil {
                    return nil, err
                }
                return svc.ListStatuses(ctx, &req)
            },
            "Abort": func(ctx context.Context, unmarshal func(interface{}) error) (interface{}, error) {
                var req AbortRequest
                if err := unmarshal(&req); err != nil {
                    return nil, err
                }
                return svc.Abort(ctx, &req)
            },
        },
        Streams: map[string]ttrpc.Stream{
            "List": {
                Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
                    m := new(ListContentRequest)
                    if err := stream.RecvMsg(m); err != nil {
                        return nil, err
                    }
                    return nil, svc.List(ctx, m, &ttrpccontentListServer{stream})
                },
                StreamingClient: false,
                StreamingServer: true,
            },
            "Read": {
                Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
                    m := new(ReadContentRequest)
                    if err := stream.RecvMsg(m); err != nil {
                        return nil, err
                    }
                    return nil, svc.Read(ctx, m, &ttrpccontentReadServer{stream})
                },
                StreamingClient: false,
                StreamingServer: true,
            },
            "Write": {
                Handler: func(ctx context.Context, stream ttrpc.StreamServer) (interface{}, error) {
                    return nil, svc.Write(ctx, &ttrpccontentWriteServer{stream})
                },
                StreamingClient: true,
                StreamingServer: true,
            },
        },
    })
}

type TTRPCContentClient interface {
    Info(context.Context, *InfoRequest) (*InfoResponse, error)
    Update(context.Context, *UpdateRequest) (*UpdateResponse, error)
    List(context.Context, *ListContentRequest) (TTRPCContent_ListClient, error)
    Delete(context.Context, *DeleteContentRequest) (*emptypb.Empty, error)
    Read(context.Context, *ReadContentRequest) (TTRPCContent_ReadClient, error)
    Status(context.Context, *StatusRequest) (*StatusResponse, error)
    ListStatuses(context.Context, *ListStatusesRequest) (*ListStatusesResponse, error)
    Write(context.Context) (TTRPCContent_WriteClient, error)
    Abort(context.Context, *AbortRequest) (*emptypb.Empty, error)
}

type ttrpccontentClient struct {
    client *ttrpc.Client
}

func NewTTRPCContentClient(client *ttrpc.Client) TTRPCContentClient {
    return &ttrpccontentClient{
        client: client,
    }
}

func (c *ttrpccontentClient) Info(ctx context.Context, req *InfoRequest) (*InfoResponse, error) {
    var resp InfoResponse
    if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Info", req, &resp); err != nil {
        return nil, err
    }
    return &resp, nil
}

func (c *ttrpccontentClient) Update(ctx context.Context, req *UpdateRequest) (*UpdateResponse, error) {
    var resp UpdateResponse
    if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Update", req, &resp); err != nil {
        return nil, err
    }
    return &resp, nil
}

func (c *ttrpccontentClient) List(ctx context.Context, req *ListContentRequest) (TTRPCContent_ListClient, error) {
    stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
        StreamingClient: false,
        StreamingServer: true,
    }, "containerd.services.content.v1.Content", "List", req)
    if err != nil {
        return nil, err
    }
    x := &ttrpccontentListClient{stream}
    return x, nil
}

type TTRPCContent_ListClient interface {
    Recv() (*ListContentResponse, error)
    ttrpc.ClientStream
}

type ttrpccontentListClient struct {
    ttrpc.ClientStream
}

func (x *ttrpccontentListClient) Recv() (*ListContentResponse, error) {
    m := new(ListContentResponse)
    if err := x.ClientStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}

func (c *ttrpccontentClient) Delete(ctx context.Context, req *DeleteContentRequest) (*emptypb.Empty, error) {
    var resp emptypb.Empty
    if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Delete", req, &resp); err != nil {
        return nil, err
    }
    return &resp, nil
}

func (c *ttrpccontentClient) Read(ctx context.Context, req *ReadContentRequest) (TTRPCContent_ReadClient, error) {
    stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
        StreamingClient: false,
        StreamingServer: true,
    }, "containerd.services.content.v1.Content", "Read", req)
    if err != nil {
        return nil, err
    }
    x := &ttrpccontentReadClient{stream}
    return x, nil
}

type TTRPCContent_ReadClient interface {
    Recv() (*ReadContentResponse, error)
    ttrpc.ClientStream
}

type ttrpccontentReadClient struct {
    ttrpc.ClientStream
}

func (x *ttrpccontentReadClient) Recv() (*ReadContentResponse, error) {
    m := new(ReadContentResponse)
    if err := x.ClientStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}

func (c *ttrpccontentClient) Status(ctx context.Context, req *StatusRequest) (*StatusResponse, error) {
    var resp StatusResponse
    if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Status", req, &resp); err != nil {
        return nil, err
    }
    return &resp, nil
}

func (c *ttrpccontentClient) ListStatuses(ctx context.Context, req *ListStatusesRequest) (*ListStatusesResponse, error) {
    var resp ListStatusesResponse
    if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "ListStatuses", req, &resp); err != nil {
        return nil, err
    }
    return &resp, nil
}

func (c *ttrpccontentClient) Write(ctx context.Context) (TTRPCContent_WriteClient, error) {
    stream, err := c.client.NewStream(ctx, &ttrpc.StreamDesc{
        StreamingClient: true,
        StreamingServer: true,
    }, "containerd.services.content.v1.Content", "Write", nil)
    if err != nil {
        return nil, err
    }
    x := &ttrpccontentWriteClient{stream}
    return x, nil
}

type TTRPCContent_WriteClient interface {
    Send(*WriteContentRequest) error
    Recv() (*WriteContentResponse, error)
    ttrpc.ClientStream
}

type ttrpccontentWriteClient struct {
    ttrpc.ClientStream
}

func (x *ttrpccontentWriteClient) Send(m *WriteContentRequest) error {
    return x.ClientStream.SendMsg(m)
}

func (x *ttrpccontentWriteClient) Recv() (*WriteContentResponse, error) {
    m := new(WriteContentResponse)
    if err := x.ClientStream.RecvMsg(m); err != nil {
        return nil, err
    }
    return m, nil
}

func (c *ttrpccontentClient) Abort(ctx context.Context, req *AbortRequest) (*emptypb.Empty, error) {
    var resp emptypb.Empty
    if err := c.client.Call(ctx, "containerd.services.content.v1.Content", "Abort", req, &resp); err != nil {
        return nil, err
    }
    return &resp, nil
}
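Note (not part of the diff): the generated bindings above are wired to a ttrpc server or client. A minimal sketch, assuming a unix-socket listener/connection is already available; only RegisterTTRPCContentService and NewTTRPCContentClient come from the file above, the rest is illustrative.

package example

import (
    "context"
    "log"
    "net"

    contentapi "github.com/containerd/containerd/api/services/content/v1"
    "github.com/containerd/ttrpc"
)

// serveContent registers a TTRPCContentService implementation on a ttrpc server
// and serves it on the given listener.
func serveContent(ctx context.Context, l net.Listener, svc contentapi.TTRPCContentService) error {
    srv, err := ttrpc.NewServer()
    if err != nil {
        return err
    }
    contentapi.RegisterTTRPCContentService(srv, svc)
    return srv.Serve(ctx, l)
}

// queryInfo calls the unary Info method through the generated ttrpc client.
func queryInfo(ctx context.Context, conn net.Conn, dgst string) {
    client := contentapi.NewTTRPCContentClient(ttrpc.NewClient(conn))
    resp, err := client.Info(ctx, &contentapi.InfoRequest{Digest: dgst})
    if err != nil {
        log.Fatalf("info: %v", err)
    }
    log.Printf("info: %+v", resp.Info)
}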
vendor/github.com/containerd/containerd/errdefs/errors.go (generated, vendored, 72 changes)
@ -1,72 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package errdefs defines the common errors used throughout containerd
// packages.
//
// Use with fmt.Errorf to add context to an error.
//
// To detect an error class, use the IsXXX functions to tell whether an error
// is of a certain type.
package errdefs

import (
    "github.com/containerd/errdefs"
)

// Definitions of common error types used throughout containerd. All containerd
// errors returned by most packages will map into one of these errors classes.
// Packages should return errors of these types when they want to instruct a
// client to take a particular action.
//
// These errors map closely to grpc errors.
var (
    ErrUnknown            = errdefs.ErrUnknown
    ErrInvalidArgument    = errdefs.ErrInvalidArgument
    ErrNotFound           = errdefs.ErrNotFound
    ErrAlreadyExists      = errdefs.ErrAlreadyExists
    ErrPermissionDenied   = errdefs.ErrPermissionDenied
    ErrResourceExhausted  = errdefs.ErrResourceExhausted
    ErrFailedPrecondition = errdefs.ErrFailedPrecondition
    ErrConflict           = errdefs.ErrConflict
    ErrNotModified        = errdefs.ErrNotModified
    ErrAborted            = errdefs.ErrAborted
    ErrOutOfRange         = errdefs.ErrOutOfRange
    ErrNotImplemented     = errdefs.ErrNotImplemented
    ErrInternal           = errdefs.ErrInternal
    ErrUnavailable        = errdefs.ErrUnavailable
    ErrDataLoss           = errdefs.ErrDataLoss
    ErrUnauthenticated    = errdefs.ErrUnauthenticated

    IsCanceled           = errdefs.IsCanceled
    IsUnknown            = errdefs.IsUnknown
    IsInvalidArgument    = errdefs.IsInvalidArgument
    IsDeadlineExceeded   = errdefs.IsDeadlineExceeded
    IsNotFound           = errdefs.IsNotFound
    IsAlreadyExists      = errdefs.IsAlreadyExists
    IsPermissionDenied   = errdefs.IsPermissionDenied
    IsResourceExhausted  = errdefs.IsResourceExhausted
    IsFailedPrecondition = errdefs.IsFailedPrecondition
    IsConflict           = errdefs.IsConflict
    IsNotModified        = errdefs.IsNotModified
    IsAborted            = errdefs.IsAborted
    IsOutOfRange         = errdefs.IsOutOfRange
    IsNotImplemented     = errdefs.IsNotImplemented
    IsInternal           = errdefs.IsInternal
    IsUnavailable        = errdefs.IsUnavailable
    IsDataLoss           = errdefs.IsDataLoss
    IsUnauthorized       = errdefs.IsUnauthorized
)
vendor/github.com/containerd/containerd/errdefs/grpc.go (generated, vendored, 147 changes)
@ -1,147 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package errdefs

import (
    "context"
    "fmt"
    "strings"

    "google.golang.org/grpc/codes"
    "google.golang.org/grpc/status"
)

// ToGRPC will attempt to map the backend containerd error into a grpc error,
// using the original error message as a description.
//
// Further information may be extracted from certain errors depending on their
// type.
//
// If the error is unmapped, the original error will be returned to be handled
// by the regular grpc error handling stack.
func ToGRPC(err error) error {
    if err == nil {
        return nil
    }

    if isGRPCError(err) {
        // error has already been mapped to grpc
        return err
    }

    switch {
    case IsInvalidArgument(err):
        return status.Errorf(codes.InvalidArgument, err.Error())
    case IsNotFound(err):
        return status.Errorf(codes.NotFound, err.Error())
    case IsAlreadyExists(err):
        return status.Errorf(codes.AlreadyExists, err.Error())
    case IsFailedPrecondition(err):
        return status.Errorf(codes.FailedPrecondition, err.Error())
    case IsUnavailable(err):
        return status.Errorf(codes.Unavailable, err.Error())
    case IsNotImplemented(err):
        return status.Errorf(codes.Unimplemented, err.Error())
    case IsCanceled(err):
        return status.Errorf(codes.Canceled, err.Error())
    case IsDeadlineExceeded(err):
        return status.Errorf(codes.DeadlineExceeded, err.Error())
    }

    return err
}

// ToGRPCf maps the error to grpc error codes, assembling the formatting string
// and combining it with the target error string.
//
// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
func ToGRPCf(err error, format string, args ...interface{}) error {
    return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
}

// FromGRPC returns the underlying error from a grpc service based on the grpc error code
func FromGRPC(err error) error {
    if err == nil {
        return nil
    }

    var cls error // divide these into error classes, becomes the cause

    switch code(err) {
    case codes.InvalidArgument:
        cls = ErrInvalidArgument
    case codes.AlreadyExists:
        cls = ErrAlreadyExists
    case codes.NotFound:
        cls = ErrNotFound
    case codes.Unavailable:
        cls = ErrUnavailable
    case codes.FailedPrecondition:
        cls = ErrFailedPrecondition
    case codes.Unimplemented:
        cls = ErrNotImplemented
    case codes.Canceled:
        cls = context.Canceled
    case codes.DeadlineExceeded:
        cls = context.DeadlineExceeded
    default:
        cls = ErrUnknown
    }

    msg := rebaseMessage(cls, err)
    if msg != "" {
        err = fmt.Errorf("%s: %w", msg, cls)
    } else {
        err = cls
    }

    return err
}

// rebaseMessage removes the repeats for an error at the end of an error
// string. This will happen when taking an error over grpc then remapping it.
//
// Effectively, we just remove the string of cls from the end of err if it
// appears there.
func rebaseMessage(cls error, err error) string {
    desc := errDesc(err)
    clss := cls.Error()
    if desc == clss {
        return ""
    }

    return strings.TrimSuffix(desc, ": "+clss)
}

func isGRPCError(err error) bool {
    _, ok := status.FromError(err)
    return ok
}

func code(err error) codes.Code {
    if s, ok := status.FromError(err); ok {
        return s.Code()
    }
    return codes.Unknown
}

func errDesc(err error) string {
    if s, ok := status.FromError(err); ok {
        return s.Message()
    }
    return err.Error()
}
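Note (not part of the diff): these helpers go away because the rest of the diff switches to the standalone errdefs module, with errgrpc.ToNative (from github.com/containerd/errdefs/pkg/errgrpc) replacing errdefs.FromGRPC in the proxy code below. A hedged sketch of the equivalent round trip with the new packages; errgrpc.ToGRPC is assumed to be the forward mapping in that package.

package example

import (
    "fmt"

    "github.com/containerd/errdefs"
    "github.com/containerd/errdefs/pkg/errgrpc"
)

// roundTrip shows an errdefs error crossing a gRPC boundary and coming back
// as the same error class.
func roundTrip() bool {
    err := fmt.Errorf("image %q: %w", "busybox:latest", errdefs.ErrNotFound)

    wire := errgrpc.ToGRPC(err)      // status error carrying codes.NotFound
    native := errgrpc.ToNative(wire) // mapped back onto errdefs.ErrNotFound

    return errdefs.IsNotFound(native) // true
}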
vendor/github.com/containerd/containerd/pkg/seed/seed.go (generated, vendored, 43 changes)
@ -1,43 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

// Package seed provides an initializer for the global [math/rand] seed.
//
// Deprecated: Do not rely on the global seed.
package seed

import (
    "math/rand"
    "time"
)

// WithTimeAndRand seeds the global math rand generator with nanoseconds
// XOR'ed with a crypto component if available for uniqueness.
//
// Deprecated: Do not rely on the global seed.
func WithTimeAndRand() {
    var (
        b [4]byte
        u int64
    )

    tryReadRandom(b[:])

    // Set higher 32 bits, bottom 32 will be set with nanos
    u |= (int64(b[0]) << 56) | (int64(b[1]) << 48) | (int64(b[2]) << 40) | (int64(b[3]) << 32)

    rand.Seed(u ^ time.Now().UnixNano())
}
vendor/github.com/containerd/containerd/protobuf/any.go (generated, vendored, 47 changes)
@ -1,47 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package protobuf

import (
    "github.com/containerd/typeurl/v2"
    "google.golang.org/protobuf/types/known/anypb"
)

// FromAny converts typeurl.Any to github.com/containerd/containerd/protobuf/types.Any.
func FromAny(from typeurl.Any) *anypb.Any {
    if from == nil {
        return nil
    }

    if pbany, ok := from.(*anypb.Any); ok {
        return pbany
    }

    return &anypb.Any{
        TypeUrl: from.GetTypeUrl(),
        Value:   from.GetValue(),
    }
}

// FromAny converts an arbitrary interface to github.com/containerd/containerd/protobuf/types.Any.
func MarshalAnyToProto(from interface{}) (*anypb.Any, error) {
    any, err := typeurl.MarshalAny(from)
    if err != nil {
        return nil, err
    }
    return FromAny(any), nil
}
@ -19,7 +19,7 @@ package content
import (
    "strings"

    "github.com/containerd/containerd/filters"
    "github.com/containerd/containerd/v2/pkg/filters"
)

// AdaptInfo returns `filters.Adaptor` that handles `content.Info`.
@ -165,6 +165,11 @@ type Writer interface {
    Truncate(size int64) error
}

type Syncer interface {
    // Sync flushes the in-flight writes to the disk (when applicable)
    Sync() error
}

// Opt is used to alter the mutable properties of content
type Opt func(*Info) error
@ -17,6 +17,7 @@
package content

import (
    "bytes"
    "context"
    "errors"
    "fmt"
@ -24,12 +25,11 @@ import (
    "sync"
    "time"

    "github.com/containerd/containerd/v2/internal/randutil"
    "github.com/containerd/errdefs"
    "github.com/containerd/log"
    "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"

    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/pkg/randutil"
)

var ErrReset = errors.New("writer has been reset")
@ -53,6 +53,31 @@ func NewReader(ra ReaderAt) io.Reader {
    return io.NewSectionReader(ra, 0, ra.Size())
}

type nopCloserBytesReader struct {
    *bytes.Reader
}

func (*nopCloserBytesReader) Close() error { return nil }

type nopCloserSectionReader struct {
    *io.SectionReader
}

func (*nopCloserSectionReader) Close() error { return nil }

// BlobReadSeeker returns a read seeker for the blob from the provider.
func BlobReadSeeker(ctx context.Context, provider Provider, desc ocispec.Descriptor) (io.ReadSeekCloser, error) {
    if int64(len(desc.Data)) == desc.Size && digest.FromBytes(desc.Data) == desc.Digest {
        return &nopCloserBytesReader{bytes.NewReader(desc.Data)}, nil
    }

    ra, err := provider.ReaderAt(ctx, desc)
    if err != nil {
        return nil, err
    }
    return &nopCloserSectionReader{io.NewSectionReader(ra, 0, ra.Size())}, nil
}

// ReadBlob retrieves the entire contents of the blob from the provider.
//
// Avoid using this for large blobs, such as layers.
@ -146,32 +171,28 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er
//
// Copy is buffered, so no need to wrap reader in buffered io.
func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
    ws, err := cw.Status()
    if err != nil {
        return fmt.Errorf("failed to get status: %w", err)
    }
    r := or
    if ws.Offset > 0 {
        r, err = seekReader(or, ws.Offset, size)
        if err != nil {
            return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
        }
    }

    for i := 0; ; i++ {
        if i >= 1 {
            log.G(ctx).WithField("digest", expected).Debugf("retrying copy due to reset")
        }
        copied, err := copyWithBuffer(cw, r)
        if errors.Is(err, ErrReset) {
            ws, err := cw.Status()
            if err != nil {
                return fmt.Errorf("failed to get status: %w", err)
            }

        ws, err := cw.Status()
        if err != nil {
            return fmt.Errorf("failed to get status: %w", err)
        }
        // Reset the original reader if
        // 1. there is an offset, or
        // 2. this is a retry due to Reset error
        if ws.Offset > 0 || i > 0 {
            r, err = seekReader(or, ws.Offset, size)
            if err != nil {
                return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
            }
        }

        copied, err := copyWithBuffer(cw, r)
        if errors.Is(err, ErrReset) {
            continue
        }
        if err != nil {
@ -183,14 +204,6 @@ func Copy(ctx context.Context, cw Writer, or io.Reader, size int64, expected dig
    }
    if err := cw.Commit(ctx, size, expected, opts...); err != nil {
        if errors.Is(err, ErrReset) {
            ws, err := cw.Status()
            if err != nil {
                return fmt.Errorf("failed to get status: %w", err)
            }
            r, err = seekReader(or, ws.Offset, size)
            if err != nil {
                return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
            }
            continue
        }
        if !errdefs.IsAlreadyExists(err) {
@ -27,7 +27,7 @@ type remoteReaderAt struct {
    ctx    context.Context
    digest digest.Digest
    size   int64
    client contentapi.ContentClient
    client contentapi.TTRPCContentClient
}

func (ra *remoteReaderAt) Size() int64 {
@ -18,26 +18,51 @@ package proxy

import (
    "context"
    "fmt"
    "io"

    contentapi "github.com/containerd/containerd/api/services/content/v1"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/protobuf"
    protobuftypes "github.com/containerd/containerd/protobuf/types"
    "github.com/containerd/errdefs"
    "github.com/containerd/errdefs/pkg/errgrpc"
    "github.com/containerd/ttrpc"
    digest "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "google.golang.org/grpc"
    "google.golang.org/protobuf/types/known/emptypb"

    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/containerd/v2/pkg/protobuf"
    protobuftypes "github.com/containerd/containerd/v2/pkg/protobuf/types"
)

type proxyContentStore struct {
    client contentapi.ContentClient
    // client is the rpc content client
    // NOTE: ttrpc is used because it is the smaller interface shared with grpc
    client contentapi.TTRPCContentClient
}

// NewContentStore returns a new content store which communicates over a GRPC
// connection using the containerd content GRPC API.
func NewContentStore(client contentapi.ContentClient) content.Store {
    return &proxyContentStore{
        client: client,
func NewContentStore(client any) content.Store {
    switch c := client.(type) {
    case contentapi.ContentClient:
        return &proxyContentStore{
            client: convertClient{c},
        }
    case grpc.ClientConnInterface:
        return &proxyContentStore{
            client: convertClient{contentapi.NewContentClient(c)},
        }
    case contentapi.TTRPCContentClient:
        return &proxyContentStore{
            client: c,
        }
    case *ttrpc.Client:
        return &proxyContentStore{
            client: contentapi.NewTTRPCContentClient(c),
        }
    default:
        panic(fmt.Errorf("unsupported content client %T: %w", client, errdefs.ErrNotImplemented))
    }
}
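Note (not part of the diff): because NewContentStore now takes any, callers pass whichever transport client they already have. A hedged usage sketch; the connection setup is assumed, and the import path github.com/containerd/containerd/v2/core/content/proxy is inferred from the hunk header's package proxy, not shown in the diff.

package example

import (
    "net"

    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/containerd/v2/core/content/proxy"
    "github.com/containerd/ttrpc"
    "google.golang.org/grpc"
)

// storeFromGRPC builds a proxy content store from an existing gRPC connection.
// *grpc.ClientConn satisfies grpc.ClientConnInterface, so NewContentStore wraps
// it in a generated ContentClient via convertClient.
func storeFromGRPC(conn *grpc.ClientConn) content.Store {
    return proxy.NewContentStore(conn)
}

// storeFromTTRPC builds a proxy content store over a raw ttrpc connection,
// hitting the *ttrpc.Client case of the switch above.
func storeFromTTRPC(conn net.Conn) content.Store {
    return proxy.NewContentStore(ttrpc.NewClient(conn))
}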
@ -46,7 +71,7 @@ func (pcs *proxyContentStore) Info(ctx context.Context, dgst digest.Digest) (con
        Digest: dgst.String(),
    })
    if err != nil {
        return content.Info{}, errdefs.FromGRPC(err)
        return content.Info{}, errgrpc.ToNative(err)
    }

    return infoFromGRPC(resp.Info), nil
@ -57,14 +82,14 @@ func (pcs *proxyContentStore) Walk(ctx context.Context, fn content.WalkFunc, fil
        Filters: filters,
    })
    if err != nil {
        return errdefs.FromGRPC(err)
        return errgrpc.ToNative(err)
    }

    for {
        msg, err := session.Recv()
        if err != nil {
            if err != io.EOF {
                return errdefs.FromGRPC(err)
                return errgrpc.ToNative(err)
            }

            break
@ -84,7 +109,7 @@ func (pcs *proxyContentStore) Delete(ctx context.Context, dgst digest.Digest) er
    if _, err := pcs.client.Delete(ctx, &contentapi.DeleteContentRequest{
        Digest: dgst.String(),
    }); err != nil {
        return errdefs.FromGRPC(err)
        return errgrpc.ToNative(err)
    }

    return nil
@ -110,7 +135,7 @@ func (pcs *proxyContentStore) Status(ctx context.Context, ref string) (content.S
        Ref: ref,
    })
    if err != nil {
        return content.Status{}, errdefs.FromGRPC(err)
        return content.Status{}, errgrpc.ToNative(err)
    }

    status := resp.Status
@ -132,7 +157,7 @@ func (pcs *proxyContentStore) Update(ctx context.Context, info content.Info, fie
        },
    })
    if err != nil {
        return content.Info{}, errdefs.FromGRPC(err)
        return content.Info{}, errgrpc.ToNative(err)
    }
    return infoFromGRPC(resp.Info), nil
}
@ -142,7 +167,7 @@ func (pcs *proxyContentStore) ListStatuses(ctx context.Context, filters ...strin
        Filters: filters,
    })
    if err != nil {
        return nil, errdefs.FromGRPC(err)
        return nil, errgrpc.ToNative(err)
    }

    var statuses []content.Status
@ -170,7 +195,7 @@ func (pcs *proxyContentStore) Writer(ctx context.Context, opts ...content.Writer
    }
    wrclient, offset, err := pcs.negotiate(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
    if err != nil {
        return nil, errdefs.FromGRPC(err)
        return nil, errgrpc.ToNative(err)
    }

    return &remoteWriter{
@ -185,13 +210,13 @@ func (pcs *proxyContentStore) Abort(ctx context.Context, ref string) error {
    if _, err := pcs.client.Abort(ctx, &contentapi.AbortRequest{
        Ref: ref,
    }); err != nil {
        return errdefs.FromGRPC(err)
        return errgrpc.ToNative(err)
    }

    return nil
}

func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.Content_WriteClient, int64, error) {
func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size int64, expected digest.Digest) (contentapi.TTRPCContent_WriteClient, int64, error) {
    wrclient, err := pcs.client.Write(ctx)
    if err != nil {
        return nil, 0, err
@ -214,6 +239,70 @@ func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size in
    return wrclient, resp.Offset, nil
}

type convertClient struct {
    contentapi.ContentClient
}

func (c convertClient) Info(ctx context.Context, req *contentapi.InfoRequest) (*contentapi.InfoResponse, error) {
    return c.ContentClient.Info(ctx, req)
}

func (c convertClient) Update(ctx context.Context, req *contentapi.UpdateRequest) (*contentapi.UpdateResponse, error) {
    return c.ContentClient.Update(ctx, req)
}

type convertListClient struct {
    contentapi.Content_ListClient
}

func (c convertClient) List(ctx context.Context, req *contentapi.ListContentRequest) (contentapi.TTRPCContent_ListClient, error) {
    lc, err := c.ContentClient.List(ctx, req)
    if lc == nil {
        return nil, err
    }
    return convertListClient{lc}, err
}

func (c convertClient) Delete(ctx context.Context, req *contentapi.DeleteContentRequest) (*emptypb.Empty, error) {
    return c.ContentClient.Delete(ctx, req)
}

type convertReadClient struct {
    contentapi.Content_ReadClient
}

func (c convertClient) Read(ctx context.Context, req *contentapi.ReadContentRequest) (contentapi.TTRPCContent_ReadClient, error) {
    rc, err := c.ContentClient.Read(ctx, req)
    if rc == nil {
        return nil, err
    }
    return convertReadClient{rc}, err
}

func (c convertClient) Status(ctx context.Context, req *contentapi.StatusRequest) (*contentapi.StatusResponse, error) {
    return c.ContentClient.Status(ctx, req)
}

func (c convertClient) ListStatuses(ctx context.Context, req *contentapi.ListStatusesRequest) (*contentapi.ListStatusesResponse, error) {
    return c.ContentClient.ListStatuses(ctx, req)
}

type convertWriteClient struct {
    contentapi.Content_WriteClient
}

func (c convertClient) Write(ctx context.Context) (contentapi.TTRPCContent_WriteClient, error) {
    wc, err := c.ContentClient.Write(ctx)
    if wc == nil {
        return nil, err
    }
    return convertWriteClient{wc}, err
}

func (c convertClient) Abort(ctx context.Context, req *contentapi.AbortRequest) (*emptypb.Empty, error) {
    return c.ContentClient.Abort(ctx, req)
}

func infoToGRPC(info *content.Info) *contentapi.Info {
    return &contentapi.Info{
        Digest: info.Digest.String(),
@ -22,15 +22,16 @@ import (
    "io"

    contentapi "github.com/containerd/containerd/api/services/content/v1"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/protobuf"
    "github.com/containerd/errdefs/pkg/errgrpc"
    digest "github.com/opencontainers/go-digest"

    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/containerd/v2/pkg/protobuf"
)

type remoteWriter struct {
    ref    string
    client contentapi.Content_WriteClient
    client contentapi.TTRPCContent_WriteClient
    offset int64
    digest digest.Digest
}
@ -58,7 +59,7 @@ func (rw *remoteWriter) Status() (content.Status, error) {
        Action: contentapi.WriteAction_STAT,
    })
    if err != nil {
        return content.Status{}, fmt.Errorf("error getting writer status: %w", errdefs.FromGRPC(err))
        return content.Status{}, fmt.Errorf("error getting writer status: %w", errgrpc.ToNative(err))
    }

    return content.Status{
@ -83,7 +84,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
        Data: p,
    })
    if err != nil {
        return 0, fmt.Errorf("failed to send write: %w", errdefs.FromGRPC(err))
        return 0, fmt.Errorf("failed to send write: %w", errgrpc.ToNative(err))
    }

    n = int(resp.Offset - offset)
@ -120,7 +121,7 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.
        Labels: base.Labels,
    })
    if err != nil {
        return fmt.Errorf("commit failed: %w", errdefs.FromGRPC(err))
        return fmt.Errorf("commit failed: %w", errgrpc.ToNative(err))
    }

    if size != 0 && resp.Offset != size {
@ -26,16 +26,15 @@ import (
    "sort"
    "strings"

    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/containerd/v2/core/images"
    "github.com/containerd/containerd/v2/pkg/labels"
    "github.com/containerd/errdefs"
    "github.com/containerd/log"
    "github.com/containerd/platforms"
    digest "github.com/opencontainers/go-digest"
    ocispecs "github.com/opencontainers/image-spec/specs-go"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/labels"
)

type exportOptions struct {
@ -213,7 +212,7 @@ func copySourceLabels(ctx context.Context, infoProvider content.InfoProvider, de
    }

// Export implements Exporter.
func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error {
func Export(ctx context.Context, store content.InfoReaderProvider, writer io.Writer, opts ...ExportOpt) error {
    var eo exportOptions
    for _, opt := range opts {
        if err := opt(ctx, &eo); err != nil {
@ -226,17 +225,13 @@ func Export(ctx context.Context, store content.Provider, writer io.Writer, opts
    }

    manifests := make([]ocispec.Descriptor, 0, len(eo.manifests))
    if infoProvider, ok := store.(content.InfoProvider); ok {
        for _, desc := range eo.manifests {
            d, err := copySourceLabels(ctx, infoProvider, desc)
            if err != nil {
                log.G(ctx).WithError(err).WithField("desc", desc).Warn("failed to copy distribution.source labels")
                continue
            }
            manifests = append(manifests, d)
    for _, desc := range eo.manifests {
        d, err := copySourceLabels(ctx, store, desc)
        if err != nil {
            log.G(ctx).WithError(err).WithField("desc", desc).Warn("failed to copy distribution.source labels")
            continue
        }
    } else {
        manifests = append(manifests, eo.manifests...)
        manifests = append(manifests, d)
    }

    algorithms := map[string]struct{}{}
@ -399,10 +394,9 @@ func blobRecord(cs content.Provider, desc ocispec.Descriptor, opts *blobRecordOp
    if opts != nil && opts.blobFilter != nil && !opts.blobFilter(desc) {
        return tarRecord{}
    }
    path := path.Join("blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
    return tarRecord{
        Header: &tar.Header{
            Name:     path,
            Name:     path.Join(ocispec.ImageBlobsDir, desc.Digest.Algorithm().String(), desc.Digest.Encoded()),
            Mode:     0444,
            Size:     desc.Size,
            Typeflag: tar.TypeReg,
@ -483,7 +477,7 @@ func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord {

    return tarRecord{
        Header: &tar.Header{
            Name:     "index.json",
            Name:     ocispec.ImageIndexFile,
            Mode:     0644,
            Size:     int64(len(b)),
            Typeflag: tar.TypeReg,
@ -518,18 +512,14 @@ func manifestsRecord(ctx context.Context, store content.Provider, manifests map[
        if err := json.Unmarshal(p, &manifest); err != nil {
            return tarRecord{}, err
        }
        if err := manifest.Config.Digest.Validate(); err != nil {
            return tarRecord{}, fmt.Errorf("invalid manifest %q: %w", m.manifest.Digest, err)
        }

        dgst := manifest.Config.Digest
        if err := dgst.Validate(); err != nil {
            return tarRecord{}, err
        }
        mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded())
        mfsts[i].Config = path.Join(ocispec.ImageBlobsDir, dgst.Algorithm().String(), dgst.Encoded())
        for _, l := range manifest.Layers {
            path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded())
            mfsts[i].Layers = append(mfsts[i].Layers, path)
            mfsts[i].Layers = append(mfsts[i].Layers, path.Join(ocispec.ImageBlobsDir, l.Digest.Algorithm().String(), l.Digest.Encoded()))
        }

        for _, name := range m.names {
@ -27,11 +27,11 @@ import (
    "io"
    "path"

    "github.com/containerd/containerd/archive/compression"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/labels"
    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/containerd/v2/core/images"
    "github.com/containerd/containerd/v2/pkg/archive/compression"
    "github.com/containerd/containerd/v2/pkg/labels"
    "github.com/containerd/errdefs"
    "github.com/containerd/log"
    "github.com/containerd/platforms"
    digest "github.com/opencontainers/go-digest"
@ -133,7 +133,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opt
        return ocispec.Descriptor{}, fmt.Errorf("unsupported OCI version %s", ociLayout.Version)
    }

    idx, ok := blobs["index.json"]
    idx, ok := blobs[ocispec.ImageIndexFile]
    if !ok {
        return ocispec.Descriptor{}, fmt.Errorf("missing index.json in OCI layout %s", ocispec.ImageLayoutVersion)
    }
@ -256,8 +256,8 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size

func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) {
    layers := make([]ocispec.Descriptor, len(layerFiles))
    filters := make([]string, len(layerFiles))
    descs := map[digest.Digest]*ocispec.Descriptor{}
    filters := []string{}
    for i, f := range layerFiles {
        desc, ok := blobs[f]
        if !ok {
@ -265,7 +265,7 @@ func resolveLayers(ctx context.Context, store content.Store, layerFiles []string
        }
        layers[i] = desc
        descs[desc.Digest] = &layers[i]
        filters = append(filters, fmt.Sprintf("labels.\"%s\"==%s", labels.LabelUncompressed, desc.Digest.String()))
        filters[i] = fmt.Sprintf("labels.\"%s\"==%s", labels.LabelUncompressed, desc.Digest.String())
    }

    err := store.Walk(ctx, func(info content.Info) error {
@ -20,7 +20,7 @@ import (
    "fmt"
    "strings"

    "github.com/containerd/containerd/reference"
    "github.com/containerd/containerd/v2/pkg/reference"
    distref "github.com/distribution/reference"
    "github.com/opencontainers/go-digest"
)
@ -72,7 +72,6 @@ func isImagePrefix(s, prefix string) bool {
}

func normalizeReference(ref string) (string, error) {
    // TODO: Replace this function to not depend on reference package
    normalized, err := distref.ParseDockerRef(ref)
    if err != nil {
        return "", fmt.Errorf("normalize image ref %q: %w", ref, err)
@ -20,12 +20,13 @@ import (
    "context"
    "io"

    "github.com/containerd/containerd/archive/compression"
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/labels"
    "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "github.com/sirupsen/logrus"

    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/containerd/v2/pkg/archive/compression"
    "github.com/containerd/containerd/v2/pkg/labels"
    "github.com/containerd/log"
)

// GetDiffID gets the diff ID of the layer blob descriptor.
@ -75,7 +76,7 @@ func GetDiffID(ctx context.Context, cs content.Store, desc ocispec.Descriptor) (
    }
    info.Labels[labels.LabelUncompressed] = digest.String()
    if _, err := cs.Update(ctx, info, "labels"); err != nil {
        logrus.WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest)
        log.G(ctx).WithError(err).Warnf("failed to set %s label for %s", labels.LabelUncompressed, desc.Digest)
    }
    return digest, nil
}
@ -22,13 +22,12 @@ import (
    "fmt"
    "sort"

    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/errdefs"
    "github.com/containerd/platforms"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
    "golang.org/x/sync/errgroup"
    "golang.org/x/sync/semaphore"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
)

var (
@ -295,8 +294,8 @@ func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc
            return children, err
        }

        switch desc.MediaType {
        case ocispec.MediaTypeImageIndex, MediaTypeDockerSchema2ManifestList:
            // only limit manifests from an index
        if IsIndexType(desc.MediaType) {
            sort.SliceStable(children, func(i, j int) bool {
                if children[i].Platform == nil {
                    return false
@ -315,8 +314,6 @@ func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc
                children = children[:n]
            }
        }
        default:
            // only limit manifests from an index
        }
        return children, nil
    }
@ -23,13 +23,12 @@ import (
    "sort"
    "time"

    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/errdefs"
    "github.com/containerd/log"
    "github.com/containerd/platforms"
    digest "github.com/opencontainers/go-digest"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
)

// Image provides the model for how containerd views container images.
@ -59,6 +58,7 @@ type Image struct {
// DeleteOptions provide options on image delete
type DeleteOptions struct {
    Synchronous bool
    Target      *ocispec.Descriptor
}

// DeleteOpt allows configuring a delete operation
@ -73,6 +73,16 @@ func SynchronousDelete() DeleteOpt {
    }
}

// DeleteTarget is used to specify the target value an image is expected
// to have when deleting. If the image has a different target, then
// NotFound is returned.
func DeleteTarget(target *ocispec.Descriptor) DeleteOpt {
    return func(ctx context.Context, o *DeleteOptions) error {
        o.Target = target
        return nil
    }
}

// Store and interact with images
type Store interface {
    Get(ctx context.Context, name string) (Image, error)
@ -148,8 +158,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
    )

    if err := Walk(ctx, HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
        switch desc.MediaType {
        case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
        if IsManifestType(desc.MediaType) {
            p, err := content.ReadBlob(ctx, provider, desc)
            if err != nil {
                return nil, err
@ -170,17 +179,11 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
            }

            if desc.Platform == nil {
                p, err := content.ReadBlob(ctx, provider, manifest.Config)
                imagePlatform, err := ConfigPlatform(ctx, provider, manifest.Config)
                if err != nil {
                    return nil, err
                }

                var image ocispec.Image
                if err := json.Unmarshal(p, &image); err != nil {
                    return nil, err
                }

                if !platform.Match(platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture})) {
                if !platform.Match(imagePlatform) {
                    return nil, nil
                }

@ -193,7 +196,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
            })

            return nil, nil
        case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
        } else if IsIndexType(desc.MediaType) {
            p, err := content.ReadBlob(ctx, provider, desc)
            if err != nil {
                return nil, err
@ -261,7 +264,7 @@ func Config(ctx context.Context, provider content.Provider, image ocispec.Descri
    if err != nil {
        return ocispec.Descriptor{}, err
    }
    return manifest.Config, err
    return manifest.Config, nil
}

// Platforms returns one or more platforms supported by the image.
@ -276,20 +279,12 @@ func Platforms(ctx context.Context, provider content.Provider, image ocispec.Des
            return nil, ErrSkipDesc
        }

        switch desc.MediaType {
        case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
            p, err := content.ReadBlob(ctx, provider, desc)
        if IsConfigType(desc.MediaType) {
            imagePlatform, err := ConfigPlatform(ctx, provider, desc)
            if err != nil {
                return nil, err
            }

            var image ocispec.Image
            if err := json.Unmarshal(p, &image); err != nil {
                return nil, err
            }

            platformSpecs = append(platformSpecs,
                platforms.Normalize(ocispec.Platform{OS: image.OS, Architecture: image.Architecture}))
            platformSpecs = append(platformSpecs, imagePlatform)
        }
        return nil, nil
    }), ChildrenHandler(provider)), image)
@ -340,9 +335,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip

// Children returns the immediate children of content described by the descriptor.
func Children(ctx context.Context, provider content.Provider, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
    var descs []ocispec.Descriptor
    switch desc.MediaType {
    case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
    if IsManifestType(desc.MediaType) {
        p, err := content.ReadBlob(ctx, provider, desc)
        if err != nil {
            return nil, err
@ -359,9 +352,8 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
            return nil, err
        }

        descs = append(descs, manifest.Config)
        descs = append(descs, manifest.Layers...)
    case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
        return append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...), nil
    } else if IsIndexType(desc.MediaType) {
        p, err := content.ReadBlob(ctx, provider, desc)
        if err != nil {
            return nil, err
@ -376,16 +368,12 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
            return nil, err
        }

        descs = append(descs, index.Manifests...)
    default:
        if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) {
            // childless data types.
            return nil, nil
        }
        return append([]ocispec.Descriptor{}, index.Manifests...), nil
    } else if !IsLayerType(desc.MediaType) && !IsKnownConfig(desc.MediaType) {
        // Layers and configs are childless data types and should not be logged.
        log.G(ctx).Debugf("encountered unknown type %v; children may not be fetched", desc.MediaType)
    }

    return descs, nil
    return nil, nil
}

// unknownDocument represents a manifest, manifest list, or index that has not
@ -398,9 +386,10 @@ type unknownDocument struct {
    FSLayers json.RawMessage `json:"fsLayers,omitempty"` // schema 1
}

// validateMediaType returns an error if the byte slice is invalid JSON or if
// the media type identifies the blob as one format but it contains elements of
// another format.
// validateMediaType returns an error if the byte slice is invalid JSON,
// if the format of the blob is not supported, or if the media type
// identifies the blob as one format, but it identifies itself as, or
// contains elements of another format.
func validateMediaType(b []byte, mt string) error {
    var doc unknownDocument
    if err := json.Unmarshal(b, &doc); err != nil {
@ -409,19 +398,10 @@ func validateMediaType(b []byte, mt string) error {
    if len(doc.FSLayers) != 0 {
        return fmt.Errorf("media-type: schema 1 not supported")
    }
    switch mt {
    case MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
        if len(doc.Manifests) != 0 ||
            doc.MediaType == MediaTypeDockerSchema2ManifestList ||
            doc.MediaType == ocispec.MediaTypeImageIndex {
            return fmt.Errorf("media-type: expected manifest but found index (%s)", mt)
        }
    case MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
        if len(doc.Config) != 0 || len(doc.Layers) != 0 ||
            doc.MediaType == MediaTypeDockerSchema2Manifest ||
            doc.MediaType == ocispec.MediaTypeImageManifest {
            return fmt.Errorf("media-type: expected index but found manifest (%s)", mt)
        }
    if IsManifestType(mt) && (len(doc.Manifests) != 0 || IsIndexType(doc.MediaType)) {
        return fmt.Errorf("media-type: expected manifest but found index (%s)", mt)
    } else if IsIndexType(mt) && (len(doc.Config) != 0 || len(doc.Layers) != 0 || IsManifestType(doc.MediaType)) {
        return fmt.Errorf("media-type: expected index but found manifest (%s)", mt)
    }
    return nil
}
@ -442,3 +422,19 @@ func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.D
    }
    return config.RootFS.DiffIDs, nil
}

// ConfigPlatform returns a normalized platform from an image manifest config.
func ConfigPlatform(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) (ocispec.Platform, error) {
    p, err := content.ReadBlob(ctx, provider, configDesc)
    if err != nil {
        return ocispec.Platform{}, err
    }

    // Technically, this should be ocispec.Image, but we only need the
    // ocispec.Platform that is embedded in the image struct.
    var imagePlatform ocispec.Platform
    if err := json.Unmarshal(p, &imagePlatform); err != nil {
        return ocispec.Platform{}, err
    }
    return platforms.Normalize(imagePlatform), nil
}
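Note (not part of the diff): a hedged sketch of how the new ConfigPlatform helper might be called; the images package path and the provider wiring are assumptions from context, not shown in the hunk.

package example

import (
    "context"
    "fmt"

    "github.com/containerd/containerd/v2/core/content"
    "github.com/containerd/containerd/v2/core/images"
    "github.com/containerd/platforms"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// printPlatform resolves the platform recorded in an image config blob,
// replacing the inline ocispec.Image unmarshalling removed above.
func printPlatform(ctx context.Context, provider content.Provider, configDesc ocispec.Descriptor) error {
    p, err := images.ConfigPlatform(ctx, provider, configDesc)
    if err != nil {
        return err
    }
    fmt.Println(platforms.Format(p)) // e.g. "linux/amd64"
    return nil
}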
@ -20,7 +20,7 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
@ -22,7 +22,7 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/errdefs"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
|
||||
@ -34,6 +34,7 @@ const (
|
||||
MediaTypeDockerSchema2Layer = "application/vnd.docker.image.rootfs.diff.tar"
|
||||
MediaTypeDockerSchema2LayerForeign = "application/vnd.docker.image.rootfs.foreign.diff.tar"
|
||||
MediaTypeDockerSchema2LayerGzip = "application/vnd.docker.image.rootfs.diff.tar.gzip"
|
||||
MediaTypeDockerSchema2LayerZstd = "application/vnd.docker.image.rootfs.diff.tar.zstd"
|
||||
MediaTypeDockerSchema2LayerForeignGzip = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
|
||||
MediaTypeDockerSchema2Config = "application/vnd.docker.container.image.v1+json"
|
||||
MediaTypeDockerSchema2Manifest = "application/vnd.docker.distribution.manifest.v2+json"
|
||||
@ -81,6 +82,12 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
return "gzip", nil
|
||||
case MediaTypeDockerSchema2LayerZstd:
|
||||
if len(ext) > 0 {
|
||||
// Type is wrapped
|
||||
return "", nil
|
||||
}
|
||||
return "zstd", nil
|
||||
case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable: //nolint:staticcheck // Non-distributable layers are deprecated
|
||||
if len(ext) > 0 {
|
||||
switch ext[len(ext)-1] {
|
||||
@ -132,7 +139,7 @@ func IsLayerType(mt string) bool {
|
||||
// Parse Docker media types, strip off any + suffixes first
|
||||
switch base, _ := parseMediaTypes(mt); base {
|
||||
case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
|
||||
MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip:
|
||||
MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip, MediaTypeDockerSchema2LayerZstd:
|
||||
return true
|
||||
}
|
||||
return false
|
@ -65,6 +65,19 @@ func SynchronousDelete(ctx context.Context, o *DeleteOptions) error {
return nil
}

// WithLabel sets a label on a lease, and merges it with existing labels.
// It overwrites the existing value of the given label (if present).
func WithLabel(label, value string) Opt {
return func(l *Lease) error {
if l.Labels == nil {
l.Labels = map[string]string{label: value}
return nil
}
l.Labels[label] = value
return nil
}
}
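
A minimal usage sketch (not part of the vendored diff) of the new WithLabel lease option, combined with the existing WithExpiration option. The import path, Manager value and label key are assumptions for illustration.

package example // illustrative sketch, not part of the vendored diff

import (
	"context"
	"time"

	"github.com/containerd/containerd/v2/core/leases"
)

// createCacheLease creates a lease that expires after an hour and carries a
// label; WithLabel overwrites any previous value stored under the same key.
func createCacheLease(ctx context.Context, m leases.Manager) (leases.Lease, error) {
	return m.Create(ctx,
		leases.WithExpiration(time.Hour),
		leases.WithLabel("example.com/purpose", "build-cache"),
	)
}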
|
||||
|
||||
// WithLabels merges labels on a lease
|
||||
func WithLabels(labels map[string]string) Opt {
|
||||
return func(l *Lease) error {
|
@ -26,8 +26,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
remoteserrors "github.com/containerd/containerd/remotes/errors"
|
||||
"github.com/containerd/containerd/version"
|
||||
remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
"github.com/containerd/containerd/v2/pkg/tracing"
|
||||
"github.com/containerd/containerd/v2/version"
|
||||
"github.com/containerd/log"
|
||||
)
|
||||
|
||||
@ -86,15 +87,19 @@ type TokenOptions struct {
|
||||
|
||||
// OAuthTokenResponse is response from fetching token with a OAuth POST request
|
||||
type OAuthTokenResponse struct {
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
IssuedAt time.Time `json:"issued_at"`
|
||||
Scope string `json:"scope"`
|
||||
AccessToken string `json:"access_token"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
ExpiresInSeconds int `json:"expires_in"`
|
||||
IssuedAt time.Time `json:"issued_at"`
|
||||
Scope string `json:"scope"`
|
||||
}
|
||||
|
||||
// FetchTokenWithOAuth fetches a token using a POST request
|
||||
func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.Header, clientID string, to TokenOptions) (*OAuthTokenResponse, error) {
|
||||
c := *client
|
||||
client = &c
|
||||
tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "FetchTokenWithOAuth"))
|
||||
|
||||
form := url.Values{}
|
||||
if len(to.Scopes) > 0 {
|
||||
form.Set("scope", strings.Join(to.Scopes, " "))
|
||||
@ -152,15 +157,19 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.
|
||||
|
||||
// FetchTokenResponse is response from fetching token with GET request
|
||||
type FetchTokenResponse struct {
|
||||
Token string `json:"token"`
|
||||
AccessToken string `json:"access_token"`
|
||||
ExpiresIn int `json:"expires_in"`
|
||||
IssuedAt time.Time `json:"issued_at"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
Token string `json:"token"`
|
||||
AccessToken string `json:"access_token"`
|
||||
ExpiresInSeconds int `json:"expires_in"`
|
||||
IssuedAt time.Time `json:"issued_at"`
|
||||
RefreshToken string `json:"refresh_token"`
|
||||
}
|
||||
|
||||
// FetchToken fetches a token using a GET request
|
||||
func FetchToken(ctx context.Context, client *http.Client, headers http.Header, to TokenOptions) (*FetchTokenResponse, error) {
|
||||
c := *client
|
||||
client = &c
|
||||
tracing.UpdateHTTPClient(client, tracing.Name("remotes.docker.resolver", "FetchToken"))
|
||||
|
||||
req, err := http.NewRequestWithContext(ctx, http.MethodGet, to.Realm, nil)
|
||||
if err != nil {
|
||||
return nil, err
|
@ -49,7 +49,7 @@ type byScheme []Challenge
|
||||
func (bs byScheme) Len() int { return len(bs) }
|
||||
func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
|
||||
|
||||
// Sort in priority order: token > digest > basic
|
||||
// Less sorts in priority order: token > digest > basic
|
||||
func (bs byScheme) Less(i, j int) bool { return bs[i].Scheme > bs[j].Scheme }
|
||||
|
||||
// Octet types from RFC 2616.
|
@ -24,12 +24,12 @@ import (
|
||||
"net/http"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/remotes/docker/auth"
|
||||
remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/remotes/docker/auth"
|
||||
remoteerrors "github.com/containerd/containerd/remotes/errors"
|
||||
)
|
||||
|
||||
type dockerAuthorizer struct {
|
||||
@ -206,9 +206,10 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
|
||||
// authResult is used to control limit rate.
|
||||
type authResult struct {
|
||||
sync.WaitGroup
|
||||
token string
|
||||
refreshToken string
|
||||
err error
|
||||
token string
|
||||
refreshToken string
|
||||
expirationTime *time.Time
|
||||
err error
|
||||
}
|
||||
|
||||
// authHandler is used to handle auth request per registry server.
|
||||
@ -271,8 +272,12 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st
|
||||
// Docs: https://docs.docker.com/registry/spec/auth/scope
|
||||
scoped := strings.Join(to.Scopes, " ")
|
||||
|
||||
// Keep track of the expiration time of cached bearer tokens so they can be
|
||||
// refreshed when they expire without a server roundtrip.
|
||||
var expirationTime *time.Time
|
||||
|
||||
ah.Lock()
|
||||
if r, exist := ah.scopedTokens[scoped]; exist {
|
||||
if r, exist := ah.scopedTokens[scoped]; exist && (r.expirationTime == nil || r.expirationTime.After(time.Now())) {
|
||||
ah.Unlock()
|
||||
r.Wait()
|
||||
return r.token, r.refreshToken, r.err
|
||||
@ -286,7 +291,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st
|
||||
|
||||
defer func() {
|
||||
token = fmt.Sprintf("Bearer %s", token)
|
||||
r.token, r.refreshToken, r.err = token, refreshToken, err
|
||||
r.token, r.refreshToken, r.err, r.expirationTime = token, refreshToken, err, expirationTime
|
||||
r.Done()
|
||||
}()
|
||||
|
||||
@ -312,6 +317,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st
|
||||
if err != nil {
|
||||
return "", "", err
|
||||
}
|
||||
expirationTime = getExpirationTime(resp.ExpiresInSeconds)
|
||||
return resp.Token, resp.RefreshToken, nil
|
||||
}
|
||||
log.G(ctx).WithFields(log.Fields{
|
||||
@ -321,6 +327,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st
|
||||
}
|
||||
return "", "", err
|
||||
}
|
||||
expirationTime = getExpirationTime(resp.ExpiresInSeconds)
|
||||
return resp.AccessToken, resp.RefreshToken, nil
|
||||
}
|
||||
// do request anonymously
|
||||
@ -328,9 +335,18 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken st
if err != nil {
return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err)
}
expirationTime = getExpirationTime(resp.ExpiresInSeconds)
return resp.Token, resp.RefreshToken, nil
}

func getExpirationTime(expiresInSeconds int) *time.Time {
if expiresInSeconds <= 0 {
return nil
}
expirationTime := time.Now().Add(time.Duration(expiresInSeconds) * time.Second)
return &expirationTime
}
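
As a stand-alone illustration (not part of the vendored diff) of the caching rule introduced above: a nil expiration means the cached bearer token is reused indefinitely, otherwise it is reused only while the expiration lies in the future.

package main // illustrative sketch, not part of the vendored diff

import (
	"fmt"
	"time"
)

// cachedTokenUsable mirrors the condition applied to ah.scopedTokens above.
func cachedTokenUsable(expirationTime *time.Time) bool {
	return expirationTime == nil || expirationTime.After(time.Now())
}

func main() {
	soon := time.Now().Add(30 * time.Second)
	past := time.Now().Add(-time.Minute)
	fmt.Println(cachedTokenUsable(nil))   // true: registry reported no expiry
	fmt.Println(cachedTokenUsable(&soon)) // true: still valid, reuse cached token
	fmt.Println(cachedTokenUsable(&past)) // false: forces a fresh token fetch
}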
|
||||
|
||||
func invalidAuthorization(ctx context.Context, c auth.Challenge, responses []*http.Response) (retry bool, _ error) {
|
||||
errStr := c.Parameters["error"]
|
||||
if errStr == "" {
|
@ -22,9 +22,9 @@ import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/log"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
@ -41,9 +41,7 @@ const LegacyConfigMediaType = "application/octet-stream"
|
||||
// 1. original manifest will be deleted by next gc round.
|
||||
// 2. don't cover manifest list.
|
||||
func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
|
||||
if !(desc.MediaType == images.MediaTypeDockerSchema2Manifest ||
|
||||
desc.MediaType == ocispec.MediaTypeImageManifest) {
|
||||
|
||||
if !images.IsManifestType(desc.MediaType) {
|
||||
log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType)
|
||||
return desc, nil
|
||||
}
|
@ -23,10 +23,9 @@ import (
|
||||
"os"
|
||||
|
||||
fuzz "github.com/AdaLogics/go-fuzz-headers"
|
||||
"github.com/containerd/containerd/content/local"
|
||||
"github.com/containerd/containerd/v2/plugins/content/local"
|
||||
"github.com/containerd/log"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"github.com/sirupsen/logrus"
|
||||
)
|
||||
|
||||
func FuzzConvertManifest(data []byte) int {
|
||||
@ -34,7 +33,7 @@ func FuzzConvertManifest(data []byte) int {
|
||||
|
||||
// Do not log the message below
|
||||
// level=warning msg="do nothing for media type: ..."
|
||||
log.G(ctx).Logger.SetLevel(logrus.PanicLevel)
|
||||
log.G(ctx).Logger.SetLevel(log.PanicLevel)
|
||||
|
||||
f := fuzz.NewConsumer(data)
|
||||
desc := ocispec.Descriptor{}
|
@ -17,6 +17,8 @@
|
||||
package docker
|
||||
|
||||
import (
|
||||
"compress/flate"
|
||||
"compress/gzip"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"errors"
|
||||
@ -26,12 +28,13 @@ import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
"github.com/klauspost/compress/zstd"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
)
|
||||
|
||||
type dockerFetcher struct {
|
||||
@ -94,10 +97,8 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
|
||||
}
|
||||
|
||||
// Try manifests endpoints for manifests types
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
|
||||
images.MediaTypeDockerSchema1Manifest,
|
||||
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
|
||||
if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) ||
|
||||
desc.MediaType == images.MediaTypeDockerSchema1Manifest {
|
||||
|
||||
var firstErr error
|
||||
for _, host := range r.hosts {
|
||||
@ -152,12 +153,18 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
})
}

func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, ps ...string) (*request, int64, error) {
func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, mediatype string, ps ...string) (*request, int64, error) {
headReq := r.request(host, http.MethodHead, ps...)
if err := headReq.addNamespace(r.refspec.Hostname()); err != nil {
return nil, 0, err
}

if mediatype == "" {
headReq.header.Set("Accept", "*/*")
} else {
headReq.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
}

headResp, err := headReq.doWithRetries(ctx, nil)
if err != nil {
return nil, 0, err
@ -176,9 +183,15 @@ func (r dockerFetcher) createGetReq(ctx context.Context, host RegistryHost, ps .
|
||||
return getReq, headResp.ContentLength, nil
|
||||
}
|
||||
|
||||
func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) {
|
||||
func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest, opts ...remotes.FetchByDigestOpts) (io.ReadCloser, ocispec.Descriptor, error) {
|
||||
var desc ocispec.Descriptor
|
||||
ctx = log.WithLogger(ctx, log.G(ctx).WithField("digest", dgst))
|
||||
var config remotes.FetchByDigestConfig
|
||||
for _, o := range opts {
|
||||
if err := o(ctx, &config); err != nil {
|
||||
return nil, desc, err
|
||||
}
|
||||
}
|
||||
|
||||
hosts := r.filterHosts(HostCapabilityPull)
|
||||
if len(hosts) == 0 {
|
||||
@ -197,7 +210,7 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest) (i
|
||||
)
|
||||
|
||||
for _, host := range r.hosts {
|
||||
getReq, sz, err = r.createGetReq(ctx, host, "blobs", dgst.String())
|
||||
getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "blobs", dgst.String())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
@ -210,7 +223,7 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest) (i
|
||||
if getReq == nil {
|
||||
// Fall back to the "manifests" endpoint
|
||||
for _, host := range r.hosts {
|
||||
getReq, sz, err = r.createGetReq(ctx, host, "manifests", dgst.String())
|
||||
getReq, sz, err = r.createGetReq(ctx, host, config.Mediatype, "manifests", dgst.String())
|
||||
if err == nil {
|
||||
break
|
||||
}
|
||||
@ -232,7 +245,7 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest) (i
|
||||
}
|
||||
|
||||
seeker, err := newHTTPReadSeeker(sz, func(offset int64) (io.ReadCloser, error) {
|
||||
return r.open(ctx, getReq, "", offset)
|
||||
return r.open(ctx, getReq, config.Mediatype, offset)
|
||||
})
|
||||
if err != nil {
|
||||
return nil, desc, err
|
||||
@ -243,6 +256,9 @@ func (r dockerFetcher) FetchByDigest(ctx context.Context, dgst digest.Digest) (i
|
||||
Digest: dgst,
|
||||
Size: sz,
|
||||
}
|
||||
if config.Mediatype != "" {
|
||||
desc.MediaType = config.Mediatype
|
||||
}
|
||||
return seeker, desc, nil
|
||||
}
|
||||
|
||||
@ -252,6 +268,7 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string,
|
||||
} else {
|
||||
req.header.Set("Accept", strings.Join([]string{mediatype, `*/*`}, ", "))
|
||||
}
|
||||
req.header.Set("Accept-Encoding", "zstd;q=1.0, gzip;q=0.8, deflate;q=0.5")
|
||||
|
||||
if offset > 0 {
|
||||
// Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints
|
||||
@ -310,5 +327,32 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string,
|
||||
}
|
||||
}
|
||||
|
||||
return resp.Body, nil
body := resp.Body
encoding := strings.FieldsFunc(resp.Header.Get("Content-Encoding"), func(r rune) bool {
return r == ' ' || r == '\t' || r == ','
})
for i := len(encoding) - 1; i >= 0; i-- {
algorithm := strings.ToLower(encoding[i])
switch algorithm {
case "zstd":
r, err := zstd.NewReader(body)
if err != nil {
return nil, err
}
body = r.IOReadCloser()
case "gzip":
body, err = gzip.NewReader(body)
if err != nil {
return nil, err
}
case "deflate":
body = flate.NewReader(body)
case "identity", "":
// no content-encoding applied, use raw body
default:
return nil, errors.New("unsupported Content-Encoding algorithm: " + algorithm)
}
}

return body, nil
}
@ -25,6 +25,7 @@ import (
|
||||
"net/http"
|
||||
"net/http/httptest"
|
||||
"net/url"
|
||||
"strconv"
|
||||
)
|
||||
|
||||
func FuzzFetcher(data []byte) int {
|
||||
@ -35,7 +36,7 @@ func FuzzFetcher(data []byte) int {
|
||||
|
||||
s := httptest.NewServer(http.HandlerFunc(func(rw http.ResponseWriter, r *http.Request) {
|
||||
rw.Header().Set("content-range", fmt.Sprintf("bytes %d-%d/%d", 0, dataLen-1, dataLen))
|
||||
rw.Header().Set("content-length", fmt.Sprintf("%d", dataLen))
|
||||
rw.Header().Set("content-length", strconv.Itoa(dataLen))
|
||||
rw.Write(data)
|
||||
}))
|
||||
defer s.Close()
|
@ -22,10 +22,10 @@ import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/labels"
|
||||
"github.com/containerd/containerd/reference"
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/pkg/labels"
|
||||
"github.com/containerd/containerd/v2/pkg/reference"
|
||||
"github.com/containerd/log"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
@ -103,7 +103,7 @@ func appendDistributionSourceLabel(originLabel, repo string) string {
|
||||
}
|
||||
|
||||
func distributionSourceLabelKey(source string) string {
|
||||
return fmt.Sprintf("%s.%s", labels.LabelDistributionSource, source)
|
||||
return labels.LabelDistributionSource + "." + source
|
||||
}
|
||||
|
||||
// selectRepositoryMountCandidate will select the repo which has longest
|
@ -21,9 +21,8 @@ import (
|
||||
"fmt"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
)
|
||||
|
||||
const maxRetry = 3
|
@ -28,15 +28,14 @@ import (
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
remoteserrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
remoteserrors "github.com/containerd/containerd/remotes/errors"
|
||||
)
|
||||
|
||||
type dockerPusher struct {
|
||||
@ -104,12 +103,10 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
||||
host = hosts[0]
|
||||
)
|
||||
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
|
||||
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
|
||||
if images.IsManifestType(desc.MediaType) || images.IsIndexType(desc.MediaType) {
|
||||
isManifest = true
|
||||
existCheck = getManifestPath(p.object, desc.Digest)
|
||||
default:
|
||||
} else {
|
||||
existCheck = []string{"blobs", desc.Digest.String()}
|
||||
}
|
||||
|
||||
@ -143,9 +140,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
|
||||
Exists: true,
|
||||
},
|
||||
Status: content.Status{
|
||||
Ref: ref,
|
||||
Total: desc.Size,
|
||||
Offset: desc.Size,
|
||||
Ref: ref,
|
||||
// TODO: Set updated time?
|
||||
},
|
||||
})
|
@ -30,18 +30,18 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/reference"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
"github.com/containerd/containerd/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||
remoteerrors "github.com/containerd/containerd/remotes/errors"
|
||||
"github.com/containerd/containerd/tracing"
|
||||
"github.com/containerd/containerd/version"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/containerd/v2/core/remotes/docker/schema1" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
|
||||
remoteerrors "github.com/containerd/containerd/v2/core/remotes/errors"
|
||||
"github.com/containerd/containerd/v2/pkg/reference"
|
||||
"github.com/containerd/containerd/v2/pkg/tracing"
|
||||
"github.com/containerd/containerd/v2/version"
|
||||
)
|
||||
|
||||
var (
|
||||
@ -245,10 +245,9 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
}
|
||||
|
||||
var (
|
||||
firstErr error
|
||||
paths [][]string
|
||||
dgst = refspec.Digest()
|
||||
caps = HostCapabilityPull
|
||||
paths [][]string
|
||||
dgst = refspec.Digest()
|
||||
caps = HostCapabilityPull
|
||||
)
|
||||
|
||||
if dgst != "" {
|
||||
@ -279,8 +278,23 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
return "", ocispec.Descriptor{}, err
|
||||
}
|
||||
|
||||
var (
|
||||
// firstErr is the most relevant error encountered during resolution.
|
||||
// We use this to determine the error to return, making sure that the
|
||||
// error created furthest through the resolution process is returned.
|
||||
firstErr error
|
||||
firstErrPriority int
|
||||
)
|
||||
|
||||
nextHostOrFail := func(i int) string {
|
||||
if i < len(hosts)-1 {
|
||||
return "trying next host"
|
||||
}
|
||||
return "fetch failed"
|
||||
}
|
||||
|
||||
for _, u := range paths {
|
||||
for _, host := range hosts {
|
||||
for i, host := range hosts {
|
||||
ctx := log.WithLogger(ctx, log.G(ctx).WithField("host", host.Host))
|
||||
|
||||
req := base.request(host, http.MethodHead, u...)
|
||||
@ -298,25 +312,30 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
if errors.Is(err, ErrInvalidAuthorization) {
|
||||
err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err)
|
||||
}
|
||||
// Store the error for referencing later
|
||||
if firstErr == nil {
|
||||
if firstErrPriority < 1 {
|
||||
firstErr = err
|
||||
firstErrPriority = 1
|
||||
}
|
||||
log.G(ctx).WithError(err).Info("trying next host")
|
||||
log.G(ctx).WithError(err).Info(nextHostOrFail(i))
|
||||
continue // try another host
|
||||
}
|
||||
resp.Body.Close() // don't care about body contents.
|
||||
|
||||
if resp.StatusCode > 299 {
|
||||
if resp.StatusCode == http.StatusNotFound {
|
||||
log.G(ctx).Info("trying next host - response was http.StatusNotFound")
|
||||
if firstErrPriority < 2 {
|
||||
firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound)
|
||||
firstErrPriority = 2
|
||||
}
|
||||
log.G(ctx).Infof("%s after status: %s", nextHostOrFail(i), resp.Status)
|
||||
continue
|
||||
}
|
||||
if resp.StatusCode > 399 {
|
||||
// Set firstErr when encountering the first non-404 status code.
|
||||
if firstErr == nil {
|
||||
if firstErrPriority < 3 {
|
||||
firstErr = remoteerrors.NewUnexpectedStatusErr(resp)
|
||||
firstErrPriority = 3
|
||||
}
|
||||
log.G(ctx).Infof("%s after status: %s", nextHostOrFail(i), resp.Status)
|
||||
continue // try another host
|
||||
}
|
||||
return "", ocispec.Descriptor{}, remoteerrors.NewUnexpectedStatusErr(resp)
|
||||
@ -387,8 +406,9 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
}
|
||||
// Prevent resolving to excessively large manifests
|
||||
if size > MaxManifestSize {
|
||||
if firstErr == nil {
|
||||
if firstErrPriority < 4 {
|
||||
firstErr = fmt.Errorf("rejecting %d byte manifest for %s: %w", size, ref, errdefs.ErrNotFound)
|
||||
firstErrPriority = 4
|
||||
}
|
||||
continue
|
||||
}
|
||||
@ -404,10 +424,8 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
|
||||
}
|
||||
}
|
||||
|
||||
// If above loop terminates without return, then there was an error.
|
||||
// "firstErr" contains the first non-404 error. That is, "firstErr == nil"
|
||||
// means that either no registries were given or each registry returned 404.
|
||||
|
||||
// If above loop terminates without return or error, then no registries
|
||||
// were provided.
|
||||
if firstErr == nil {
|
||||
firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound)
|
||||
}
|
||||
@ -787,37 +805,3 @@ func isPortError(err error, host string) bool {
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// HTTPFallback is an http.RoundTripper which allows fallback from https to http
|
||||
// for registry endpoints with configurations for both http and TLS, such as
|
||||
// defaulted localhost endpoints.
|
||||
//
|
||||
// Deprecated: Use NewHTTPFallback instead.
|
||||
type HTTPFallback struct {
|
||||
http.RoundTripper
|
||||
}
|
||||
|
||||
func (f HTTPFallback) RoundTrip(r *http.Request) (*http.Response, error) {
|
||||
resp, err := f.RoundTripper.RoundTrip(r)
|
||||
var tlsErr tls.RecordHeaderError
|
||||
if errors.As(err, &tlsErr) && string(tlsErr.RecordHeader[:]) == "HTTP/" {
|
||||
// server gave HTTP response to HTTPS client
|
||||
plainHTTPUrl := *r.URL
|
||||
plainHTTPUrl.Scheme = "http"
|
||||
|
||||
plainHTTPRequest := *r
|
||||
plainHTTPRequest.URL = &plainHTTPUrl
|
||||
|
||||
if r.Body != nil && r.GetBody != nil {
|
||||
body, err := r.GetBody()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
plainHTTPRequest.Body = body
|
||||
}
|
||||
|
||||
return f.RoundTripper.RoundTrip(&plainHTTPRequest)
|
||||
}
|
||||
|
||||
return resp, err
|
||||
}
|
@ -27,23 +27,24 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/core/remotes"
|
||||
"github.com/containerd/containerd/v2/pkg/archive/compression"
|
||||
"github.com/containerd/containerd/v2/pkg/deprecation"
|
||||
"github.com/containerd/containerd/v2/pkg/labels"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
digest "github.com/opencontainers/go-digest"
|
||||
specs "github.com/opencontainers/image-spec/specs-go"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/sync/errgroup"
|
||||
|
||||
"github.com/containerd/containerd/archive/compression"
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/labels"
|
||||
"github.com/containerd/containerd/remotes"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -68,14 +69,30 @@ type Converter struct {
|
||||
layerBlobs map[digest.Digest]ocispec.Descriptor
|
||||
}
|
||||
|
||||
var ErrDisabled = fmt.Errorf("Pulling Schema 1 images have been deprecated and disabled by default since containerd v2.0. "+
|
||||
"As a workaround you may set an environment variable `%s=1`, but this will be completely removed in containerd v2.1.",
|
||||
deprecation.EnvPullSchema1Image)
|
||||
|
||||
// NewConverter returns a new converter
|
||||
func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter {
|
||||
func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) (*Converter, error) {
|
||||
s := os.Getenv(deprecation.EnvPullSchema1Image)
|
||||
if s == "" {
|
||||
return nil, ErrDisabled
|
||||
}
|
||||
enable, err := strconv.ParseBool(s)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to parse `%s=%s`: %w", deprecation.EnvPullSchema1Image, s, err)
|
||||
}
|
||||
if !enable {
|
||||
return nil, ErrDisabled
|
||||
}
|
||||
log.L.Warn(ErrDisabled)
|
||||
return &Converter{
|
||||
contentStore: contentStore,
|
||||
fetcher: fetcher,
|
||||
blobMap: map[digest.Digest]blobState{},
|
||||
layerBlobs: map[digest.Digest]ocispec.Descriptor{},
|
||||
}
|
||||
}, nil
|
||||
}
|
||||
|
||||
// Handle fetching descriptors for a docker media type
|
@ -23,7 +23,7 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/reference"
|
||||
"github.com/containerd/containerd/v2/pkg/reference"
|
||||
)
|
||||
|
||||
// RepositoryScope returns a repository scope string such as "repository:foo/bar:pull"
|
@ -20,8 +20,8 @@ import (
|
||||
"fmt"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/moby/locker"
|
||||
)
|
||||
|
@ -25,15 +25,14 @@ import (
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/containerd/containerd/v2/core/images"
|
||||
"github.com/containerd/containerd/v2/pkg/labels"
|
||||
"github.com/containerd/errdefs"
|
||||
"github.com/containerd/log"
|
||||
"github.com/containerd/platforms"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
"golang.org/x/sync/semaphore"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/errdefs"
|
||||
"github.com/containerd/containerd/images"
|
||||
"github.com/containerd/containerd/labels"
|
||||
)
|
||||
|
||||
type refKeyPrefix struct{}
|
||||
@ -72,17 +71,17 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
|
||||
}
|
||||
}
|
||||
|
||||
switch mt := desc.MediaType; {
|
||||
case mt == images.MediaTypeDockerSchema2Manifest || mt == ocispec.MediaTypeImageManifest:
|
||||
switch {
|
||||
case images.IsManifestType(desc.MediaType):
|
||||
return "manifest-" + key
|
||||
case mt == images.MediaTypeDockerSchema2ManifestList || mt == ocispec.MediaTypeImageIndex:
|
||||
case images.IsIndexType(desc.MediaType):
|
||||
return "index-" + key
|
||||
case images.IsLayerType(mt):
|
||||
case images.IsLayerType(desc.MediaType):
|
||||
return "layer-" + key
|
||||
case images.IsKnownConfig(mt):
|
||||
case images.IsKnownConfig(desc.MediaType):
|
||||
return "config-" + key
|
||||
default:
|
||||
log.G(ctx).Warnf("reference for unknown type: %s", mt)
|
||||
log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType)
|
||||
return "unknown-" + key
|
||||
}
|
||||
}
|
||||
@ -91,23 +90,21 @@ func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
|
||||
// discovered in a call to Dispatch. Use with ChildrenHandler to do a full
|
||||
// recursive fetch.
|
||||
func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc {
|
||||
return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
|
||||
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(log.Fields{
|
||||
"digest": desc.Digest,
|
||||
"mediatype": desc.MediaType,
|
||||
"size": desc.Size,
|
||||
}))
|
||||
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema1Manifest:
|
||||
if desc.MediaType == images.MediaTypeDockerSchema1Manifest {
|
||||
return nil, fmt.Errorf("%v not supported", desc.MediaType)
|
||||
default:
|
||||
err := Fetch(ctx, ingester, fetcher, desc)
|
||||
if errdefs.IsAlreadyExists(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
err := Fetch(ctx, ingester, fetcher, desc)
|
||||
if errdefs.IsAlreadyExists(err) {
|
||||
return nil, nil
|
||||
}
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
|
||||
@ -215,20 +212,18 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st
|
||||
indexStack := []ocispec.Descriptor{}
|
||||
|
||||
filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
|
||||
if images.IsManifestType(desc.MediaType) {
|
||||
m.Lock()
|
||||
manifests = append(manifests, desc)
|
||||
m.Unlock()
|
||||
return nil, images.ErrStopHandler
|
||||
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||
} else if images.IsIndexType(desc.MediaType) {
|
||||
m.Lock()
|
||||
indexStack = append(indexStack, desc)
|
||||
m.Unlock()
|
||||
return nil, images.ErrStopHandler
|
||||
default:
|
||||
return nil, nil
|
||||
}
|
||||
return nil, nil
|
||||
})
|
||||
|
||||
pushHandler := PushHandler(pusher, store)
|
||||
@ -285,10 +280,6 @@ func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc {
|
||||
return nil, images.ErrSkipDesc
|
||||
}
|
||||
|
||||
if images.IsLayerType(desc.MediaType) {
|
||||
return nil, nil
|
||||
}
|
||||
|
||||
children, err := f(ctx, desc)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -323,24 +314,16 @@ func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher)
|
||||
return children, nil
|
||||
}
|
||||
|
||||
var descs []ocispec.Descriptor
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
|
||||
if m.Match(*desc.Platform) {
|
||||
descs = children
|
||||
} else {
|
||||
for _, child := range children {
|
||||
if child.MediaType == images.MediaTypeDockerSchema2Config ||
|
||||
child.MediaType == ocispec.MediaTypeImageConfig {
|
||||
|
||||
descs = append(descs, child)
|
||||
}
|
||||
if images.IsManifestType(desc.MediaType) && !m.Match(*desc.Platform) {
|
||||
var descs []ocispec.Descriptor
|
||||
for _, child := range children {
|
||||
if images.IsConfigType(child.MediaType) {
|
||||
descs = append(descs, child)
|
||||
}
|
||||
}
|
||||
default:
|
||||
descs = children
|
||||
return descs, nil
|
||||
}
|
||||
return descs, nil
|
||||
return children, nil
|
||||
}
|
||||
}
|
||||
|
||||
@ -355,10 +338,7 @@ func annotateDistributionSourceHandler(f images.HandlerFunc, provider content.In
|
||||
|
||||
// Distribution source is only used for config or blob but may be inherited from
|
||||
// a manifest or manifest list
|
||||
switch desc.MediaType {
|
||||
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
|
||||
images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
|
||||
default:
|
||||
if !images.IsManifestType(desc.MediaType) && !images.IsIndexType(desc.MediaType) {
|
||||
return children, nil
|
||||
}
|
||||
|
@ -20,7 +20,7 @@ import (
|
||||
"context"
|
||||
"io"
|
||||
|
||||
"github.com/containerd/containerd/content"
|
||||
"github.com/containerd/containerd/v2/core/content"
|
||||
"github.com/opencontainers/go-digest"
|
||||
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
)
|
||||
@ -65,7 +65,7 @@ type FetcherByDigest interface {
|
||||
// FetcherByDigest usually returns an incomplete descriptor.
|
||||
// Typically, the media type is always set to "application/octet-stream",
|
||||
// and the annotations are unset.
|
||||
FetchByDigest(ctx context.Context, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error)
|
||||
FetchByDigest(ctx context.Context, dgst digest.Digest, opts ...FetchByDigestOpts) (io.ReadCloser, ocispec.Descriptor, error)
|
||||
}
|
||||
|
||||
// Pusher pushes content
|
||||
@ -92,3 +92,20 @@ type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writ
func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
return fn(ctx, desc)
}

// FetchByDigestConfig provides configuration for fetching content by digest
type FetchByDigestConfig struct {
//Mediatype specifies mediatype header to append for fetch request
Mediatype string
}

// FetchByDigestOpts allows callers to set options for fetch object
type FetchByDigestOpts func(context.Context, *FetchByDigestConfig) error

// WithMediaType sets the media type header for fetch request
func WithMediaType(mediatype string) FetchByDigestOpts {
return func(ctx context.Context, cfg *FetchByDigestConfig) error {
cfg.Mediatype = mediatype
return nil
}
}
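
A minimal sketch (not part of the vendored diff) of how a caller might pass the new WithMediaType option so the Accept header and the returned descriptor use the requested media type instead of the default "application/octet-stream". The import paths are assumed for containerd v2; the fetcher is assumed to implement FetcherByDigest.

package example // illustrative sketch, not part of the vendored diff

import (
	"context"
	"io"

	"github.com/containerd/containerd/v2/core/remotes"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// fetchManifestByDigest requests a manifest media type explicitly.
func fetchManifestByDigest(ctx context.Context, f remotes.FetcherByDigest, dgst digest.Digest) (io.ReadCloser, ocispec.Descriptor, error) {
	return f.FetchByDigest(ctx, dgst, remotes.WithMediaType(ocispec.MediaTypeImageManifest))
}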
|
@ -1,5 +1,3 @@
|
||||
//go:build !windows
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
@ -16,15 +14,10 @@
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package platforms
|
||||
package defaults
|
||||
|
||||
import (
|
||||
specs "github.com/opencontainers/image-spec/specs-go/v1"
|
||||
const (
|
||||
// DefaultDiffer will set the default differ for the platform.
|
||||
// This differ should be compatible with the windows snapshotter.
|
||||
DefaultDiffer = "windows"
|
||||
)
|
||||
|
||||
// NewMatcher returns the default Matcher for containerd
|
||||
func newDefaultMatcher(platform specs.Platform) Matcher {
|
||||
return &matcher{
|
||||
Platform: Normalize(platform),
|
||||
}
|
||||
}
|
@ -1,5 +1,3 @@
|
||||
//go:build !linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
@ -16,13 +14,9 @@
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package seed
|
||||
package defaults
|
||||
|
||||
import (
|
||||
"crypto/rand"
|
||||
"io"
|
||||
const (
|
||||
// DefaultRuntime is the default freebsd runtime
|
||||
DefaultRuntime = "wtf.sbk.runj.v1"
|
||||
)
|
||||
|
||||
func tryReadRandom(p []byte) {
|
||||
io.ReadFull(rand.Reader, p)
|
||||
}
|
@ -14,11 +14,9 @@
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package seed
|
||||
package defaults
|
||||
|
||||
import "golang.org/x/sys/unix"
|
||||
|
||||
func tryReadRandom(p []byte) {
|
||||
// Ignore errors, just decreases uniqueness of seed
|
||||
unix.Getrandom(p, unix.GRND_NONBLOCK)
|
||||
}
|
||||
const (
|
||||
// DefaultRuntime is the default linux runtime
|
||||
DefaultRuntime = "io.containerd.runc.v2"
|
||||
)
|
24
vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_linux.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "overlayfs"
|
||||
)
|
26
vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_unix.go
generated
vendored
Normal file
@ -0,0 +1,26 @@
|
||||
//go:build darwin || freebsd || solaris
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "native"
|
||||
)
|
24
vendor/github.com/containerd/containerd/v2/defaults/defaults_snapshotter_windows.go
generated
vendored
Normal file
@ -0,0 +1,24 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package defaults
|
||||
|
||||
const (
|
||||
// DefaultSnapshotter will set the default snapshotter for the platform.
|
||||
// This will be based on the client compilation target, so take that into
|
||||
// account when choosing this value.
|
||||
DefaultSnapshotter = "windows"
|
||||
)
|
@ -32,8 +32,6 @@ const (
|
||||
// DefaultFIFODir is the default location used by client-side cio library
|
||||
// to store FIFOs.
|
||||
DefaultFIFODir = "/run/containerd/fifo"
|
||||
// DefaultRuntime is the default linux runtime
|
||||
DefaultRuntime = "io.containerd.runc.v2"
|
||||
// DefaultConfigDir is the default location for config files.
|
||||
DefaultConfigDir = "/etc/containerd"
|
||||
)
|
130
vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_linux.go
generated
vendored
Normal file
@ -0,0 +1,130 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fsverity
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
|
||||
"github.com/containerd/containerd/v2/pkg/kernelversion"
|
||||
"golang.org/x/sys/unix"
|
||||
)
|
||||
|
||||
type fsverityEnableArg struct {
|
||||
version uint32
|
||||
hashAlgorithm uint32
|
||||
blockSize uint32
|
||||
saltSize uint32
|
||||
saltPtr uint64
|
||||
sigSize uint32
|
||||
reserved1 uint32
|
||||
sigPtr uint64
|
||||
reserved2 [11]uint64
|
||||
}
|
||||
|
||||
const (
|
||||
defaultBlockSize int = 4096
|
||||
maxDigestSize uint16 = 64
|
||||
)
|
||||
|
||||
func IsSupported(rootPath string) (bool, error) {
|
||||
minKernelVersion := kernelversion.KernelVersion{Kernel: 5, Major: 4}
|
||||
s, err := kernelversion.GreaterEqualThan(minKernelVersion)
|
||||
if err != nil {
|
||||
return s, err
|
||||
}
|
||||
|
||||
integrityDir, err := os.MkdirTemp(rootPath, ".fsverity-check-*")
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer os.RemoveAll(integrityDir)
|
||||
|
||||
digestPath := filepath.Join(integrityDir, "supported")
|
||||
digestFile, err := os.Create(digestPath)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
digestFile.Close()
|
||||
|
||||
eerr := Enable(digestPath)
|
||||
if eerr != nil {
|
||||
return false, eerr
|
||||
}
|
||||
|
||||
return true, nil
|
||||
}
|
||||
|
||||
func IsEnabled(path string) (bool, error) {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return false, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
var attr int32
|
||||
|
||||
_, _, flagErr := unix.Syscall(syscall.SYS_IOCTL, f.Fd(), uintptr(unix.FS_IOC_GETFLAGS), uintptr(unsafe.Pointer(&attr)))
|
||||
if flagErr != 0 {
|
||||
return false, fmt.Errorf("error getting inode flags: %w", flagErr)
|
||||
}
|
||||
|
||||
if attr&unix.FS_VERITY_FL == unix.FS_VERITY_FL {
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
func Enable(path string) error {
|
||||
f, err := os.Open(path)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
var args = &fsverityEnableArg{}
|
||||
args.version = 1
|
||||
args.hashAlgorithm = 1
|
||||
|
||||
// fsverity block size should be the minimum between the page size
|
||||
// and the file system block size
|
||||
// If neither value is retrieved successfully, set fsverity block size to the default value
|
||||
blockSize := unix.Getpagesize()
|
||||
|
||||
s := unix.Stat_t{}
|
||||
serr := unix.Stat(path, &s)
|
||||
if serr == nil && int(s.Blksize) < blockSize {
|
||||
blockSize = int(s.Blksize)
|
||||
}
|
||||
|
||||
if blockSize <= 0 {
|
||||
blockSize = defaultBlockSize
|
||||
}
|
||||
|
||||
args.blockSize = uint32(blockSize)

_, _, errno := unix.Syscall(syscall.SYS_IOCTL, f.Fd(), uintptr(unix.FS_IOC_ENABLE_VERITY), uintptr(unsafe.Pointer(args)))
if errno != 0 {
return fmt.Errorf("enable fsverity failed: %w", errno)
}

return nil
}
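
For illustration only (not part of the vendored diff): a sketch of the intended call order for the new fsverity helpers. Note that internal/fsverity is not importable outside the containerd module, and the paths are placeholders.

package example // illustrative sketch, not part of the vendored diff

import (
	"fmt"

	"github.com/containerd/containerd/v2/internal/fsverity"
)

// protectBlob enables fsverity on a file when the kernel and filesystem
// support it, then verifies the flag was set.
func protectBlob(rootPath, blobPath string) error {
	ok, err := fsverity.IsSupported(rootPath)
	if err != nil {
		return err
	}
	if !ok {
		return fmt.Errorf("fsverity is not supported under %s", rootPath)
	}
	if err := fsverity.Enable(blobPath); err != nil {
		return err
	}
	enabled, err := fsverity.IsEnabled(blobPath)
	if err != nil {
		return err
	}
	if !enabled {
		return fmt.Errorf("fsverity flag was not set on %s", blobPath)
	}
	return nil
}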
|
33
vendor/github.com/containerd/containerd/v2/internal/fsverity/fsverity_other.go
generated
vendored
Normal file
@ -0,0 +1,33 @@
|
||||
//go:build !linux
|
||||
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package fsverity
|
||||
|
||||
import "fmt"
|
||||
|
||||
func IsSupported(rootPath string) (bool, error) {
|
||||
return false, fmt.Errorf("fsverity is only supported on Linux systems")
|
||||
}
|
||||
|
||||
func IsEnabled(path string) (bool, error) {
|
||||
return false, fmt.Errorf("fsverity is only supported on Linux systems")
|
||||
}
|
||||
|
||||
func Enable(_ string) error {
|
||||
return fmt.Errorf("fsverity is only supported on Linux systems")
|
||||
}
|
@ -45,13 +45,18 @@ const (
|
||||
Gzip
|
||||
// Zstd is zstd compression algorithm.
|
||||
Zstd
|
||||
// Unknown is used when a plugin handles the algorithm.
|
||||
Unknown
|
||||
)
|
||||
|
||||
const disablePigzEnv = "CONTAINERD_DISABLE_PIGZ"
|
||||
const (
|
||||
disablePigzEnv = "CONTAINERD_DISABLE_PIGZ"
|
||||
disableIgzipEnv = "CONTAINERD_DISABLE_IGZIP"
|
||||
)
|
||||
|
||||
var (
|
||||
initPigz sync.Once
|
||||
unpigzPath string
|
||||
initGzip sync.Once
|
||||
gzipPath string
|
||||
)
|
||||
|
||||
var (
|
||||
@ -146,7 +151,7 @@ func magicNumberMatcher(m []byte) matcher {
|
||||
|
||||
// zstdMatcher detects zstd compression algorithm.
|
||||
// There are two frame formats defined by Zstandard: Zstandard frames and Skippable frames.
|
||||
// See https://tools.ietf.org/id/draft-kucherawy-dispatch-zstd-00.html#rfc.section.2 for more details.
|
||||
// See https://datatracker.ietf.org/doc/html/rfc8878#section-3 for more details.
|
||||
func zstdMatcher() matcher {
|
||||
return func(source []byte) bool {
|
||||
if bytes.HasPrefix(source, zstdMagic) {
|
||||
@ -254,22 +259,27 @@ func (compression *Compression) Extension() string {
|
||||
return "gz"
|
||||
case Zstd:
|
||||
return "zst"
|
||||
case Unknown:
|
||||
return "unknown"
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
func gzipDecompress(ctx context.Context, buf io.Reader) (io.ReadCloser, error) {
|
||||
initPigz.Do(func() {
|
||||
if unpigzPath = detectPigz(); unpigzPath != "" {
|
||||
log.L.Debug("using pigz for decompression")
|
||||
initGzip.Do(func() {
|
||||
if gzipPath = detectCommand("igzip", disableIgzipEnv); gzipPath != "" {
|
||||
log.L.Debug("using igzip for decompression")
|
||||
return
|
||||
}
|
||||
if gzipPath = detectCommand("unpigz", disablePigzEnv); gzipPath != "" {
|
||||
log.L.Debug("using unpigz for decompression")
|
||||
}
|
||||
})
|
||||
|
||||
if unpigzPath == "" {
|
||||
if gzipPath == "" {
|
||||
return gzip.NewReader(buf)
|
||||
}
|
||||
|
||||
return cmdStream(exec.CommandContext(ctx, unpigzPath, "-d", "-c"), buf)
|
||||
return cmdStream(exec.CommandContext(ctx, gzipPath, "-d", "-c"), buf)
|
||||
}
|
||||
|
||||
func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
|
||||
@ -296,26 +306,23 @@ func cmdStream(cmd *exec.Cmd, in io.Reader) (io.ReadCloser, error) {
|
||||
return reader, nil
|
||||
}
|
||||
|
||||
func detectPigz() string {
|
||||
path, err := exec.LookPath("unpigz")
|
||||
func detectCommand(path, disableEnvName string) string {
|
||||
// Check if this command is disabled via the env variable
|
||||
value := os.Getenv(disableEnvName)
|
||||
if value != "" {
|
||||
disable, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
log.L.WithError(err).Warnf("could not parse %s: %s", disableEnvName, value)
|
||||
}
|
||||
|
||||
if disable {
|
||||
return ""
|
||||
}
|
||||
}
|
||||
|
||||
path, err := exec.LookPath(path)
|
||||
if err != nil {
|
||||
log.L.WithError(err).Debug("unpigz not found, falling back to go gzip")
|
||||
return ""
|
||||
}
|
||||
|
||||
// Check if pigz disabled via CONTAINERD_DISABLE_PIGZ env variable
|
||||
value := os.Getenv(disablePigzEnv)
|
||||
if value == "" {
|
||||
return path
|
||||
}
|
||||
|
||||
disable, err := strconv.ParseBool(value)
|
||||
if err != nil {
|
||||
log.L.WithError(err).Warnf("could not parse %s: %s", disablePigzEnv, value)
|
||||
return path
|
||||
}
|
||||
|
||||
if disable {
|
||||
log.L.WithError(err).Debugf("%s not found", path)
|
||||
return ""
|
||||
}
|
||||
|
72
vendor/github.com/containerd/containerd/v2/pkg/deprecation/deprecation.go
generated
vendored
Normal file
@ -0,0 +1,72 @@
|
||||
/*
|
||||
Copyright The containerd Authors.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package deprecation
|
||||
|
||||
type Warning string
|
||||
|
||||
const (
|
||||
// Prefix is a standard prefix for all Warnings, used for filtering plugin Exports
|
||||
Prefix = "io.containerd.deprecation/"
|
||||
// PullSchema1Image is a warning for the use of schema 1 images
|
||||
PullSchema1Image Warning = Prefix + "pull-schema-1-image"
|
||||
// GoPluginLibrary is a warning for the use of dynamic library Go plugins
|
||||
GoPluginLibrary Warning = Prefix + "go-plugin-library"
|
||||
// CRIRegistryMirrors is a warning for the use of the `mirrors` property
|
||||
CRIRegistryMirrors Warning = Prefix + "cri-registry-mirrors"
|
||||
// CRIRegistryAuths is a warning for the use of the `auths` property
|
||||
CRIRegistryAuths Warning = Prefix + "cri-registry-auths"
|
||||
// CRIRegistryConfigs is a warning for the use of the `configs` property
|
||||
CRIRegistryConfigs Warning = Prefix + "cri-registry-configs"
|
||||
// OTLPTracingConfig is a warning for the use of the `otlp` property
|
||||
TracingOTLPConfig Warning = Prefix + "tracing-processor-config"
|
||||
// TracingServiceConfig is a warning for the use of the `tracing` property
|
||||
TracingServiceConfig Warning = Prefix + "tracing-service-config"
|
||||
)
|
||||
|
||||
const (
|
||||
EnvPrefix = "CONTAINERD_ENABLE_DEPRECATED_"
|
||||
EnvPullSchema1Image = EnvPrefix + "PULL_SCHEMA_1_IMAGE"
|
||||
)
|
||||
|
||||
var messages = map[Warning]string{
|
||||
PullSchema1Image: "Schema 1 images are deprecated since containerd v1.7, disabled in containerd v2.0, and will be removed in containerd v2.1. " +
|
||||
`Since containerd v1.7.8, schema 1 images are identified by the "io.containerd.image/converted-docker-schema1" label.`,
|
||||
GoPluginLibrary: "Dynamically-linked Go plugins as containerd runtimes are deprecated since containerd v2.0 and removed in containerd v2.1.",
|
||||
CRIRegistryMirrors: "The `mirrors` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1." +
|
||||
"Use `config_path` instead.",
|
||||
CRIRegistryAuths: "The `auths` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.3 and will be removed in containerd v2.1." +
|
||||
"Use `ImagePullSecrets` instead.",
|
||||
CRIRegistryConfigs: "The `configs` property of `[plugins.\"io.containerd.grpc.v1.cri\".registry]` is deprecated since containerd v1.5 and will be removed in containerd v2.1." +
|
||||
"Use `config_path` instead.",
|
||||
|
||||
TracingOTLPConfig: "The `otlp` property of `[plugins.\"io.containerd.tracing.processor.v1\".otlp]` is deprecated since containerd v1.6 and will be removed in containerd v2.0." +
|
||||
"Use OTLP environment variables instead: https://opentelemetry.io/docs/specs/otel/protocol/exporter/",
|
||||
TracingServiceConfig: "The `tracing` property of `[plugins.\"io.containerd.internal.v1\".tracing]` is deprecated since containerd v1.6 and will be removed in containerd v2.0." +
|
||||
"Use OTEL environment variables instead: https://opentelemetry.io/docs/specs/otel/configuration/sdk-environment-variables/",
|
||||
}

// Valid checks whether a given Warning is valid
func Valid(id Warning) bool {
_, ok := messages[id]
return ok
}

// Message returns the human-readable message for a given Warning
func Message(id Warning) (string, bool) {
msg, ok := messages[id]
return msg, ok
}
|
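The block above is the whole surface of the deprecation package: the Warning string type, the well-known warning IDs, and the Valid/Message lookups. A minimal sketch of how a caller might surface one of these warnings follows; the import path github.com/containerd/containerd/v2/pkg/deprecation is assumed from the vendored v2 layout, and the environment-variable gating shown is only illustrative (EnvPullSchema1Image above documents the real opt-out variable for schema 1 pulls).

package main

import (
	"fmt"
	"os"

	"github.com/containerd/containerd/v2/pkg/deprecation" // assumed vendor path
)

// warnIfDeprecated prints the human-readable message for a known warning,
// unless the operator opted back in via the documented environment variable.
func warnIfDeprecated(w deprecation.Warning) {
	if !deprecation.Valid(w) {
		return // unknown id, nothing to report
	}
	if w == deprecation.PullSchema1Image && os.Getenv(deprecation.EnvPullSchema1Image) == "1" {
		return // explicitly re-enabled (illustrative check, not the vendored logic)
	}
	if msg, ok := deprecation.Message(w); ok {
		fmt.Fprintln(os.Stderr, "DEPRECATION:", msg)
	}
}

func main() {
	warnIfDeprecated(deprecation.PullSchema1Image)
}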
@ -36,10 +36,6 @@ func ContextDialer(ctx context.Context, address string) (net.Conn, error) {
return timeoutDialer(address, 0)
}

// Dialer returns a GRPC net.Conn connected to the provided address
// Deprecated: use ContextDialer and grpc.WithContextDialer.
var Dialer = timeoutDialer

func timeoutDialer(address string, timeout time.Duration) (net.Conn, error) {
var (
stopC = make(chan struct{})
@ -19,9 +19,9 @@
package dialer

import (
"errors"
"fmt"
"net"
"os"
"strings"
"syscall"
"time"
@ -34,16 +34,7 @@ func DialAddress(address string) string {
}

func isNoent(err error) bool {
if err != nil {
if nerr, ok := err.(*net.OpError); ok {
if serr, ok := nerr.Err.(*os.SyscallError); ok {
if serr.Err == syscall.ENOENT {
return true
}
}
}
}
return false
return errors.Is(err, syscall.ENOENT)
}

func dialer(address string, timeout time.Duration) (net.Conn, error) {
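The isNoent rewrite above drops the hand-rolled unwrapping of *net.OpError and *os.SyscallError in favour of errors.Is, which walks the Unwrap chain that both wrapper types provide. A small self-contained illustration (not part of the vendored code) of why the two forms are equivalent:

package main

import (
	"errors"
	"fmt"
	"net"
	"os"
	"syscall"
)

func main() {
	// A dial failure the way the net package reports it: ENOENT wrapped in
	// *os.SyscallError, wrapped in *net.OpError. errors.Is unwraps both layers
	// and compares against the sentinel errno.
	err := &net.OpError{
		Op:  "dial",
		Net: "unix",
		Err: os.NewSyscallError("connect", syscall.ENOENT),
	}
	fmt.Println(errors.Is(err, syscall.ENOENT)) // true
}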
@ -20,7 +20,7 @@ import (
"fmt"
"io"

"github.com/containerd/containerd/errdefs"
"github.com/containerd/errdefs"
)

/*
@ -208,7 +208,7 @@ func (p *parser) field() (string, error) {
case tokenQuoted:
return p.unquote(pos, s, false)
case tokenIllegal:
return "", p.mkerr(pos, p.scanner.err)
return "", p.mkerr(pos, "%s", p.scanner.err)
}

return "", p.mkerr(pos, "expected field or quoted")
@ -229,7 +229,7 @@ func (p *parser) operator() (operator, error) {
return 0, p.mkerr(pos, "unsupported operator %q", s)
}
case tokenIllegal:
return 0, p.mkerr(pos, p.scanner.err)
return 0, p.mkerr(pos, "%s", p.scanner.err)
}

return 0, p.mkerr(pos, `expected an operator ("=="|"!="|"~=")`)
@ -244,7 +244,7 @@ func (p *parser) value(allowAltQuotes bool) (string, error) {
case tokenQuoted:
return p.unquote(pos, s, allowAltQuotes)
case tokenIllegal:
return "", p.mkerr(pos, p.scanner.err)
return "", p.mkerr(pos, "%s", p.scanner.err)
}

return "", p.mkerr(pos, "expected value or quoted")
@ -28,7 +28,7 @@ import (
"fmt"
"regexp"

"github.com/containerd/containerd/errdefs"
"github.com/containerd/errdefs"
)

const (
94
vendor/github.com/containerd/containerd/v2/pkg/kernelversion/kernel_linux.go
generated
vendored
Normal file
94
vendor/github.com/containerd/containerd/v2/pkg/kernelversion/kernel_linux.go
generated
vendored
Normal file
@ -0,0 +1,94 @@
/*
Copyright The containerd Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/*
File copied and customized based on
https://github.com/moby/moby/tree/v20.10.14/profiles/seccomp/kernel_linux.go
*/

package kernelversion

import (
"bytes"
"fmt"
"sync"

"golang.org/x/sys/unix"
)

// KernelVersion holds information about the kernel.
type KernelVersion struct {
Kernel uint64 // Version of the Kernel (i.e., the "4" in "4.1.2-generic")
Major uint64 // Major revision of the Kernel (i.e., the "1" in "4.1.2-generic")
}

// String implements fmt.Stringer for KernelVersion
func (k *KernelVersion) String() string {
if k.Kernel > 0 || k.Major > 0 {
return fmt.Sprintf("%d.%d", k.Kernel, k.Major)
}
return ""
}

var (
currentKernelVersion *KernelVersion
kernelVersionError error
once sync.Once
)

// getKernelVersion gets the current kernel version.
func getKernelVersion() (*KernelVersion, error) {
once.Do(func() {
var uts unix.Utsname
if err := unix.Uname(&uts); err != nil {
return
}
// Remove the \x00 from the release for Atoi to parse correctly
currentKernelVersion, kernelVersionError = parseRelease(string(uts.Release[:bytes.IndexByte(uts.Release[:], 0)]))
})
return currentKernelVersion, kernelVersionError
}

// parseRelease parses a string and creates a KernelVersion based on it.
func parseRelease(release string) (*KernelVersion, error) {
var version = KernelVersion{}

// We're only make sure we get the "kernel" and "major revision". Sometimes we have
// 3.12.25-gentoo, but sometimes we just have 3.12-1-amd64.
_, err := fmt.Sscanf(release, "%d.%d", &version.Kernel, &version.Major)
if err != nil {
return nil, fmt.Errorf("failed to parse kernel version %q: %w", release, err)
}
return &version, nil
}

// GreaterEqualThan checks if the host's kernel version is greater than, or
// equal to the given kernel version v. Only "kernel version" and "major revision"
// can be specified (e.g., "3.12") and will be taken into account, which means
// that 3.12.25-gentoo and 3.12-1-amd64 are considered equal (kernel: 3, major: 12).
func GreaterEqualThan(minVersion KernelVersion) (bool, error) {
kv, err := getKernelVersion()
if err != nil {
return false, err
}
if kv.Kernel > minVersion.Kernel {
return true, nil
}
if kv.Kernel == minVersion.Kernel && kv.Major >= minVersion.Major {
return true, nil
}
return false, nil
}
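The new kernelversion package (import path taken from the vendored file header above) only exposes KernelVersion and GreaterEqualThan. A short Linux-only usage sketch:

//go:build linux

package main

import (
	"fmt"
	"log"

	"github.com/containerd/containerd/v2/pkg/kernelversion"
)

func main() {
	// Gate a feature on a 5.4+ kernel; only kernel and major revision are compared,
	// so 5.4.0-42-generic and 5.4-1-amd64 are treated the same.
	ok, err := kernelversion.GreaterEqualThan(kernelversion.KernelVersion{Kernel: 5, Major: 4})
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("kernel >= 5.4:", ok)
}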
@ -19,7 +19,7 @@ package labels
import (
"fmt"

"github.com/containerd/containerd/errdefs"
"github.com/containerd/errdefs"
)

const (
@ -21,8 +21,8 @@ import (
"fmt"
"os"

"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/identifiers"
"github.com/containerd/containerd/v2/pkg/identifiers"
"github.com/containerd/errdefs"
)

const (
@ -14,13 +14,13 @@
limitations under the License.
*/

// Package types provides convinient aliases that make google.golang.org/protobuf migration easier.
// Package types provides convenient aliases that make google.golang.org/protobuf migration easier.
package types

import (
"google.golang.org/genproto/protobuf/field_mask"
"google.golang.org/protobuf/types/known/anypb"
"google.golang.org/protobuf/types/known/emptypb"
field_mask "google.golang.org/protobuf/types/known/fieldmaskpb"
)

type Empty = emptypb.Empty
@ -154,26 +154,3 @@ func (r Spec) String() string {

return r.Locator + ":" + r.Object
}

// SplitObject provides two parts of the object spec, delimited by an "@"
// symbol. It does not perform any validation on correctness of the values
// returned, and it's the callers' responsibility to validate the result.
//
// If an "@" delimiter is found, it returns the part *including* the "@"
// delimiter as "tag", and the part after the "@" as digest.
//
// The example below produces "docker.io/library/ubuntu:latest@" and
// "sha256:deadbeef";
//
// t, d := SplitObject("docker.io/library/ubuntu:latest@sha256:deadbeef")
// fmt.Println(t) // docker.io/library/ubuntu:latest@
// fmt.Println(d) // sha256:deadbeef
//
// Deprecated: use [Parse] and [Spec.Digest] instead.
func SplitObject(obj string) (tag string, dgst digest.Digest) {
if i := strings.Index(obj, "@"); i >= 0 {
// Offset by one so preserve the "@" in the tag returned.
return obj[:i+1], digest.Digest(obj[i+1:])
}
return obj, ""
}
@ -19,20 +19,11 @@ package tracing
import (
"encoding/json"
"fmt"
"strings"

"go.opentelemetry.io/otel/attribute"
)

const (
spanDelimiter = "."
)

func makeSpanName(names ...string) string {
return strings.Join(names, spanDelimiter)
}

func any(k string, v interface{}) attribute.KeyValue {
func keyValue(k string, v any) attribute.KeyValue {
if v == nil {
return attribute.String(k, "<nil>")
}
@ -17,33 +17,49 @@
package tracing

import (
"github.com/sirupsen/logrus"
"github.com/containerd/log"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/trace"
)

// allLevels is the equivalent to [logrus.AllLevels].
//
// [logrus.AllLevels]: https://github.com/sirupsen/logrus/blob/v1.9.3/logrus.go#L80-L89
var allLevels = []log.Level{
log.PanicLevel,
log.FatalLevel,
log.ErrorLevel,
log.WarnLevel,
log.InfoLevel,
log.DebugLevel,
log.TraceLevel,
}

// NewLogrusHook creates a new logrus hook
func NewLogrusHook() *LogrusHook {
return &LogrusHook{}
}

// LogrusHook is a logrus hook which adds logrus events to active spans.
// If the span is not recording or the span context is invalid, the hook is a no-op.
// LogrusHook is a [logrus.Hook] which adds logrus events to active spans.
// If the span is not recording or the span context is invalid, the hook
// is a no-op.
//
// [logrus.Hook]: https://github.com/sirupsen/logrus/blob/v1.9.3/hooks.go#L3-L11
type LogrusHook struct{}

// Levels returns the logrus levels that this hook is interested in.
func (h *LogrusHook) Levels() []logrus.Level {
return logrus.AllLevels
func (h *LogrusHook) Levels() []log.Level {
return allLevels
}

// Fire is called when a log event occurs.
func (h *LogrusHook) Fire(entry *logrus.Entry) error {
func (h *LogrusHook) Fire(entry *log.Entry) error {
span := trace.SpanFromContext(entry.Context)
if span == nil {
return nil
}

if !span.SpanContext().IsValid() || !span.IsRecording() {
if !span.IsRecording() || !span.SpanContext().IsValid() {
return nil
}

@ -57,10 +73,10 @@ func (h *LogrusHook) Fire(entry *logrus.Entry) error {
return nil
}

func logrusDataToAttrs(data logrus.Fields) []attribute.KeyValue {
func logrusDataToAttrs(data map[string]any) []attribute.KeyValue {
attrs := make([]attribute.KeyValue, 0, len(data))
for k, v := range data {
attrs = append(attrs, any(k, v))
attrs = append(attrs, keyValue(k, v))
}
return attrs
}
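The hook now takes its levels and entry type from github.com/containerd/log instead of importing logrus directly. Assuming containerd/log keeps Entry and Level as type aliases for their logrus counterparts (which is what lets the hook still satisfy logrus.Hook), registration would look roughly like this; the tracing import path is assumed from the vendored v2 layout:

package main

import (
	"context"

	"github.com/containerd/containerd/v2/pkg/tracing" // assumed vendor path
	"github.com/sirupsen/logrus"
)

func main() {
	// Register once at startup; entries logged with a context that carries a
	// recording span are mirrored onto that span as events.
	logrus.StandardLogger().AddHook(tracing.NewLogrusHook())

	ctx := context.Background() // normally a context holding an active span
	logrus.WithContext(ctx).WithField("ref", "example").Info("pulling image")
}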
@ -19,6 +19,7 @@ package tracing
import (
"context"
"net/http"
"strings"

"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel"
@ -35,16 +36,11 @@ type StartConfig struct {

type SpanOpt func(config *StartConfig)

// WithHTTPRequest marks span as a HTTP request operation from client to server.
// It'll append attributes from the HTTP request object and mark it with `SpanKindClient` type.
//
// Deprecated: use upstream functionality from otelhttp directly instead. This function is kept for API compatibility
// but no longer works as expected due to required functionality no longer exported in OpenTelemetry libraries.
func WithHTTPRequest(_ *http.Request) SpanOpt {
// WithAttribute appends attributes to a new created span.
func WithAttribute(k string, v interface{}) SpanOpt {
return func(config *StartConfig) {
config.spanOpts = append(config.spanOpts,
trace.WithSpanKind(trace.SpanKindClient), // A client making a request to a server
)
trace.WithAttributes(Attribute(k, v)))
}
}

@ -92,8 +88,13 @@ func (s *Span) End() {
}

// AddEvent adds an event with provided name and options.
func (s *Span) AddEvent(name string, options ...trace.EventOption) {
s.otelSpan.AddEvent(name, options...)
func (s *Span) AddEvent(name string, attributes ...attribute.KeyValue) {
s.otelSpan.AddEvent(name, trace.WithAttributes(attributes...))
}

// RecordError will record err as an exception span event for this span
func (s *Span) RecordError(err error, options ...trace.EventOption) {
s.otelSpan.RecordError(err, options...)
}

// SetStatus sets the status of the current span.
@ -112,14 +113,16 @@ func (s *Span) SetAttributes(kv ...attribute.KeyValue) {
s.otelSpan.SetAttributes(kv...)
}

const spanDelimiter = "."

// Name sets the span name by joining a list of strings in dot separated format.
func Name(names ...string) string {
return makeSpanName(names...)
return strings.Join(names, spanDelimiter)
}

// Attribute takes a key value pair and returns attribute.KeyValue type.
func Attribute(k string, v interface{}) attribute.KeyValue {
return any(k, v)
func Attribute(k string, v any) attribute.KeyValue {
return keyValue(k, v)
}

// HTTPStatusCodeAttributes generates attributes of the HTTP namespace as specified by the OpenTelemetry
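With these hunks, AddEvent takes attribute.KeyValue values directly and WithAttribute replaces the removed WithHTTPRequest option. A hedged sketch of the resulting call pattern; StartSpan is assumed to exist alongside the helpers shown above, and the import path is again the assumed v2 vendor path:

package main

import (
	"context"

	"github.com/containerd/containerd/v2/pkg/tracing" // assumed vendor path
)

func resolve(ctx context.Context, ref string) {
	// Name joins the parts with the "." delimiter; WithAttribute seeds the new span.
	ctx, span := tracing.StartSpan(ctx, tracing.Name("resolver", "Resolve"),
		tracing.WithAttribute("image.ref", ref))
	defer span.End()

	// AddEvent now accepts attributes directly instead of trace.EventOption values.
	span.AddEvent("resolved", tracing.Attribute("image.ref", ref))
	_ = ctx // the returned context would be passed on to child operations
}

func main() {
	resolve(context.Background(), "docker.io/library/ubuntu:latest")
}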
@ -28,7 +28,7 @@ import (

"github.com/opencontainers/go-digest"

"github.com/containerd/containerd/content"
"github.com/containerd/containerd/v2/core/content"
)

func FuzzContentStoreWriter(data []byte) int {
@ -18,10 +18,9 @@ package local

import (
"fmt"
"sync"
"time"

"github.com/containerd/containerd/errdefs"
"github.com/containerd/errdefs"
)

// Handles locking references
@ -30,17 +29,11 @@ type lock struct {
since time.Time
}

var (
// locks lets us lock in process
locks = make(map[string]*lock)
locksMu sync.Mutex
)
func (s *store) tryLock(ref string) error {
s.locksMu.Lock()
defer s.locksMu.Unlock()

func tryLock(ref string) error {
locksMu.Lock()
defer locksMu.Unlock()

if v, ok := locks[ref]; ok {
if v, ok := s.locks[ref]; ok {
// Returning the duration may help developers distinguish dead locks (long duration) from
// lock contentions (short duration).
now := time.Now()
@ -50,13 +43,13 @@ func tryLock(ref string) error {
)
}

locks[ref] = &lock{time.Now()}
s.locks[ref] = &lock{time.Now()}
return nil
}

func unlock(ref string) {
locksMu.Lock()
defer locksMu.Unlock()
func (s *store) unlock(ref string) {
s.locksMu.Lock()
defer s.locksMu.Unlock()

delete(locks, ref)
delete(s.locks, ref)
}
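The lock map and its mutex move from package-level state into the store itself, so in-flight ingest refs are tracked per store rather than per process. A rough sketch of the visible effect; the local store import path is an assumption based on the v2 layout, while the content API path appears in the import hunks above:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd/v2/core/content"          // path shown in the import hunks above
	"github.com/containerd/containerd/v2/plugins/content/local" // assumed location of the local store
)

func main() {
	ctx := context.Background()

	a, err := local.NewStore("/tmp/store-a")
	if err != nil {
		log.Fatal(err)
	}
	b, err := local.NewStore("/tmp/store-b")
	if err != nil {
		log.Fatal(err)
	}

	// Each store now carries its own locks map, so the same ref can be ingested
	// concurrently in two different stores; only a second writer for the same
	// ref on the same store is refused while the first is still open.
	wa, err := a.Writer(ctx, content.WithRef("layer-1"))
	if err != nil {
		log.Fatal(err)
	}
	defer wa.Close()

	wb, err := b.Writer(ctx, content.WithRef("layer-1"))
	if err != nil {
		log.Fatal(err)
	}
	defer wb.Close()
}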
@ -21,8 +21,8 @@ import (
"io"
"os"

"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/errdefs"
)

// readerat implements io.ReaderAt in a completely stateless manner by opening
@ -27,13 +27,12 @@ import (
"sync"
"time"

"github.com/containerd/errdefs"
"github.com/containerd/log"
"github.com/sirupsen/logrus"

"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/filters"
"github.com/containerd/containerd/pkg/randutil"
"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/internal/fsverity"
"github.com/containerd/containerd/v2/pkg/filters"

"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
@ -65,9 +64,12 @@ type LabelStore interface {
// Store can generally support multi-reader, single-writer ingest of data,
// including resumable ingest.
type store struct {
root string
ls LabelStore
root string
ls LabelStore
integritySupported bool

locksMu sync.Mutex
locks map[string]*lock
ensureIngestRootOnce func() error
}

@ -82,11 +84,14 @@ func NewStore(root string) (content.Store, error) {
// require labels and should use `NewStore`. `NewLabeledStore` is primarily
// useful for tests or standalone implementations.
func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
s := &store{
root: root,
ls: ls,
}
supported, _ := fsverity.IsSupported(root)

s := &store{
root: root,
ls: ls,
integritySupported: supported,
locks: map[string]*lock{},
}
s.ensureIngestRootOnce = sync.OnceValue(s.ensureIngestRoot)
return s, nil
}
@ -300,10 +305,9 @@ func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Statu
}
return nil, err
}

defer fp.Close()

fis, err := fp.Readdir(-1)
fis, err := fp.Readdirnames(-1)
if err != nil {
return nil, err
}
@ -315,7 +319,7 @@ func (s *store) ListStatuses(ctx context.Context, fs ...string) ([]content.Statu

var active []content.Status
for _, fi := range fis {
p := filepath.Join(s.root, "ingest", fi.Name())
p := filepath.Join(s.root, "ingest", fi)
stat, err := s.status(p)
if err != nil {
if !os.IsNotExist(err) {
@ -353,16 +357,15 @@ func (s *store) WalkStatusRefs(ctx context.Context, fn func(string) error) error
}
return err
}

defer fp.Close()

fis, err := fp.Readdir(-1)
fis, err := fp.Readdirnames(-1)
if err != nil {
return err
}

for _, fi := range fis {
rf := filepath.Join(s.root, "ingest", fi.Name(), "ref")
rf := filepath.Join(s.root, "ingest", fi, "ref")

ref, err := readFileString(rf)
if err != nil {
@ -469,28 +472,14 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.
if wOpts.Ref == "" {
return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument)
}
var lockErr error
for count := uint64(0); count < 10; count++ {
if err := tryLock(wOpts.Ref); err != nil {
if !errdefs.IsUnavailable(err) {
return nil, err
}

lockErr = err
} else {
lockErr = nil
break
}
time.Sleep(time.Millisecond * time.Duration(randutil.Intn(1<<count)))
}

if lockErr != nil {
return nil, lockErr
if err := s.tryLock(wOpts.Ref); err != nil {
return nil, err
}

w, err := s.writer(ctx, wOpts.Ref, wOpts.Desc.Size, wOpts.Desc.Digest)
if err != nil {
unlock(wOpts.Ref)
s.unlock(wOpts.Ref)
return nil, err
}

@ -570,7 +559,7 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
total = status.Total
offset = status.Offset
} else {
logrus.Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error())
log.G(ctx).Infof("failed to resume the status from path %s: %s. will recreate them", path, err.Error())
}
}

@ -20,7 +20,7 @@ import (
"context"
"testing"

"github.com/containerd/containerd/content"
"github.com/containerd/containerd/v2/core/content"
)

func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) {
@ -26,11 +26,12 @@ import (
"runtime"
"time"

"github.com/containerd/errdefs"
"github.com/containerd/log"
"github.com/opencontainers/go-digest"

"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/v2/core/content"
"github.com/containerd/containerd/v2/internal/fsverity"
)

// writer represents a write transaction against the blob store.
@ -77,7 +78,7 @@ func (w *writer) Write(p []byte) (n int, err error) {

func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
// Ensure even on error the writer is fully closed
defer unlock(w.ref)
defer w.s.unlock(w.ref)

var base content.Info
for _, opt := range opts {
@ -138,6 +139,14 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
return err
}

// Enable content blob integrity verification if supported

if w.s.integritySupported {
if err := fsverity.Enable(target); err != nil {
log.G(ctx).Warnf("failed to enable integrity for blob %v: %s", target, err.Error())
}
}

// Ingest has now been made available in the content store, attempt to complete
// setting metadata but errors should only be logged and not returned since
// the content store cannot be cleanly rolled back.
@ -189,7 +198,7 @@ func (w *writer) Close() (err error) {
err = w.fp.Close()
writeTimestampFile(filepath.Join(w.path, "updatedat"), w.updatedAt)
w.fp = nil
unlock(w.ref)
w.s.unlock(w.ref)
return
}

@ -207,3 +216,11 @@ func (w *writer) Truncate(size int64) error {
}
return w.fp.Truncate(0)
}

func (w *writer) Sync() error {
if w.fp != nil {
return w.fp.Sync()
}

return nil
}
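Taken together, the Writer/Commit/Close hunks keep the ref locked for the lifetime of a write transaction and, when the backing filesystem supports it, seal the committed blob with fsverity. A sketch of the write-side flow these hunks modify, under the same assumed import paths as above:

package main

import (
	"context"
	"log"

	"github.com/containerd/containerd/v2/core/content"
	"github.com/containerd/containerd/v2/plugins/content/local" // assumed location of the local store
	"github.com/opencontainers/go-digest"
)

func main() {
	ctx := context.Background()
	cs, err := local.NewStore("/tmp/store")
	if err != nil {
		log.Fatal(err)
	}

	blob := []byte("hello world")
	w, err := cs.Writer(ctx, content.WithRef("example-blob")) // takes the per-ref lock
	if err != nil {
		log.Fatal(err)
	}
	defer w.Close() // releases the lock if Commit is never reached

	if _, err := w.Write(blob); err != nil {
		log.Fatal(err)
	}
	// Commit verifies size and digest, publishes the ingest as a blob and, per the
	// hunk above, attempts to enable fsverity on the result before unlocking the ref.
	if err := w.Commit(ctx, int64(len(blob)), digest.FromBytes(blob)); err != nil {
		log.Fatal(err)
	}
}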