vendor: update buildkit

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
Author: CrazyMax
Date: 2023-02-10 18:19:57 +01:00
Parent: b1949b7388
Commit: 8311b0963a
433 changed files with 34791 additions and 13411 deletions

View File

@@ -25,6 +25,26 @@ import (
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// Store combines the methods of content-oriented interfaces into a set that
// are commonly provided by complete implementations.
//
// Overall content lifecycle:
// - Ingester is used to initiate a write operation (aka ingestion)
// - IngestManager is used to manage (e.g. list, abort) active ingestions
// - Once an ingestion is complete (see Writer.Commit), Provider is used to
// query a single piece of content by its digest
// - Manager is used to manage (e.g. list, delete) previously committed content
//
// Note that until ingestion is complete, its content is not visible through
// Provider or Manager. Once ingestion is complete, it is no longer exposed
// through IngestManager.
type Store interface {
Manager
Provider
IngestManager
Ingester
}
// ReaderAt extends the standard io.ReaderAt interface with reporting of Size and io.Closer
type ReaderAt interface {
io.ReaderAt
@@ -42,10 +62,30 @@ type Provider interface {
// Ingester writes content
type Ingester interface {
- // Some implementations require WithRef to be included in opts.
+ // Writer initiates a writing operation (aka ingestion). A single ingestion
+ // is uniquely identified by its ref, provided using a WithRef option.
+ // Writer can be called multiple times with the same ref to access the same
+ // ingestion.
+ // Once all the data is written, use Writer.Commit to complete the ingestion.
Writer(ctx context.Context, opts ...WriterOpt) (Writer, error)
}
// IngestManager provides methods for managing ingestions. An ingestion is a
// not-yet-complete writing operation initiated using Ingester and identified
// by a ref string.
type IngestManager interface {
// Status returns the status of the provided ref.
Status(ctx context.Context, ref string) (Status, error)
// ListStatuses returns the status of any active ingestions whose ref match
// the provided regular expression. If empty, all active ingestions will be
// returned.
ListStatuses(ctx context.Context, filters ...string) ([]Status, error)
// Abort completely cancels the ingest operation targeted by ref.
Abort(ctx context.Context, ref string) error
}
// Info holds content specific information
//
// TODO(stevvooe): Consider a very different name for this struct. Info is way
@@ -58,7 +98,7 @@ type Info struct {
Labels map[string]string
}
- // Status of a content operation
+ // Status of a content operation (i.e. an ingestion)
type Status struct {
Ref string
Offset int64
@@ -94,21 +134,7 @@ type Manager interface {
Delete(ctx context.Context, dgst digest.Digest) error
}
- // IngestManager provides methods for managing ingests.
- type IngestManager interface {
- // Status returns the status of the provided ref.
- Status(ctx context.Context, ref string) (Status, error)
- // ListStatuses returns the status of any active ingestions whose ref match the
- // provided regular expression. If empty, all active ingestions will be
- // returned.
- ListStatuses(ctx context.Context, filters ...string) ([]Status, error)
- // Abort completely cancels the ingest operation targeted by ref.
- Abort(ctx context.Context, ref string) error
- }
- // Writer handles the write of content into a content store
+ // Writer handles writing of content into a content store
type Writer interface {
// Close closes the writer, if the writer has not been
// committed this allows resuming or aborting.
@@ -131,15 +157,6 @@ type Writer interface {
Truncate(size int64) error
}
- // Store combines the methods of content-oriented interfaces into a set that
- // are commonly provided by complete implementations.
- type Store interface {
- Manager
- Provider
- IngestManager
- Ingester
- }
// Opt is used to alter the mutable properties of content
type Opt func(*Info) error
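
As context for the lifecycle documented in the new Store comment above (Ingester, then Writer.Commit, then Provider/Manager), here is a minimal client-side sketch. It assumes a directory-backed local store and uses the WithRef, Copy and ReadBlob helpers from this package; the root path and ref name are illustrative.

```go
package main

import (
	"bytes"
	"context"
	"fmt"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/content/local"
	"github.com/opencontainers/go-digest"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	ctx := context.Background()

	// local.NewStore returns a content.Store backed by a directory.
	cs, err := local.NewStore("/tmp/content-store")
	if err != nil {
		panic(err)
	}

	blob := []byte("hello world")
	expected := digest.FromBytes(blob)

	// Ingester: start (or resume) an ingestion identified by its ref.
	cw, err := cs.Writer(ctx, content.WithRef("example-ref"))
	if err != nil {
		panic(err)
	}

	// Copy the data and complete the ingestion; Commit verifies size and digest.
	if err := content.Copy(ctx, cw, bytes.NewReader(blob), int64(len(blob)), expected); err != nil {
		panic(err)
	}

	// Provider/Manager: the committed blob is now addressable by its digest.
	data, err := content.ReadBlob(ctx, cs, ocispec.Descriptor{Digest: expected, Size: int64(len(blob))})
	if err != nil {
		panic(err)
	}
	fmt.Printf("read back %d bytes\n", len(data))
}
```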

View File

@@ -43,10 +43,16 @@ var bufPool = sync.Pool{
},
}
type reader interface {
Reader() io.Reader
}
// NewReader returns a io.Reader from a ReaderAt
func NewReader(ra ReaderAt) io.Reader {
- rd := io.NewSectionReader(ra, 0, ra.Size())
- return rd
+ if rd, ok := ra.(reader); ok {
+ return rd.Reader()
+ }
+ return io.NewSectionReader(ra, 0, ra.Size())
}
// ReadBlob retrieves the entire contents of the blob from the provider.
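
The reader interface added above lets a ReaderAt bypass the io.SectionReader wrapper when it can hand out a plain io.Reader itself (the local store's sizeReaderAt does exactly that a few files below). A minimal sketch of the pattern, using a hypothetical bytesReaderAt type:

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/containerd/containerd/content"
)

// bytesReaderAt is a hypothetical content.ReaderAt that can also hand out a
// plain io.Reader, so content.NewReader can skip the io.SectionReader wrapper.
type bytesReaderAt struct{ b []byte }

func (r bytesReaderAt) ReadAt(p []byte, off int64) (int, error) {
	return bytes.NewReader(r.b).ReadAt(p, off)
}
func (r bytesReaderAt) Size() int64       { return int64(len(r.b)) }
func (r bytesReaderAt) Close() error      { return nil }
func (r bytesReaderAt) Reader() io.Reader { return bytes.NewReader(r.b) }

func main() {
	rd := content.NewReader(bytesReaderAt{b: []byte("hello")})
	data, _ := io.ReadAll(rd)
	fmt.Printf("%s\n", data) // hello
}
```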

View File

@@ -0,0 +1,76 @@
//go:build gofuzz
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"bufio"
"bytes"
"context"
_ "crypto/sha256"
"io"
"testing"
"github.com/opencontainers/go-digest"
"github.com/containerd/containerd/content"
)
func FuzzContentStoreWriter(data []byte) int {
t := &testing.T{}
ctx := context.Background()
ctx, _, cs, cleanup := contentStoreEnv(t)
defer cleanup()
cw, err := cs.Writer(ctx, content.WithRef("myref"))
if err != nil {
return 0
}
if err := cw.Close(); err != nil {
return 0
}
// reopen, so we can test things
cw, err = cs.Writer(ctx, content.WithRef("myref"))
if err != nil {
return 0
}
err = checkCopyFuzz(int64(len(data)), cw, bufio.NewReader(io.NopCloser(bytes.NewReader(data))))
if err != nil {
return 0
}
expected := digest.FromBytes(data)
if err = cw.Commit(ctx, int64(len(data)), expected); err != nil {
return 0
}
return 1
}
func checkCopyFuzz(size int64, dst io.Writer, src io.Reader) error {
nn, err := io.Copy(dst, src)
if err != nil {
return err
}
if nn != size {
return err
}
return nil
}
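
This harness follows the go-fuzz/OSS-Fuzz convention: it builds only under the gofuzz tag and returns 1 for inputs worth keeping in the corpus, 0 otherwise. If you wanted to drive it from Go's native fuzzing engine instead, a thin bridge along the following lines would work; the wrapper name and seed value are illustrative, not part of this change.

```go
//go:build gofuzz

package local

import "testing"

// FuzzContentStoreWriterNative is a hypothetical bridge that feeds Go's native
// fuzzing corpus into the go-fuzz style harness above.
func FuzzContentStoreWriterNative(f *testing.F) {
	f.Add([]byte("seed"))
	f.Fuzz(func(t *testing.T, data []byte) {
		FuzzContentStoreWriter(data)
	})
}
```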

View File

@@ -18,6 +18,7 @@ package local
import (
"fmt"
"io"
"os"
"github.com/containerd/containerd/content"
@@ -65,3 +66,7 @@ func (ra sizeReaderAt) Size() int64 {
func (ra sizeReaderAt) Close() error {
return ra.fp.Close()
}
func (ra sizeReaderAt) Reader() io.Reader {
return io.LimitReader(ra.fp, ra.size)
}
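
The Reader method added here is what the reader type assertion in NewReader (earlier in this commit) picks up: a full read of a local blob now goes through an io.LimitReader over the underlying *os.File instead of an io.SectionReader, which can keep it eligible for the *io.LimitedReader fast paths that io.Copy consumers such as net.TCPConn use (e.g. sendfile).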

View File

@@ -34,7 +34,7 @@ import (
"github.com/containerd/containerd/log"
"github.com/sirupsen/logrus"
digest "github.com/opencontainers/go-digest"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -262,7 +262,7 @@ func (s *store) Walk(ctx context.Context, fn content.WalkFunc, fs ...string) err
return nil
}
- dgst := digest.NewDigestFromHex(alg.String(), filepath.Base(path))
+ dgst := digest.NewDigestFromEncoded(alg, filepath.Base(path))
if err := dgst.Validate(); err != nil {
// log error but don't report
log.L.WithError(err).WithField("path", path).Error("invalid digest for blob path")
@@ -505,6 +505,7 @@ func (s *store) resumeStatus(ref string, total int64, digester digest.Digester)
return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total)
}
//nolint:dupword
// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
fp, err := os.Open(data)
if err != nil {
@@ -628,14 +629,14 @@ func (s *store) blobPath(dgst digest.Digest) (string, error) {
return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument)
}
return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil
return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Encoded()), nil
}
func (s *store) ingestRoot(ref string) string {
// we take a digest of the ref to keep the ingest paths constant length.
// Note that this is not the current or potential digest of incoming content.
dgst := digest.FromString(ref)
return filepath.Join(s.root, "ingest", dgst.Hex())
return filepath.Join(s.root, "ingest", dgst.Encoded())
}
// ingestPaths are returned. The paths are the following:
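
In this file, blobPath and ingestRoot switch from the deprecated dgst.Hex() to dgst.Encoded() (identical output for sha256), and NewDigestFromEncoded takes the digest.Algorithm value directly rather than its String() form, so the on-disk layout does not change. A small sketch of the resulting paths, with an illustrative root directory:

```go
package main

import (
	"fmt"
	"path/filepath"

	"github.com/opencontainers/go-digest"
)

func main() {
	dgst := digest.FromString("example")

	// blobPath: <root>/blobs/<algorithm>/<encoded digest>
	blob := filepath.Join("/var/lib/containerd/content", "blobs",
		dgst.Algorithm().String(), dgst.Encoded())

	// ingestRoot: the ref is hashed so ingest paths have a constant length.
	ingest := filepath.Join("/var/lib/containerd/content", "ingest",
		digest.FromString("myref").Encoded())

	fmt.Println(blob)
	fmt.Println(ingest)
}
```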

View File

@@ -1,5 +1,4 @@
//go:build darwin || freebsd || netbsd
- // +build darwin freebsd netbsd
/*
Copyright The containerd Authors.

View File

@@ -1,5 +1,4 @@
//go:build openbsd
- // +build openbsd
/*
Copyright The containerd Authors.

View File

@@ -1,5 +1,4 @@
//go:build linux || solaris
- // +build linux solaris
/*
Copyright The containerd Authors.
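
These three hunks only drop the legacy // +build lines. Since Go 1.17 the //go:build directive is authoritative and gofmt keeps the two forms in sync, so once pre-1.17 toolchains are out of support the duplicated constraint can simply be deleted; the build tags themselves are unchanged.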

View File

@@ -0,0 +1,38 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package local
import (
"context"
"testing"
"github.com/containerd/containerd/content"
)
func contentStoreEnv(t testing.TB) (context.Context, string, content.Store, func()) {
tmpdir := t.TempDir()
cs, err := NewStore(tmpdir)
if err != nil {
t.Fatal(err)
}
ctx, cancel := context.WithCancel(context.Background())
return ctx, tmpdir, cs, func() {
cancel()
}
}

View File

@@ -36,9 +36,9 @@ func (ra *remoteReaderAt) Size() int64 {
func (ra *remoteReaderAt) ReadAt(p []byte, off int64) (n int, err error) {
rr := &contentapi.ReadContentRequest{
- Digest: ra.digest,
+ Digest: ra.digest.String(),
Offset: off,
- Size_: int64(len(p)),
+ Size: int64(len(p)),
}
// we need a child context with cancel, or the eventually called
// grpc.NewStream will leak the goroutine until the whole thing is cleared.

View File

@@ -23,7 +23,8 @@ import (
contentapi "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
protobuftypes "github.com/gogo/protobuf/types"
"github.com/containerd/containerd/protobuf"
protobuftypes "github.com/containerd/containerd/protobuf/types"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -42,7 +43,7 @@ func NewContentStore(client contentapi.ContentClient) content.Store {
func (pcs *proxyContentStore) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
resp, err := pcs.client.Info(ctx, &contentapi.InfoRequest{
- Digest: dgst,
+ Digest: dgst.String(),
})
if err != nil {
return content.Info{}, errdefs.FromGRPC(err)
@@ -81,7 +82,7 @@ func (pcs *proxyContentStore) Walk(ctx context.Context, fn content.WalkFunc, fil
func (pcs *proxyContentStore) Delete(ctx context.Context, dgst digest.Digest) error {
if _, err := pcs.client.Delete(ctx, &contentapi.DeleteContentRequest{
- Digest: dgst,
+ Digest: dgst.String(),
}); err != nil {
return errdefs.FromGRPC(err)
}
@@ -115,17 +116,17 @@ func (pcs *proxyContentStore) Status(ctx context.Context, ref string) (content.S
status := resp.Status
return content.Status{
Ref: status.Ref,
- StartedAt: status.StartedAt,
- UpdatedAt: status.UpdatedAt,
+ StartedAt: protobuf.FromTimestamp(status.StartedAt),
+ UpdatedAt: protobuf.FromTimestamp(status.UpdatedAt),
Offset: status.Offset,
Total: status.Total,
- Expected: status.Expected,
+ Expected: digest.Digest(status.Expected),
}, nil
}
func (pcs *proxyContentStore) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
resp, err := pcs.client.Update(ctx, &contentapi.UpdateRequest{
- Info: infoToGRPC(info),
+ Info: infoToGRPC(&info),
UpdateMask: &protobuftypes.FieldMask{
Paths: fieldpaths,
},
@@ -148,11 +149,11 @@ func (pcs *proxyContentStore) ListStatuses(ctx context.Context, filters ...strin
for _, status := range resp.Statuses {
statuses = append(statuses, content.Status{
Ref: status.Ref,
- StartedAt: status.StartedAt,
- UpdatedAt: status.UpdatedAt,
+ StartedAt: protobuf.FromTimestamp(status.StartedAt),
+ UpdatedAt: protobuf.FromTimestamp(status.UpdatedAt),
Offset: status.Offset,
Total: status.Total,
- Expected: status.Expected,
+ Expected: digest.Digest(status.Expected),
})
}
@@ -197,10 +198,10 @@ func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size in
}
if err := wrclient.Send(&contentapi.WriteContentRequest{
- Action: contentapi.WriteActionStat,
+ Action: contentapi.WriteAction_STAT,
Ref: ref,
Total: size,
- Expected: expected,
+ Expected: expected.String(),
}); err != nil {
return nil, 0, err
}
@@ -213,22 +214,22 @@ func (pcs *proxyContentStore) negotiate(ctx context.Context, ref string, size in
return wrclient, resp.Offset, nil
}
- func infoToGRPC(info content.Info) contentapi.Info {
- return contentapi.Info{
- Digest: info.Digest,
- Size_: info.Size,
- CreatedAt: info.CreatedAt,
- UpdatedAt: info.UpdatedAt,
+ func infoToGRPC(info *content.Info) *contentapi.Info {
+ return &contentapi.Info{
+ Digest: info.Digest.String(),
+ Size: info.Size,
+ CreatedAt: protobuf.ToTimestamp(info.CreatedAt),
+ UpdatedAt: protobuf.ToTimestamp(info.UpdatedAt),
Labels: info.Labels,
}
}
- func infoFromGRPC(info contentapi.Info) content.Info {
+ func infoFromGRPC(info *contentapi.Info) content.Info {
return content.Info{
- Digest: info.Digest,
- Size: info.Size_,
- CreatedAt: info.CreatedAt,
- UpdatedAt: info.UpdatedAt,
+ Digest: digest.Digest(info.Digest),
+ Size: info.Size,
+ CreatedAt: protobuf.FromTimestamp(info.CreatedAt),
+ UpdatedAt: protobuf.FromTimestamp(info.UpdatedAt),
Labels: info.Labels,
}
}
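
Most of the churn in this file comes from containerd's move off gogo/protobuf: the regenerated API types carry digests as plain strings and timestamps as *timestamppb.Timestamp, field names lose the gogo underscore suffix (Size_ becomes Size), and enum constants use the standard WriteAction_STAT style. The proxy therefore converts at the boundary with digest.Digest(...)/String() and the timestamp helpers in github.com/containerd/containerd/protobuf. A minimal sketch of those conversions (values are illustrative):

```go
package main

import (
	"fmt"
	"time"

	"github.com/containerd/containerd/protobuf"
	"github.com/opencontainers/go-digest"
)

func main() {
	// string <-> digest.Digest at the API boundary.
	wire := digest.FromString("example").String() // e.g. "sha256:..."
	dgst := digest.Digest(wire)
	fmt.Println(dgst.Algorithm(), dgst.Encoded()[:8])

	// time.Time <-> protobuf timestamp for Status/Info fields.
	ts := protobuf.ToTimestamp(time.Now())
	fmt.Println(protobuf.FromTimestamp(ts).UTC())
}
```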

View File

@@ -24,6 +24,7 @@ import (
contentapi "github.com/containerd/containerd/api/services/content/v1"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/protobuf"
digest "github.com/opencontainers/go-digest"
)
@@ -45,7 +46,7 @@ func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.W
if err == nil {
// try to keep these in sync
if resp.Digest != "" {
- rw.digest = resp.Digest
+ rw.digest = digest.Digest(resp.Digest)
}
}
@@ -54,7 +55,7 @@ func (rw *remoteWriter) send(req *contentapi.WriteContentRequest) (*contentapi.W
func (rw *remoteWriter) Status() (content.Status, error) {
resp, err := rw.send(&contentapi.WriteContentRequest{
- Action: contentapi.WriteActionStat,
+ Action: contentapi.WriteAction_STAT,
})
if err != nil {
return content.Status{}, fmt.Errorf("error getting writer status: %w", errdefs.FromGRPC(err))
@@ -64,8 +65,8 @@ func (rw *remoteWriter) Status() (content.Status, error) {
Ref: rw.ref,
Offset: resp.Offset,
Total: resp.Total,
- StartedAt: resp.StartedAt,
- UpdatedAt: resp.UpdatedAt,
+ StartedAt: protobuf.FromTimestamp(resp.StartedAt),
+ UpdatedAt: protobuf.FromTimestamp(resp.UpdatedAt),
}, nil
}
@@ -77,7 +78,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
offset := rw.offset
resp, err := rw.send(&contentapi.WriteContentRequest{
- Action: contentapi.WriteActionWrite,
+ Action: contentapi.WriteAction_WRITE,
Offset: offset,
Data: p,
})
@@ -92,7 +93,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
rw.offset += int64(n)
if resp.Digest != "" {
- rw.digest = resp.Digest
+ rw.digest = digest.Digest(resp.Digest)
}
return
}
@@ -112,10 +113,10 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.
}
}
resp, err := rw.send(&contentapi.WriteContentRequest{
- Action: contentapi.WriteActionCommit,
+ Action: contentapi.WriteAction_COMMIT,
Total: size,
Offset: rw.offset,
- Expected: expected,
+ Expected: expected.String(),
Labels: base.Labels,
})
if err != nil {
@@ -126,11 +127,12 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.
return fmt.Errorf("unexpected size: %v != %v", resp.Offset, size)
}
if expected != "" && resp.Digest != expected {
actual := digest.Digest(resp.Digest)
if expected != "" && actual != expected {
return fmt.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
}
- rw.digest = resp.Digest
+ rw.digest = actual
rw.offset = resp.Offset
return nil
}
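
For completeness, the proxy store and remote writer in the last two files are what a client gets when it talks to a content service over gRPC. A rough sketch of wiring one up; the socket path and namespace are illustrative and error handling is abbreviated:

```go
package main

import (
	"context"
	"fmt"

	contentapi "github.com/containerd/containerd/api/services/content/v1"
	"github.com/containerd/containerd/content/proxy"
	"github.com/containerd/containerd/namespaces"
	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	conn, err := grpc.Dial("unix:///run/containerd/containerd.sock",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		panic(err)
	}
	defer conn.Close()

	// proxy.NewContentStore adapts the generated gRPC client to content.Store.
	cs := proxy.NewContentStore(contentapi.NewContentClient(conn))

	// containerd API calls are namespaced.
	ctx := namespaces.WithNamespace(context.Background(), "default")

	statuses, err := cs.ListStatuses(ctx)
	fmt.Println(statuses, err)
}
```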