Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-07-26 05:08:02 +08:00)

Bump moby/buildkit

Signed-off-by: ulyssessouza <ulyssessouza@gmail.com>
23  vendor/github.com/containerd/containerd/images/annotations.go  (generated, vendored, new file)
@@ -0,0 +1,23 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package images

const (
	// AnnotationImageName is an annotation on a Descriptor in an index.json
	// containing the `Name` value as used by an `Image` struct
	AnnotationImageName = "io.containerd.image.name"
)
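A minimal sketch (not part of the vendored diff) of how this annotation pairs with the OCI ref-name annotation on an exported descriptor; the descriptor fields and the image name are illustrative values only.

```go
package main

import (
	"fmt"

	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	// Hypothetical descriptor as it might appear in an exported index.json:
	// the containerd annotation carries the full image name, while the OCI
	// annotation carries only the tag (see ociReferenceName in archive/reference.go).
	desc := ocispec.Descriptor{
		MediaType: ocispec.MediaTypeImageManifest,
		Annotations: map[string]string{
			images.AnnotationImageName: "docker.io/library/alpine:latest",
			ocispec.AnnotationRefName:  "latest",
		},
	}
	fmt.Println(desc.Annotations[images.AnnotationImageName])
}
```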
468  vendor/github.com/containerd/containerd/images/archive/exporter.go  (generated, vendored, new file)
@@ -0,0 +1,468 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package archive

import (
	"archive/tar"
	"context"
	"encoding/json"
	"io"
	"path"
	"sort"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	digest "github.com/opencontainers/go-digest"
	ocispecs "github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

type exportOptions struct {
	manifests          []ocispec.Descriptor
	platform           platforms.MatchComparer
	allPlatforms       bool
	skipDockerManifest bool
}

// ExportOpt defines options for configuring exported descriptors
type ExportOpt func(context.Context, *exportOptions) error

// WithPlatform defines the platform to require manifest lists have
// not exporting all platforms.
// Additionally, platform is used to resolve image configs for
// Docker v1.1, v1.2 format compatibility.
func WithPlatform(p platforms.MatchComparer) ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		o.platform = p
		return nil
	}
}

// WithAllPlatforms exports all manifests from a manifest list.
// Missing content will fail the export.
func WithAllPlatforms() ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		o.allPlatforms = true
		return nil
	}
}

// WithSkipDockerManifest skips creation of the Docker compatible
// manifest.json file.
func WithSkipDockerManifest() ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		o.skipDockerManifest = true
		return nil
	}
}

// WithImage adds the provided images to the exported archive.
func WithImage(is images.Store, name string) ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		img, err := is.Get(ctx, name)
		if err != nil {
			return err
		}

		img.Target.Annotations = addNameAnnotation(name, img.Target.Annotations)
		o.manifests = append(o.manifests, img.Target)

		return nil
	}
}

// WithManifest adds a manifest to the exported archive.
// When names are given they will be set on the manifest in the
// exported archive, creating an index record for each name.
// When no names are provided, it is up to caller to put name annotation to
// on the manifest descriptor if needed.
func WithManifest(manifest ocispec.Descriptor, names ...string) ExportOpt {
	return func(ctx context.Context, o *exportOptions) error {
		if len(names) == 0 {
			o.manifests = append(o.manifests, manifest)
		}
		for _, name := range names {
			mc := manifest
			mc.Annotations = addNameAnnotation(name, manifest.Annotations)
			o.manifests = append(o.manifests, mc)
		}

		return nil
	}
}

func addNameAnnotation(name string, base map[string]string) map[string]string {
	annotations := map[string]string{}
	for k, v := range base {
		annotations[k] = v
	}

	annotations[images.AnnotationImageName] = name
	annotations[ocispec.AnnotationRefName] = ociReferenceName(name)

	return annotations
}

// Export implements Exporter.
func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error {
	var eo exportOptions
	for _, opt := range opts {
		if err := opt(ctx, &eo); err != nil {
			return err
		}
	}

	records := []tarRecord{
		ociLayoutFile(""),
		ociIndexRecord(eo.manifests),
	}

	algorithms := map[string]struct{}{}
	dManifests := map[digest.Digest]*exportManifest{}
	resolvedIndex := map[digest.Digest]digest.Digest{}
	for _, desc := range eo.manifests {
		switch desc.MediaType {
		case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
			mt, ok := dManifests[desc.Digest]
			if !ok {
				// TODO(containerd): Skip if already added
				r, err := getRecords(ctx, store, desc, algorithms)
				if err != nil {
					return err
				}
				records = append(records, r...)

				mt = &exportManifest{
					manifest: desc,
				}
				dManifests[desc.Digest] = mt
			}

			name := desc.Annotations[images.AnnotationImageName]
			if name != "" && !eo.skipDockerManifest {
				mt.names = append(mt.names, name)
			}
		case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
			d, ok := resolvedIndex[desc.Digest]
			if !ok {
				records = append(records, blobRecord(store, desc))

				p, err := content.ReadBlob(ctx, store, desc)
				if err != nil {
					return err
				}

				var index ocispec.Index
				if err := json.Unmarshal(p, &index); err != nil {
					return err
				}

				var manifests []ocispec.Descriptor
				for _, m := range index.Manifests {
					if eo.platform != nil {
						if m.Platform == nil || eo.platform.Match(*m.Platform) {
							manifests = append(manifests, m)
						} else if !eo.allPlatforms {
							continue
						}
					}

					r, err := getRecords(ctx, store, m, algorithms)
					if err != nil {
						return err
					}

					records = append(records, r...)
				}

				if !eo.skipDockerManifest {
					if len(manifests) >= 1 {
						if len(manifests) > 1 {
							sort.SliceStable(manifests, func(i, j int) bool {
								if manifests[i].Platform == nil {
									return false
								}
								if manifests[j].Platform == nil {
									return true
								}
								return eo.platform.Less(*manifests[i].Platform, *manifests[j].Platform)
							})
						}
						d = manifests[0].Digest
						dManifests[d] = &exportManifest{
							manifest: manifests[0],
						}
					} else if eo.platform != nil {
						return errors.Wrap(errdefs.ErrNotFound, "no manifest found for platform")
					}
				}
				resolvedIndex[desc.Digest] = d
			}
			if d != "" {
				if name := desc.Annotations[images.AnnotationImageName]; name != "" {
					mt := dManifests[d]
					mt.names = append(mt.names, name)
				}

			}
		default:
			return errors.Wrap(errdefs.ErrInvalidArgument, "only manifests may be exported")
		}
	}

	if len(dManifests) > 0 {
		tr, err := manifestsRecord(ctx, store, dManifests)
		if err != nil {
			return errors.Wrap(err, "unable to create manifests file")
		}

		records = append(records, tr)
	}

	if len(algorithms) > 0 {
		records = append(records, directoryRecord("blobs/", 0755))
		for alg := range algorithms {
			records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
		}
	}

	tw := tar.NewWriter(writer)
	defer tw.Close()
	return writeTar(ctx, tw, records)
}

func getRecords(ctx context.Context, store content.Provider, desc ocispec.Descriptor, algorithms map[string]struct{}) ([]tarRecord, error) {
	var records []tarRecord
	exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		records = append(records, blobRecord(store, desc))
		algorithms[desc.Digest.Algorithm().String()] = struct{}{}
		return nil, nil
	}

	childrenHandler := images.ChildrenHandler(store)

	handlers := images.Handlers(
		childrenHandler,
		images.HandlerFunc(exportHandler),
	)

	// Walk sequentially since the number of fetches is likely one and doing in
	// parallel requires locking the export handler
	if err := images.Walk(ctx, handlers, desc); err != nil {
		return nil, err
	}

	return records, nil
}

type tarRecord struct {
	Header *tar.Header
	CopyTo func(context.Context, io.Writer) (int64, error)
}

func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord {
	path := path.Join("blobs", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
	return tarRecord{
		Header: &tar.Header{
			Name:     path,
			Mode:     0444,
			Size:     desc.Size,
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			r, err := cs.ReaderAt(ctx, desc)
			if err != nil {
				return 0, errors.Wrap(err, "failed to get reader")
			}
			defer r.Close()

			// Verify digest
			dgstr := desc.Digest.Algorithm().Digester()

			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
			if err != nil {
				return 0, errors.Wrap(err, "failed to copy to tar")
			}
			if dgstr.Digest() != desc.Digest {
				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
			}
			return n, nil
		},
	}
}

func directoryRecord(name string, mode int64) tarRecord {
	return tarRecord{
		Header: &tar.Header{
			Name:     name,
			Mode:     mode,
			Typeflag: tar.TypeDir,
		},
	}
}

func ociLayoutFile(version string) tarRecord {
	if version == "" {
		version = ocispec.ImageLayoutVersion
	}
	layout := ocispec.ImageLayout{
		Version: version,
	}

	b, err := json.Marshal(layout)
	if err != nil {
		panic(err)
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     ocispec.ImageLayoutFile,
			Mode:     0444,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}

}

func ociIndexRecord(manifests []ocispec.Descriptor) tarRecord {
	index := ocispec.Index{
		Versioned: ocispecs.Versioned{
			SchemaVersion: 2,
		},
		Manifests: manifests,
	}

	b, err := json.Marshal(index)
	if err != nil {
		panic(err)
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     "index.json",
			Mode:     0644,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}
}

type exportManifest struct {
	manifest ocispec.Descriptor
	names    []string
}

func manifestsRecord(ctx context.Context, store content.Provider, manifests map[digest.Digest]*exportManifest) (tarRecord, error) {
	mfsts := make([]struct {
		Config   string
		RepoTags []string
		Layers   []string
	}, len(manifests))

	var i int
	for _, m := range manifests {
		p, err := content.ReadBlob(ctx, store, m.manifest)
		if err != nil {
			return tarRecord{}, err
		}

		var manifest ocispec.Manifest
		if err := json.Unmarshal(p, &manifest); err != nil {
			return tarRecord{}, err
		}
		if err := manifest.Config.Digest.Validate(); err != nil {
			return tarRecord{}, errors.Wrapf(err, "invalid manifest %q", m.manifest.Digest)
		}

		dgst := manifest.Config.Digest
		mfsts[i].Config = path.Join("blobs", dgst.Algorithm().String(), dgst.Encoded())
		for _, l := range manifest.Layers {
			path := path.Join("blobs", l.Digest.Algorithm().String(), l.Digest.Encoded())
			mfsts[i].Layers = append(mfsts[i].Layers, path)
		}

		for _, name := range m.names {
			nname, err := familiarizeReference(name)
			if err != nil {
				return tarRecord{}, err
			}

			mfsts[i].RepoTags = append(mfsts[i].RepoTags, nname)
		}

		i++
	}

	b, err := json.Marshal(mfsts)
	if err != nil {
		return tarRecord{}, err
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     "manifest.json",
			Mode:     0644,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}, nil
}

func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
	sort.Slice(records, func(i, j int) bool {
		return records[i].Header.Name < records[j].Header.Name
	})

	var last string
	for _, record := range records {
		if record.Header.Name == last {
			continue
		}
		last = record.Header.Name
		if err := tw.WriteHeader(record.Header); err != nil {
			return err
		}
		if record.CopyTo != nil {
			n, err := record.CopyTo(ctx, tw)
			if err != nil {
				return err
			}
			if n != record.Header.Size {
				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
			}
		} else if record.Header.Size > 0 {
			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
		}
	}
	return nil
}
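A rough usage sketch of the new export path above, assuming a containerd client supplies the content and image stores; the socket path, namespace, image name, and output file are placeholders, not values from the diff.

```go
package main

import (
	"context"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images/archive"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/platforms"
)

func main() {
	// Assumed wiring: a containerd client provides the content store and
	// image store that Export reads from.
	ctx := namespaces.WithNamespace(context.Background(), "default")
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	out, err := os.Create("alpine.tar")
	if err != nil {
		panic(err)
	}
	defer out.Close()

	// WithImage annotates the image's target descriptor with its name, so
	// both the Docker-compatible manifest.json and the OCI index carry it.
	err = archive.Export(ctx, client.ContentStore(), out,
		archive.WithPlatform(platforms.Default()),
		archive.WithImage(client.ImageService(), "docker.io/library/alpine:latest"),
	)
	if err != nil {
		panic(err)
	}
}
```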
154  vendor/github.com/containerd/containerd/images/archive/importer.go  (generated, vendored)
@@ -22,12 +22,14 @@ import (
	"bytes"
	"context"
	"encoding/json"
	"fmt"
	"io"
	"io/ioutil"
	"path"

	"github.com/containerd/containerd/archive/compression"
	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/errdefs"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/log"
	digest "github.com/opencontainers/go-digest"
@@ -36,6 +38,22 @@ import (
	"github.com/pkg/errors"
)

type importOpts struct {
	compress bool
}

// ImportOpt is an option for importing an OCI index
type ImportOpt func(*importOpts) error

// WithImportCompression compresses uncompressed layers on import.
// This is used for import formats which do not include the manifest.
func WithImportCompression() ImportOpt {
	return func(io *importOpts) error {
		io.compress = true
		return nil
	}
}

// ImportIndex imports an index from a tar archive image bundle
// - implements Docker v1.1, v1.2 and OCI v1.
// - prefers OCI v1 when provided
@@ -43,8 +61,7 @@ import (
// - normalizes Docker references and adds as OCI ref name
//      e.g. alpine:latest -> docker.io/library/alpine:latest
// - existing OCI reference names are untouched
// - TODO: support option to compress layers on ingest
func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (ocispec.Descriptor, error) {
func ImportIndex(ctx context.Context, store content.Store, reader io.Reader, opts ...ImportOpt) (ocispec.Descriptor, error) {
	var (
		tr = tar.NewReader(reader)

@@ -56,7 +73,15 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
		}
		symlinks = make(map[string]string)
		blobs    = make(map[string]ocispec.Descriptor)
		iopts    importOpts
	)

	for _, o := range opts {
		if err := o(&iopts); err != nil {
			return ocispec.Descriptor{}, err
		}
	}

	for {
		hdr, err := tr.Next()
		if err == io.EOF {
@@ -99,7 +124,7 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
	}

	// If OCI layout was given, interpret the tar as an OCI layout.
	// When not provided, the layout of the tar will be interpretted
	// When not provided, the layout of the tar will be interpreted
	// as Docker v1.1 or v1.2.
	if ociLayout.Version != "" {
		if ociLayout.Version != ocispec.ImageLayoutVersion {
@@ -137,19 +162,23 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
		if !ok {
			return ocispec.Descriptor{}, errors.Errorf("image config %q not found", mfst.Config)
		}
		config.MediaType = ocispec.MediaTypeImageConfig
		config.MediaType = images.MediaTypeDockerSchema2Config

		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs)
		layers, err := resolveLayers(ctx, store, mfst.Layers, blobs, iopts.compress)
		if err != nil {
			return ocispec.Descriptor{}, errors.Wrap(err, "failed to resolve layers")
		}

		manifest := ocispec.Manifest{
			Versioned: specs.Versioned{
				SchemaVersion: 2,
			},
			Config: config,
			Layers: layers,
		manifest := struct {
			SchemaVersion int                  `json:"schemaVersion"`
			MediaType     string               `json:"mediaType"`
			Config        ocispec.Descriptor   `json:"config"`
			Layers        []ocispec.Descriptor `json:"layers"`
		}{
			SchemaVersion: 2,
			MediaType:     images.MediaTypeDockerSchema2Manifest,
			Config:        config,
			Layers:        layers,
		}

		desc, err := writeManifest(ctx, store, manifest, ocispec.MediaTypeImageManifest)
@@ -181,7 +210,8 @@ func ImportIndex(ctx context.Context, store content.Store, reader io.Reader) (oc
		}

		mfstdesc.Annotations = map[string]string{
			ocispec.AnnotationRefName: normalized,
			images.AnnotationImageName: normalized,
			ocispec.AnnotationRefName:  ociReferenceName(normalized),
		}

		idx.Manifests = append(idx.Manifests, mfstdesc)
@@ -210,36 +240,118 @@ func onUntarBlob(ctx context.Context, r io.Reader, store content.Ingester, size
	return dgstr.Digest(), nil
}

func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor) ([]ocispec.Descriptor, error) {
	var layers []ocispec.Descriptor
	for _, f := range layerFiles {
func resolveLayers(ctx context.Context, store content.Store, layerFiles []string, blobs map[string]ocispec.Descriptor, compress bool) ([]ocispec.Descriptor, error) {
	layers := make([]ocispec.Descriptor, len(layerFiles))
	descs := map[digest.Digest]*ocispec.Descriptor{}
	filters := []string{}
	for i, f := range layerFiles {
		desc, ok := blobs[f]
		if !ok {
			return nil, errors.Errorf("layer %q not found", f)
		}
		layers[i] = desc
		descs[desc.Digest] = &layers[i]
		filters = append(filters, "labels.\"containerd.io/uncompressed\"=="+desc.Digest.String())
	}

	err := store.Walk(ctx, func(info content.Info) error {
		dgst, ok := info.Labels["containerd.io/uncompressed"]
		if ok {
			desc := descs[digest.Digest(dgst)]
			if desc != nil {
				desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
				desc.Digest = info.Digest
				desc.Size = info.Size
			}
		}
		return nil
	}, filters...)
	if err != nil {
		return nil, errors.Wrap(err, "failure checking for compressed blobs")
	}

	for i, desc := range layers {
		if desc.MediaType != "" {
			continue
		}
		// Open blob, resolve media type
		ra, err := store.ReaderAt(ctx, desc)
		if err != nil {
			return nil, errors.Wrapf(err, "failed to open %q (%s)", f, desc.Digest)
			return nil, errors.Wrapf(err, "failed to open %q (%s)", layerFiles[i], desc.Digest)
		}
		s, err := compression.DecompressStream(content.NewReader(ra))
		if err != nil {
			return nil, errors.Wrapf(err, "failed to detect compression for %q", f)
			return nil, errors.Wrapf(err, "failed to detect compression for %q", layerFiles[i])
		}
		if s.GetCompression() == compression.Uncompressed {
			// TODO: Support compressing and writing back to content store
			desc.MediaType = ocispec.MediaTypeImageLayer
			if compress {
				ref := fmt.Sprintf("compress-blob-%s-%s", desc.Digest.Algorithm().String(), desc.Digest.Encoded())
				labels := map[string]string{
					"containerd.io/uncompressed": desc.Digest.String(),
				}
				layers[i], err = compressBlob(ctx, store, s, ref, content.WithLabels(labels))
				if err != nil {
					s.Close()
					return nil, err
				}
				layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
			} else {
				layers[i].MediaType = images.MediaTypeDockerSchema2Layer
			}
		} else {
			desc.MediaType = ocispec.MediaTypeImageLayerGzip
			layers[i].MediaType = images.MediaTypeDockerSchema2LayerGzip
		}
		s.Close()

		layers = append(layers, desc)
	}
	return layers, nil
}

func compressBlob(ctx context.Context, cs content.Store, r io.Reader, ref string, opts ...content.Opt) (desc ocispec.Descriptor, err error) {
	w, err := content.OpenWriter(ctx, cs, content.WithRef(ref))
	if err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "failed to open writer")
	}

	defer func() {
		w.Close()
		if err != nil {
			cs.Abort(ctx, ref)
		}
	}()
	if err := w.Truncate(0); err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "failed to truncate writer")
	}

	cw, err := compression.CompressStream(w, compression.Gzip)
	if err != nil {
		return ocispec.Descriptor{}, err
	}

	if _, err := io.Copy(cw, r); err != nil {
		return ocispec.Descriptor{}, err
	}
	if err := cw.Close(); err != nil {
		return ocispec.Descriptor{}, err
	}

	cst, err := w.Status()
	if err != nil {
		return ocispec.Descriptor{}, errors.Wrap(err, "failed to get writer status")
	}

	desc.Digest = w.Digest()
	desc.Size = cst.Offset

	if err := w.Commit(ctx, desc.Size, desc.Digest, opts...); err != nil {
		if !errdefs.IsAlreadyExists(err) {
			return ocispec.Descriptor{}, errors.Wrap(err, "failed to commit")
		}
	}

	return desc, nil
}

func writeManifest(ctx context.Context, cs content.Ingester, manifest interface{}, mediaType string) (ocispec.Descriptor, error) {
	manifestBytes, err := json.Marshal(manifest)
	if err != nil {
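A rough sketch of importing a tarball with the new WithImportCompression option, under the same assumed client wiring as above; the socket path, namespace, and tarball name are placeholders.

```go
package main

import (
	"context"
	"os"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images/archive"
	"github.com/containerd/containerd/namespaces"
)

func main() {
	ctx := namespaces.WithNamespace(context.Background(), "default")
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	f, err := os.Open("alpine.tar")
	if err != nil {
		panic(err)
	}
	defer f.Close()

	// WithImportCompression (added in this change) gzips uncompressed layers
	// from Docker-format tarballs as they are ingested into the content store.
	idx, err := archive.ImportIndex(ctx, client.ContentStore(), f, archive.WithImportCompression())
	if err != nil {
		panic(err)
	}
	_ = idx // an ocispec.Descriptor pointing at the imported index
}
```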
30  vendor/github.com/containerd/containerd/images/archive/reference.go  (generated, vendored)
@@ -19,7 +19,8 @@ package archive
import (
	"strings"

	"github.com/docker/distribution/reference"
	"github.com/containerd/containerd/reference"
	distref "github.com/containerd/containerd/reference/docker"
	"github.com/opencontainers/go-digest"
	"github.com/pkg/errors"
)
@@ -69,7 +70,7 @@ func isImagePrefix(s, prefix string) bool {

func normalizeReference(ref string) (string, error) {
	// TODO: Replace this function to not depend on reference package
	normalized, err := reference.ParseDockerRef(ref)
	normalized, err := distref.ParseDockerRef(ref)
	if err != nil {
		return "", errors.Wrapf(err, "normalize image ref %q", ref)
	}
@@ -77,6 +78,31 @@ func normalizeReference(ref string) (string, error) {
	return normalized.String(), nil
}

func familiarizeReference(ref string) (string, error) {
	named, err := distref.ParseNormalizedNamed(ref)
	if err != nil {
		return "", errors.Wrapf(err, "failed to parse %q", ref)
	}
	named = distref.TagNameOnly(named)

	return distref.FamiliarString(named), nil
}

func ociReferenceName(name string) string {
	// OCI defines the reference name as only a tag excluding the
	// repository. The containerd annotation contains the full image name
	// since the tag is insufficient for correctly naming and referring to an
	// image
	var ociRef string
	if spec, err := reference.Parse(name); err == nil {
		ociRef = spec.Object
	} else {
		ociRef = name
	}

	return ociRef
}

// DigestTranslator creates a digest reference by adding the
// digest to an image name
func DigestTranslator(prefix string) func(digest.Digest) string {
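A small sketch of the reference handling these unexported helpers build on, using the exported functions from the reference/docker package the diff switches to; the image name is illustrative.

```go
package main

import (
	"fmt"

	distref "github.com/containerd/containerd/reference/docker"
)

func main() {
	// normalizeReference direction: short name -> fully qualified name.
	named, err := distref.ParseDockerRef("alpine:latest")
	if err != nil {
		panic(err)
	}
	fmt.Println(named.String()) // docker.io/library/alpine:latest

	// familiarizeReference direction: back to the short form used in
	// manifest.json RepoTags.
	n, err := distref.ParseNormalizedNamed(named.String())
	if err != nil {
		panic(err)
	}
	fmt.Println(distref.FamiliarString(distref.TagNameOnly(n))) // alpine:latest
}
```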
7  vendor/github.com/containerd/containerd/images/handlers.go  (generated, vendored)
@@ -117,7 +117,7 @@ func Walk(ctx context.Context, handler Handler, descs ...ocispec.Descriptor) err
//
// If any handler returns an error, the dispatch session will be canceled.
func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted, descs ...ocispec.Descriptor) error {
	eg, ctx := errgroup.WithContext(ctx)
	eg, ctx2 := errgroup.WithContext(ctx)
	for _, desc := range descs {
		desc := desc

@@ -126,10 +126,11 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted,
				return err
			}
		}

		eg.Go(func() error {
			desc := desc

			children, err := handler.Handle(ctx, desc)
			children, err := handler.Handle(ctx2, desc)
			if limiter != nil {
				limiter.Release(1)
			}
@@ -141,7 +142,7 @@ func Dispatch(ctx context.Context, handler Handler, limiter *semaphore.Weighted,
			}

			if len(children) > 0 {
				return Dispatch(ctx, handler, limiter, children...)
				return Dispatch(ctx2, handler, limiter, children...)
			}

			return nil
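A generic illustration (not containerd code) of why the Dispatch change matters: work started by the group must use the errgroup-derived context so a failure in one handler cancels the others.

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"golang.org/x/sync/errgroup"
)

func main() {
	// ctx2 is canceled as soon as any goroutine started with eg.Go returns an
	// error; passing the outer ctx instead would let siblings run to completion.
	eg, ctx2 := errgroup.WithContext(context.Background())

	eg.Go(func() error {
		return errors.New("first handler fails")
	})
	eg.Go(func() error {
		<-ctx2.Done() // observes cancellation triggered by the failure above
		return ctx2.Err()
	})

	fmt.Println(eg.Wait()) // "first handler fails"
}
```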
60  vendor/github.com/containerd/containerd/images/image.go  (generated, vendored)
@@ -20,7 +20,6 @@ import (
	"context"
	"encoding/json"
	"sort"
	"strings"
	"time"

	"github.com/containerd/containerd/content"
@@ -119,7 +118,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor
		}
		size += desc.Size
		return nil, nil
	}), FilterPlatforms(ChildrenHandler(provider), platform)), image.Target)
	}), LimitManifests(FilterPlatforms(ChildrenHandler(provider), platform), platform, 1)), image.Target)
}

type platformManifest struct {
@@ -142,6 +141,7 @@ type platformManifest struct {
// this direction because this abstraction is not needed.`
func Manifest(ctx context.Context, provider content.Provider, image ocispec.Descriptor, platform platforms.MatchComparer) (ocispec.Manifest, error) {
	var (
		limit    = 1
		m        []platformManifest
		wasIndex bool
	)
@@ -210,10 +210,22 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
				}
			}

			sort.SliceStable(descs, func(i, j int) bool {
				if descs[i].Platform == nil {
					return false
				}
				if descs[j].Platform == nil {
					return true
				}
				return platform.Less(*descs[i].Platform, *descs[j].Platform)
			})

			wasIndex = true

			if len(descs) > limit {
				return descs[:limit], nil
			}
			return descs, nil

		}
		return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
	}), image); err != nil {
@@ -227,17 +239,6 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
		}
		return ocispec.Manifest{}, err
	}

	sort.SliceStable(m, func(i, j int) bool {
		if m[i].p == nil {
			return false
		}
		if m[j].p == nil {
			return true
		}
		return platform.Less(*m[i].p, *m[j].p)
	})

	return *m[0].m, nil
}

@@ -356,15 +357,11 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
		}

		descs = append(descs, index.Manifests...)
	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
		MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip,
		MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
		ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
		ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip,
		MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
		// childless data types.
		return nil, nil
	default:
		if IsLayerType(desc.MediaType) || IsKnownConfig(desc.MediaType) {
			// childless data types.
			return nil, nil
		}
		log.G(ctx).Warnf("encountered unknown type %v; children may not be fetched", desc.MediaType)
	}

@@ -387,22 +384,3 @@ func RootFS(ctx context.Context, provider content.Provider, configDesc ocispec.D
	}
	return config.RootFS.DiffIDs, nil
}

// IsCompressedDiff returns true if mediaType is a known compressed diff media type.
// It returns false if the media type is a diff, but not compressed. If the media type
// is not a known diff type, it returns errdefs.ErrNotImplemented
func IsCompressedDiff(ctx context.Context, mediaType string) (bool, error) {
	switch mediaType {
	case ocispec.MediaTypeImageLayer, MediaTypeDockerSchema2Layer:
	case ocispec.MediaTypeImageLayerGzip, MediaTypeDockerSchema2LayerGzip:
		return true, nil
	default:
		// Still apply all generic media types *.tar[.+]gzip and *.tar
		if strings.HasSuffix(mediaType, ".tar.gzip") || strings.HasSuffix(mediaType, ".tar+gzip") {
			return true, nil
		} else if !strings.HasSuffix(mediaType, ".tar") {
			return false, errdefs.ErrNotImplemented
		}
	}
	return false, nil
}
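A rough sketch of resolving a single platform-matched manifest with images.Manifest, assuming the same client wiring as earlier; the image name is a placeholder.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/namespaces"
	"github.com/containerd/containerd/platforms"
)

func main() {
	// Manifest now sorts the platform-matched candidates and resolves at most
	// one, so the returned manifest is the best match for the MatchComparer.
	ctx := namespaces.WithNamespace(context.Background(), "default")
	client, err := containerd.New("/run/containerd/containerd.sock")
	if err != nil {
		panic(err)
	}
	defer client.Close()

	img, err := client.ImageService().Get(ctx, "docker.io/library/alpine:latest")
	if err != nil {
		panic(err)
	}

	m, err := images.Manifest(ctx, client.ContentStore(), img.Target, platforms.Default())
	if err != nil {
		panic(err)
	}
	fmt.Println(m.Config.Digest)
}
```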
84  vendor/github.com/containerd/containerd/images/mediatypes.go  (generated, vendored)
@@ -16,6 +16,15 @@

package images

import (
	"context"
	"sort"
	"strings"

	"github.com/containerd/containerd/errdefs"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// mediatype definitions for image components handled in containerd.
//
// oci components are generally referenced directly, although we may centralize
@@ -40,3 +49,78 @@ const (
	// Legacy Docker schema1 manifest
	MediaTypeDockerSchema1Manifest = "application/vnd.docker.distribution.manifest.v1+prettyjws"
)

// DiffCompression returns the compression as defined by the layer diff media
// type. For Docker media types without compression, "unknown" is returned to
// indicate that the media type may be compressed. If the media type is not
// recognized as a layer diff, then it returns errdefs.ErrNotImplemented
func DiffCompression(ctx context.Context, mediaType string) (string, error) {
	base, ext := parseMediaTypes(mediaType)
	switch base {
	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerForeign:
		if len(ext) > 0 {
			// Type is wrapped
			return "", nil
		}
		// These media types may have been compressed but failed to
		// use the correct media type. The decompression function
		// should detect and handle this case.
		return "unknown", nil
	case MediaTypeDockerSchema2LayerGzip, MediaTypeDockerSchema2LayerForeignGzip:
		if len(ext) > 0 {
			// Type is wrapped
			return "", nil
		}
		return "gzip", nil
	case ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerNonDistributable:
		if len(ext) > 0 {
			switch ext[len(ext)-1] {
			case "gzip":
				return "gzip", nil
			}
		}
		return "", nil
	default:
		return "", errdefs.ErrNotImplemented
	}
}

// parseMediaTypes splits the media type into the base type and
// an array of sorted extensions
func parseMediaTypes(mt string) (string, []string) {
	if mt == "" {
		return "", []string{}
	}

	s := strings.Split(mt, "+")
	ext := s[1:]
	sort.Strings(ext)

	return s[0], ext
}

// IsLayerTypes returns true if the media type is a layer
func IsLayerType(mt string) bool {
	if strings.HasPrefix(mt, "application/vnd.oci.image.layer.") {
		return true
	}

	// Parse Docker media types, strip off any + suffixes first
	base, _ := parseMediaTypes(mt)
	switch base {
	case MediaTypeDockerSchema2Layer, MediaTypeDockerSchema2LayerGzip,
		MediaTypeDockerSchema2LayerForeign, MediaTypeDockerSchema2LayerForeignGzip:
		return true
	}
	return false
}

// IsKnownConfig returns true if the media type is a known config type
func IsKnownConfig(mt string) bool {
	switch mt {
	case MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig,
		MediaTypeContainerd1Checkpoint, MediaTypeContainerd1CheckpointConfig:
		return true
	}
	return false
}
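A small sketch exercising the new media-type helpers; the values noted in the comments follow directly from the switch arms above.

```go
package main

import (
	"context"
	"fmt"

	"github.com/containerd/containerd/images"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

func main() {
	ctx := context.Background()

	// Docker gzip layer: compression is explicit in the media type.
	c, _ := images.DiffCompression(ctx, images.MediaTypeDockerSchema2LayerGzip)
	fmt.Println(c) // "gzip"

	// OCI layer with a +gzip suffix: parseMediaTypes surfaces the extension.
	c, _ = images.DiffCompression(ctx, ocispec.MediaTypeImageLayerGzip)
	fmt.Println(c) // "gzip"

	fmt.Println(images.IsLayerType(ocispec.MediaTypeImageLayer))            // true
	fmt.Println(images.IsKnownConfig(images.MediaTypeDockerSchema2Config))  // true
}
```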
241  vendor/github.com/containerd/containerd/images/oci/exporter.go  (generated, vendored, file deleted)
@@ -1,241 +0,0 @@
/*
   Copyright The containerd Authors.

   Licensed under the Apache License, Version 2.0 (the "License");
   you may not use this file except in compliance with the License.
   You may obtain a copy of the License at

       http://www.apache.org/licenses/LICENSE-2.0

   Unless required by applicable law or agreed to in writing, software
   distributed under the License is distributed on an "AS IS" BASIS,
   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
   See the License for the specific language governing permissions and
   limitations under the License.
*/

package oci

import (
	"archive/tar"
	"context"
	"encoding/json"
	"io"
	"sort"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images"
	"github.com/containerd/containerd/platforms"
	ocispecs "github.com/opencontainers/image-spec/specs-go"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
	"github.com/pkg/errors"
)

// V1Exporter implements OCI Image Spec v1.
// It is up to caller to put "org.opencontainers.image.ref.name" annotation to desc.
//
// TODO(AkihiroSuda): add V1Exporter{TranslateMediaTypes: true} that transforms media types,
// e.g. application/vnd.docker.image.rootfs.diff.tar.gzip
// -> application/vnd.oci.image.layer.v1.tar+gzip
type V1Exporter struct {
	AllPlatforms bool
}

// V1ExporterOpt allows the caller to set additional options to a new V1Exporter
type V1ExporterOpt func(c *V1Exporter) error

// DefaultV1Exporter return a default V1Exporter pointer
func DefaultV1Exporter() *V1Exporter {
	return &V1Exporter{
		AllPlatforms: false,
	}
}

// ResolveV1ExportOpt return a new V1Exporter with V1ExporterOpt
func ResolveV1ExportOpt(opts ...V1ExporterOpt) (*V1Exporter, error) {
	exporter := DefaultV1Exporter()
	for _, o := range opts {
		if err := o(exporter); err != nil {
			return exporter, err
		}
	}
	return exporter, nil
}

// WithAllPlatforms set V1Exporter`s AllPlatforms option
func WithAllPlatforms(allPlatforms bool) V1ExporterOpt {
	return func(c *V1Exporter) error {
		c.AllPlatforms = allPlatforms
		return nil
	}
}

// Export implements Exporter.
func (oe *V1Exporter) Export(ctx context.Context, store content.Provider, desc ocispec.Descriptor, writer io.Writer) error {
	tw := tar.NewWriter(writer)
	defer tw.Close()

	records := []tarRecord{
		ociLayoutFile(""),
		ociIndexRecord(desc),
	}

	algorithms := map[string]struct{}{}
	exportHandler := func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
		records = append(records, blobRecord(store, desc))
		algorithms[desc.Digest.Algorithm().String()] = struct{}{}
		return nil, nil
	}

	childrenHandler := images.ChildrenHandler(store)

	if !oe.AllPlatforms {
		// get local default platform to fetch image manifest
		childrenHandler = images.FilterPlatforms(childrenHandler, platforms.Any(platforms.DefaultSpec()))
	}

	handlers := images.Handlers(
		childrenHandler,
		images.HandlerFunc(exportHandler),
	)

	// Walk sequentially since the number of fetchs is likely one and doing in
	// parallel requires locking the export handler
	if err := images.Walk(ctx, handlers, desc); err != nil {
		return err
	}

	if len(algorithms) > 0 {
		records = append(records, directoryRecord("blobs/", 0755))
		for alg := range algorithms {
			records = append(records, directoryRecord("blobs/"+alg+"/", 0755))
		}
	}

	return writeTar(ctx, tw, records)
}

type tarRecord struct {
	Header *tar.Header
	CopyTo func(context.Context, io.Writer) (int64, error)
}

func blobRecord(cs content.Provider, desc ocispec.Descriptor) tarRecord {
	path := "blobs/" + desc.Digest.Algorithm().String() + "/" + desc.Digest.Hex()
	return tarRecord{
		Header: &tar.Header{
			Name:     path,
			Mode:     0444,
			Size:     desc.Size,
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			r, err := cs.ReaderAt(ctx, desc)
			if err != nil {
				return 0, errors.Wrap(err, "failed to get reader")
			}
			defer r.Close()

			// Verify digest
			dgstr := desc.Digest.Algorithm().Digester()

			n, err := io.Copy(io.MultiWriter(w, dgstr.Hash()), content.NewReader(r))
			if err != nil {
				return 0, errors.Wrap(err, "failed to copy to tar")
			}
			if dgstr.Digest() != desc.Digest {
				return 0, errors.Errorf("unexpected digest %s copied", dgstr.Digest())
			}
			return n, nil
		},
	}
}

func directoryRecord(name string, mode int64) tarRecord {
	return tarRecord{
		Header: &tar.Header{
			Name:     name,
			Mode:     mode,
			Typeflag: tar.TypeDir,
		},
	}
}

func ociLayoutFile(version string) tarRecord {
	if version == "" {
		version = ocispec.ImageLayoutVersion
	}
	layout := ocispec.ImageLayout{
		Version: version,
	}

	b, err := json.Marshal(layout)
	if err != nil {
		panic(err)
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     ocispec.ImageLayoutFile,
			Mode:     0444,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}

}

func ociIndexRecord(manifests ...ocispec.Descriptor) tarRecord {
	index := ocispec.Index{
		Versioned: ocispecs.Versioned{
			SchemaVersion: 2,
		},
		Manifests: manifests,
	}

	b, err := json.Marshal(index)
	if err != nil {
		panic(err)
	}

	return tarRecord{
		Header: &tar.Header{
			Name:     "index.json",
			Mode:     0644,
			Size:     int64(len(b)),
			Typeflag: tar.TypeReg,
		},
		CopyTo: func(ctx context.Context, w io.Writer) (int64, error) {
			n, err := w.Write(b)
			return int64(n), err
		},
	}
}

func writeTar(ctx context.Context, tw *tar.Writer, records []tarRecord) error {
	sort.Slice(records, func(i, j int) bool {
		return records[i].Header.Name < records[j].Header.Name
	})

	for _, record := range records {
		if err := tw.WriteHeader(record.Header); err != nil {
			return err
		}
		if record.CopyTo != nil {
			n, err := record.CopyTo(ctx, tw)
			if err != nil {
				return err
			}
			if n != record.Header.Size {
				return errors.Errorf("unexpected copy size for %s", record.Header.Name)
			}
		} else if record.Header.Size > 0 {
			return errors.Errorf("no content to write to record with non-zero size for %s", record.Header.Name)
		}
	}
	return nil
}
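A hedged migration sketch from the removed oci.V1Exporter to the new archive.Export path shown earlier in this diff; the wrapper name exportOCI and the platform choice are illustrative, not from the diff.

```go
package export

import (
	"context"
	"io"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/images/archive"
	"github.com/containerd/containerd/platforms"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// exportOCI is a rough replacement for the removed (*oci.V1Exporter).Export:
// instead of the caller pre-annotating desc with org.opencontainers.image.ref.name,
// the image name is passed in and archive.Export attaches both annotations.
func exportOCI(ctx context.Context, store content.Provider, desc ocispec.Descriptor, name string, w io.Writer) error {
	return archive.Export(ctx, store, w,
		archive.WithPlatform(platforms.Default()),
		archive.WithManifest(desc, name),
	)
}
```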