vendor: initial vendor

Signed-off-by: Tonis Tiigi <tonistiigi@gmail.com>
Tonis Tiigi
2019-03-22 13:31:59 -07:00
parent 4f2cc0e220
commit fd8fbf21e6
2584 changed files with 828696 additions and 0 deletions


@@ -0,0 +1,198 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"net/http"
"sort"
"strings"
)
type authenticationScheme byte
const (
basicAuth authenticationScheme = 1 << iota // Defined in RFC 7617
digestAuth // Defined in RFC 7616
bearerAuth // Defined in RFC 6750
)
// challenge carries information from a WWW-Authenticate response header.
// See RFC 2617.
type challenge struct {
// scheme is the auth-scheme according to RFC 2617
scheme authenticationScheme
// parameters are the auth-params according to RFC 2617
parameters map[string]string
}
type byScheme []challenge
func (bs byScheme) Len() int { return len(bs) }
func (bs byScheme) Swap(i, j int) { bs[i], bs[j] = bs[j], bs[i] }
// Sort in priority order: token > digest > basic
func (bs byScheme) Less(i, j int) bool { return bs[i].scheme > bs[j].scheme }
// Octet types from RFC 2616.
type octetType byte
var octetTypes [256]octetType
const (
isToken octetType = 1 << iota
isSpace
)
func init() {
// OCTET = <any 8-bit sequence of data>
// CHAR = <any US-ASCII character (octets 0 - 127)>
// CTL = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
// CR = <US-ASCII CR, carriage return (13)>
// LF = <US-ASCII LF, linefeed (10)>
// SP = <US-ASCII SP, space (32)>
// HT = <US-ASCII HT, horizontal-tab (9)>
// <"> = <US-ASCII double-quote mark (34)>
// CRLF = CR LF
// LWS = [CRLF] 1*( SP | HT )
// TEXT = <any OCTET except CTLs, but including LWS>
// separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
// | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
// token = 1*<any CHAR except CTLs or separators>
// qdtext = <any TEXT except <">>
for c := 0; c < 256; c++ {
var t octetType
isCtl := c <= 31 || c == 127
isChar := 0 <= c && c <= 127
isSeparator := strings.ContainsRune(" \t\"(),/:;<=>?@[]\\{}", rune(c))
if strings.ContainsRune(" \t\r\n", rune(c)) {
t |= isSpace
}
if isChar && !isCtl && !isSeparator {
t |= isToken
}
octetTypes[c] = t
}
}
func parseAuthHeader(header http.Header) []challenge {
challenges := []challenge{}
for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
v, p := parseValueAndParams(h)
var s authenticationScheme
switch v {
case "basic":
s = basicAuth
case "digest":
s = digestAuth
case "bearer":
s = bearerAuth
default:
continue
}
challenges = append(challenges, challenge{scheme: s, parameters: p})
}
sort.Stable(byScheme(challenges))
return challenges
}
func parseValueAndParams(header string) (value string, params map[string]string) {
params = make(map[string]string)
value, s := expectToken(header)
if value == "" {
return
}
value = strings.ToLower(value)
for {
var pkey string
pkey, s = expectToken(skipSpace(s))
if pkey == "" {
return
}
if !strings.HasPrefix(s, "=") {
return
}
var pvalue string
pvalue, s = expectTokenOrQuoted(s[1:])
if pvalue == "" {
return
}
pkey = strings.ToLower(pkey)
params[pkey] = pvalue
s = skipSpace(s)
if !strings.HasPrefix(s, ",") {
return
}
s = s[1:]
}
}
func skipSpace(s string) (rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isSpace == 0 {
break
}
}
return s[i:]
}
func expectToken(s string) (token, rest string) {
i := 0
for ; i < len(s); i++ {
if octetTypes[s[i]]&isToken == 0 {
break
}
}
return s[:i], s[i:]
}
func expectTokenOrQuoted(s string) (value string, rest string) {
if !strings.HasPrefix(s, "\"") {
return expectToken(s)
}
s = s[1:]
for i := 0; i < len(s); i++ {
switch s[i] {
case '"':
return s[:i], s[i+1:]
case '\\':
p := make([]byte, len(s)-1)
j := copy(p, s[:i])
escape := true
for i = i + 1; i < len(s); i++ {
b := s[i]
switch {
case escape:
escape = false
p[j] = b
j++
case b == '\\':
escape = true
case b == '"':
return string(p[:j]), s[i+1:]
default:
p[j] = b
j++
}
}
return "", ""
}
}
return "", ""
}
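
The following sketch is not part of the vendored file; it illustrates how the challenge parser above handles a typical registry 401 response. Since parseAuthHeader and challenge are unexported, it assumes placement in the same docker package, for example in a test:

package docker

import (
	"fmt"
	"net/http"
)

// exampleParseChallenges is illustrative only: it feeds typical
// WWW-Authenticate headers through parseAuthHeader and prints the result.
func exampleParseChallenges() {
	h := http.Header{}
	h.Add("WWW-Authenticate", `Bearer realm="https://auth.docker.io/token",service="registry.docker.io",scope="repository:library/ubuntu:pull"`)
	h.Add("WWW-Authenticate", `Basic realm="registry"`)

	// Challenges come back sorted bearer > digest > basic.
	for _, c := range parseAuthHeader(h) {
		fmt.Println(c.scheme, c.parameters["realm"], c.parameters["scope"])
	}
}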


@@ -0,0 +1,313 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context/ctxhttp"
)
type dockerAuthorizer struct {
credentials func(string) (string, string, error)
client *http.Client
mu sync.Mutex
auth map[string]string
}
// NewAuthorizer creates a Docker authorizer using the provided function to
// get credentials for the token server or basic auth.
func NewAuthorizer(client *http.Client, f func(string) (string, string, error)) Authorizer {
if client == nil {
client = http.DefaultClient
}
return &dockerAuthorizer{
credentials: f,
client: client,
auth: map[string]string{},
}
}
func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) error {
// TODO: Lookup matching challenge and scope rather than just host
if auth := a.getAuth(req.URL.Host); auth != "" {
req.Header.Set("Authorization", auth)
}
return nil
}
func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.Response) error {
last := responses[len(responses)-1]
host := last.Request.URL.Host
for _, c := range parseAuthHeader(last.Header) {
if c.scheme == bearerAuth {
if err := invalidAuthorization(c, responses); err != nil {
// TODO: Clear token
a.setAuth(host, "")
return err
}
// TODO(dmcg): Store challenge, not token
// Move token fetching to authorize
return a.setTokenAuth(ctx, host, c.parameters)
} else if c.scheme == basicAuth {
// TODO: Resolve credentials on authorize
username, secret, err := a.credentials(host)
if err != nil {
return err
}
if username != "" && secret != "" {
auth := username + ":" + secret
a.setAuth(host, fmt.Sprintf("Basic %s", base64.StdEncoding.EncodeToString([]byte(auth))))
return nil
}
}
}
return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme")
}
func (a *dockerAuthorizer) getAuth(host string) string {
a.mu.Lock()
defer a.mu.Unlock()
return a.auth[host]
}
func (a *dockerAuthorizer) setAuth(host string, auth string) bool {
a.mu.Lock()
defer a.mu.Unlock()
changed := a.auth[host] != auth
a.auth[host] = auth
return changed
}
func (a *dockerAuthorizer) setTokenAuth(ctx context.Context, host string, params map[string]string) error {
realm, ok := params["realm"]
if !ok {
return errors.New("no realm specified for token auth challenge")
}
realmURL, err := url.Parse(realm)
if err != nil {
return errors.Wrap(err, "invalid token auth challenge realm")
}
to := tokenOptions{
realm: realmURL.String(),
service: params["service"],
}
to.scopes = getTokenScopes(ctx, params)
if len(to.scopes) == 0 {
return errors.Errorf("no scope specified for token auth challenge")
}
if a.credentials != nil {
to.username, to.secret, err = a.credentials(host)
if err != nil {
return err
}
}
var token string
if to.secret != "" {
// Credential information is provided, use oauth POST endpoint
token, err = a.fetchTokenWithOAuth(ctx, to)
if err != nil {
return errors.Wrap(err, "failed to fetch oauth token")
}
} else {
// Do request anonymously
token, err = a.fetchToken(ctx, to)
if err != nil {
return errors.Wrap(err, "failed to fetch anonymous token")
}
}
a.setAuth(host, fmt.Sprintf("Bearer %s", token))
return nil
}
type tokenOptions struct {
realm string
service string
scopes []string
username string
secret string
}
type postTokenResponse struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
Scope string `json:"scope"`
}
func (a *dockerAuthorizer) fetchTokenWithOAuth(ctx context.Context, to tokenOptions) (string, error) {
form := url.Values{}
form.Set("scope", strings.Join(to.scopes, " "))
form.Set("service", to.service)
// TODO: Allow setting client_id
form.Set("client_id", "containerd-client")
if to.username == "" {
form.Set("grant_type", "refresh_token")
form.Set("refresh_token", to.secret)
} else {
form.Set("grant_type", "password")
form.Set("username", to.username)
form.Set("password", to.secret)
}
resp, err := ctxhttp.PostForm(ctx, a.client, to.realm, form)
if err != nil {
return "", err
}
defer resp.Body.Close()
// Registries without support for POST may return 404 for POST /v2/token.
// As of September 2017, GCR is known to return 404.
// As of February 2018, JFrog Artifactory is known to return 401.
if (resp.StatusCode == 405 && to.username != "") || resp.StatusCode == 404 || resp.StatusCode == 401 {
return a.fetchToken(ctx, to)
} else if resp.StatusCode < 200 || resp.StatusCode >= 400 {
b, _ := ioutil.ReadAll(io.LimitReader(resp.Body, 64000)) // 64KB
log.G(ctx).WithFields(logrus.Fields{
"status": resp.Status,
"body": string(b),
}).Debugf("token request failed")
// TODO: handle error body and write debug output
return "", errors.Errorf("unexpected status: %s", resp.Status)
}
decoder := json.NewDecoder(resp.Body)
var tr postTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", fmt.Errorf("unable to decode token response: %s", err)
}
return tr.AccessToken, nil
}
type getTokenResponse struct {
Token string `json:"token"`
AccessToken string `json:"access_token"`
ExpiresIn int `json:"expires_in"`
IssuedAt time.Time `json:"issued_at"`
RefreshToken string `json:"refresh_token"`
}
// fetchToken fetches a token using a GET request
func (a *dockerAuthorizer) fetchToken(ctx context.Context, to tokenOptions) (string, error) {
req, err := http.NewRequest("GET", to.realm, nil)
if err != nil {
return "", err
}
reqParams := req.URL.Query()
if to.service != "" {
reqParams.Add("service", to.service)
}
for _, scope := range to.scopes {
reqParams.Add("scope", scope)
}
if to.secret != "" {
req.SetBasicAuth(to.username, to.secret)
}
req.URL.RawQuery = reqParams.Encode()
resp, err := ctxhttp.Do(ctx, a.client, req)
if err != nil {
return "", err
}
defer resp.Body.Close()
if resp.StatusCode < 200 || resp.StatusCode >= 400 {
// TODO: handle error body and write debug output
return "", errors.Errorf("unexpected status: %s", resp.Status)
}
decoder := json.NewDecoder(resp.Body)
var tr getTokenResponse
if err = decoder.Decode(&tr); err != nil {
return "", fmt.Errorf("unable to decode token response: %s", err)
}
// `access_token` is equivalent to `token` and if both are specified
// the choice is undefined. Canonicalize `access_token` by sticking
// things in `token`.
if tr.AccessToken != "" {
tr.Token = tr.AccessToken
}
if tr.Token == "" {
return "", ErrNoToken
}
return tr.Token, nil
}
func invalidAuthorization(c challenge, responses []*http.Response) error {
errStr := c.parameters["error"]
if errStr == "" {
return nil
}
n := len(responses)
if n == 1 || (n > 1 && !sameRequest(responses[n-2].Request, responses[n-1].Request)) {
return nil
}
return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr)
}
func sameRequest(r1, r2 *http.Request) bool {
if r1.Method != r2.Method {
return false
}
if *r1.URL != *r2.URL {
return false
}
return true
}
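
A hedged usage sketch, not in the diff: NewAuthorizer takes an *http.Client (nil falls back to http.DefaultClient) and a credentials callback that returns a username and secret for a host; the returned Authorizer is normally wired into the resolver via ResolverOptions, defined later in this commit. The host name and credentials below are placeholders:

package example

import (
	"context"
	"net/http"

	"github.com/containerd/containerd/remotes/docker"
)

// newStaticAuthorizer builds an Authorizer with fixed example credentials.
// A real callback would consult a credential store or config file.
func newStaticAuthorizer() docker.Authorizer {
	creds := func(host string) (string, string, error) {
		if host == "registry-1.docker.io" {
			return "someuser", "somesecret", nil
		}
		return "", "", nil // anonymous everywhere else
	}
	return docker.NewAuthorizer(nil, creds) // nil client -> http.DefaultClient
}

// authorize applies any cached token or basic credentials to the request.
func authorize(ctx context.Context, a docker.Authorizer, req *http.Request) error {
	return a.Authorize(ctx, req)
}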


@@ -0,0 +1,88 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"bytes"
"context"
"encoding/json"
"fmt"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// LegacyConfigMediaType should be replaced by OCI image spec.
//
// More detail: docker/distribution#1622
const LegacyConfigMediaType = "application/octet-stream"
// ConvertManifest changes the config media type from application/octet-stream
// to the schema2 config media type, if needed.
//
// NOTE:
// 1. the original manifest will be deleted by the next gc round.
// 2. manifest lists are not handled.
func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
if !(desc.MediaType == images.MediaTypeDockerSchema2Manifest ||
desc.MediaType == ocispec.MediaTypeImageManifest) {
log.G(ctx).Warnf("do nothing for media type: %s", desc.MediaType)
return desc, nil
}
// read manifest data
mb, err := content.ReadBlob(ctx, store, desc)
if err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to read index data")
}
var manifest ocispec.Manifest
if err := json.Unmarshal(mb, &manifest); err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal data into manifest")
}
// check config media type
if manifest.Config.MediaType != LegacyConfigMediaType {
return desc, nil
}
manifest.Config.MediaType = images.MediaTypeDockerSchema2Config
data, err := json.MarshalIndent(manifest, "", " ")
if err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal manifest")
}
// update manifest with gc labels
desc.Digest = digest.Canonical.FromBytes(data)
desc.Size = int64(len(data))
labels := map[string]string{}
for i, c := range append([]ocispec.Descriptor{manifest.Config}, manifest.Layers...) {
labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i)] = c.Digest.String()
}
ref := remotes.MakeRefKey(ctx, desc)
if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to update content")
}
return desc, nil
}
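
An illustrative call site, not part of the diff, for ConvertManifest above. It assumes a content.Store is already available (for example from a containerd client) together with the descriptor of a freshly pulled manifest whose config may still use the legacy application/octet-stream media type:

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/remotes/docker"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// normalizeManifest rewrites a manifest whose config still carries the legacy
// media type; for any other descriptor ConvertManifest returns it unchanged.
func normalizeManifest(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
	newDesc, err := docker.ConvertManifest(ctx, store, desc)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	if newDesc.Digest != desc.Digest {
		// The manifest was rewritten under a new digest; callers should
		// update their image records to point at newDesc.
	}
	return newDesc, nil
}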


@@ -0,0 +1,164 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"fmt"
"io"
"io/ioutil"
"net/http"
"path"
"strings"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
type dockerFetcher struct {
*dockerBase
}
func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(
logrus.Fields{
"base": r.base.String(),
"digest": desc.Digest,
},
))
urls, err := r.getV2URLPaths(ctx, desc)
if err != nil {
return nil, err
}
ctx, err = contextWithRepositoryScope(ctx, r.refspec, false)
if err != nil {
return nil, err
}
return newHTTPReadSeeker(desc.Size, func(offset int64) (io.ReadCloser, error) {
for _, u := range urls {
rc, err := r.open(ctx, u, desc.MediaType, offset)
if err != nil {
if errdefs.IsNotFound(err) {
continue // try one of the other urls.
}
return nil, err
}
return rc, nil
}
return nil, errors.Wrapf(errdefs.ErrNotFound,
"could not fetch content descriptor %v (%v) from remote",
desc.Digest, desc.MediaType)
})
}
func (r dockerFetcher) open(ctx context.Context, u, mediatype string, offset int64) (io.ReadCloser, error) {
req, err := http.NewRequest(http.MethodGet, u, nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", strings.Join([]string{mediatype, `*`}, ", "))
if offset > 0 {
// Note: "Accept-Ranges: bytes" cannot be trusted as some endpoints
// will return the header without supporting the range. The content
// range must always be checked.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
}
resp, err := r.doRequestWithRetries(ctx, req, nil)
if err != nil {
return nil, err
}
if resp.StatusCode > 299 {
// TODO(stevvooe): When doing an offset-specific request, we should
// really distinguish between a 206 and a 200. In the case of 200, we
// can discard the bytes, hiding the seek behavior from the
// implementation.
resp.Body.Close()
if resp.StatusCode == http.StatusNotFound {
return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", u)
}
return nil, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
}
if offset > 0 {
cr := resp.Header.Get("content-range")
if cr != "" {
if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) {
return nil, errors.Errorf("unhandled content range in response: %v", cr)
}
} else {
// TODO: Should any cases where use of content range
// without the proper header be considered?
// 206 responses?
// Discard up to offset
// Could use buffer pool here but this case should be rare
n, err := io.Copy(ioutil.Discard, io.LimitReader(resp.Body, offset))
if err != nil {
return nil, errors.Wrap(err, "failed to discard to offset")
}
if n != offset {
return nil, errors.Errorf("unable to discard to offset")
}
}
}
return resp.Body, nil
}
// getV2URLPaths generates the candidate URL paths for the object based on the
// set of hints and the provided object id. URLs are returned in the order of
// most to least likely to succeed.
func (r *dockerFetcher) getV2URLPaths(ctx context.Context, desc ocispec.Descriptor) ([]string, error) {
var urls []string
if len(desc.URLs) > 0 {
// handle fetch via external urls.
for _, u := range desc.URLs {
log.G(ctx).WithField("url", u).Debug("adding alternative url")
urls = append(urls, u)
}
}
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
images.MediaTypeDockerSchema1Manifest,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
urls = append(urls, r.url(path.Join("manifests", desc.Digest.String())))
}
// always fallback to attempting to get the object out of the blobs store.
urls = append(urls, r.url(path.Join("blobs", desc.Digest.String())))
return urls, nil
}


@@ -0,0 +1,144 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"bytes"
"io"
"io/ioutil"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/log"
"github.com/pkg/errors"
)
type httpReadSeeker struct {
size int64
offset int64
rc io.ReadCloser
open func(offset int64) (io.ReadCloser, error)
closed bool
}
func newHTTPReadSeeker(size int64, open func(offset int64) (io.ReadCloser, error)) (io.ReadCloser, error) {
return &httpReadSeeker{
size: size,
open: open,
}, nil
}
func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
if hrs.closed {
return 0, io.EOF
}
rd, err := hrs.reader()
if err != nil {
return 0, err
}
n, err = rd.Read(p)
hrs.offset += int64(n)
return
}
func (hrs *httpReadSeeker) Close() error {
if hrs.closed {
return nil
}
hrs.closed = true
if hrs.rc != nil {
return hrs.rc.Close()
}
return nil
}
func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
if hrs.closed {
return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: closed")
}
abs := hrs.offset
switch whence {
case io.SeekStart:
abs = offset
case io.SeekCurrent:
abs += offset
case io.SeekEnd:
if hrs.size == -1 {
return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: unknown size, cannot seek from end")
}
abs = hrs.size + offset
default:
return 0, errors.Wrap(errdefs.ErrInvalidArgument, "Fetcher.Seek: invalid whence")
}
if abs < 0 {
return 0, errors.Wrapf(errdefs.ErrInvalidArgument, "Fetcher.Seek: negative offset")
}
if abs != hrs.offset {
if hrs.rc != nil {
if err := hrs.rc.Close(); err != nil {
log.L.WithError(err).Errorf("Fetcher.Seek: failed to close ReadCloser")
}
hrs.rc = nil
}
hrs.offset = abs
}
return hrs.offset, nil
}
func (hrs *httpReadSeeker) reader() (io.Reader, error) {
if hrs.rc != nil {
return hrs.rc, nil
}
if hrs.size == -1 || hrs.offset < hrs.size {
// only try to reopen the body request if we are seeking to a value
// less than the actual size.
if hrs.open == nil {
return nil, errors.Wrapf(errdefs.ErrNotImplemented, "cannot open")
}
rc, err := hrs.open(hrs.offset)
if err != nil {
return nil, errors.Wrapf(err, "httpReadSeeker: failed open")
}
if hrs.rc != nil {
if err := hrs.rc.Close(); err != nil {
log.L.WithError(err).Errorf("httpReadSeeker: failed to close ReadCloser")
}
}
hrs.rc = rc
} else {
// There is an edge case here where offset == size of the content. If
// we seek, we will probably get an error for content that cannot be
// sought (?). In that case, we should err on committing the content,
// as the length is already satisfied but we just return the empty
// reader instead.
hrs.rc = ioutil.NopCloser(bytes.NewReader([]byte{}))
}
return hrs.rc, nil
}


@@ -0,0 +1,322 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"io"
"io/ioutil"
"net/http"
"path"
"strings"
"time"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
type dockerPusher struct {
*dockerBase
tag string
// TODO: namespace tracker
tracker StatusTracker
}
func (p dockerPusher) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
ctx, err := contextWithRepositoryScope(ctx, p.refspec, true)
if err != nil {
return nil, err
}
ref := remotes.MakeRefKey(ctx, desc)
status, err := p.tracker.GetStatus(ref)
if err == nil {
if status.Offset == status.Total {
return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v", ref)
}
// TODO: Handle incomplete status
} else if !errdefs.IsNotFound(err) {
return nil, errors.Wrap(err, "failed to get status")
}
var (
isManifest bool
existCheck string
)
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest, ocispec.MediaTypeImageIndex:
isManifest = true
if p.tag == "" {
existCheck = path.Join("manifests", desc.Digest.String())
} else {
existCheck = path.Join("manifests", p.tag)
}
default:
existCheck = path.Join("blobs", desc.Digest.String())
}
req, err := http.NewRequest(http.MethodHead, p.url(existCheck), nil)
if err != nil {
return nil, err
}
req.Header.Set("Accept", strings.Join([]string{desc.MediaType, `*`}, ", "))
resp, err := p.doRequestWithRetries(ctx, req, nil)
if err != nil {
if errors.Cause(err) != ErrInvalidAuthorization {
return nil, err
}
log.G(ctx).WithError(err).Debugf("Unable to check existence, continuing with push")
} else {
if resp.StatusCode == http.StatusOK {
var exists bool
if isManifest && p.tag != "" {
dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
if dgstHeader == desc.Digest {
exists = true
}
} else {
exists = true
}
if exists {
p.tracker.SetStatus(ref, Status{
Status: content.Status{
Ref: ref,
// TODO: Set updated time?
},
})
return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
}
} else if resp.StatusCode != http.StatusNotFound {
// TODO: log error
return nil, errors.Errorf("unexpected response: %s", resp.Status)
}
}
// TODO: Lookup related objects for cross repository push
if isManifest {
var putPath string
if p.tag != "" {
putPath = path.Join("manifests", p.tag)
} else {
putPath = path.Join("manifests", desc.Digest.String())
}
req, err = http.NewRequest(http.MethodPut, p.url(putPath), nil)
if err != nil {
return nil, err
}
req.Header.Add("Content-Type", desc.MediaType)
} else {
// TODO: Do monolithic upload if size is small
// Start upload request
req, err = http.NewRequest(http.MethodPost, p.url("blobs", "uploads")+"/", nil)
if err != nil {
return nil, err
}
resp, err := p.doRequestWithRetries(ctx, req, nil)
if err != nil {
return nil, err
}
switch resp.StatusCode {
case http.StatusOK, http.StatusAccepted, http.StatusNoContent:
default:
// TODO: log error
return nil, errors.Errorf("unexpected response: %s", resp.Status)
}
location := resp.Header.Get("Location")
// Support paths without host in location
if strings.HasPrefix(location, "/") {
// Support location string containing path and query
qmIndex := strings.Index(location, "?")
if qmIndex > 0 {
u := p.base
u.Path = location[:qmIndex]
u.RawQuery = location[qmIndex+1:]
location = u.String()
} else {
u := p.base
u.Path = location
location = u.String()
}
}
req, err = http.NewRequest(http.MethodPut, location, nil)
if err != nil {
return nil, err
}
q := req.URL.Query()
q.Add("digest", desc.Digest.String())
req.URL.RawQuery = q.Encode()
}
p.tracker.SetStatus(ref, Status{
Status: content.Status{
Ref: ref,
Total: desc.Size,
Expected: desc.Digest,
StartedAt: time.Now(),
},
})
// TODO: Support chunked upload
pr, pw := io.Pipe()
respC := make(chan *http.Response, 1)
req.Body = ioutil.NopCloser(pr)
req.ContentLength = desc.Size
go func() {
defer close(respC)
resp, err = p.doRequest(ctx, req)
if err != nil {
pr.CloseWithError(err)
return
}
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated, http.StatusNoContent:
default:
// TODO: log error
pr.CloseWithError(errors.Errorf("unexpected response: %s", resp.Status))
}
respC <- resp
}()
return &pushWriter{
base: p.dockerBase,
ref: ref,
pipe: pw,
responseC: respC,
isManifest: isManifest,
expected: desc.Digest,
tracker: p.tracker,
}, nil
}
type pushWriter struct {
base *dockerBase
ref string
pipe *io.PipeWriter
responseC <-chan *http.Response
isManifest bool
expected digest.Digest
tracker StatusTracker
}
func (pw *pushWriter) Write(p []byte) (n int, err error) {
status, err := pw.tracker.GetStatus(pw.ref)
if err != nil {
return n, err
}
n, err = pw.pipe.Write(p)
status.Offset += int64(n)
status.UpdatedAt = time.Now()
pw.tracker.SetStatus(pw.ref, status)
return
}
func (pw *pushWriter) Close() error {
return pw.pipe.Close()
}
func (pw *pushWriter) Status() (content.Status, error) {
status, err := pw.tracker.GetStatus(pw.ref)
if err != nil {
return content.Status{}, err
}
return status.Status, nil
}
func (pw *pushWriter) Digest() digest.Digest {
// TODO: Get rid of this function?
return pw.expected
}
func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
// Check whether read has already thrown an error
if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe {
return errors.Wrap(err, "pipe error before commit")
}
if err := pw.pipe.Close(); err != nil {
return err
}
// TODO: Update status to determine committing
// TODO: timeout waiting for response
resp := <-pw.responseC
if resp == nil {
return errors.New("no response")
}
// 201 is the specified return status; some registries return
// 200 or 204.
switch resp.StatusCode {
case http.StatusOK, http.StatusCreated, http.StatusNoContent:
default:
return errors.Errorf("unexpected status: %s", resp.Status)
}
status, err := pw.tracker.GetStatus(pw.ref)
if err != nil {
return errors.Wrap(err, "failed to get status")
}
if size > 0 && size != status.Offset {
return errors.Errorf("unxpected size %d, expected %d", status.Offset, size)
}
if expected == "" {
expected = status.Expected
}
actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
if err != nil {
return errors.Wrap(err, "invalid content digest in response")
}
if actual != expected {
return errors.Errorf("got digest %s, expected %s", actual, expected)
}
return nil
}
func (pw *pushWriter) Truncate(size int64) error {
// TODO: if blob close request and start new request at offset
// TODO: always error on manifest
return errors.New("cannot truncate remote upload")
}
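
A rough sketch, not part of the vendored code, of how the push writer above is typically driven: a Pusher obtained from the resolver (next file) returns a content.Writer from Push, and content.Copy streams the blob and commits it against the expected digest. The resolver, reference, and local content provider are assumed to exist already:

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/remotes"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// pushBlob streams one blob described by desc from the local provider to the
// registry referenced by ref.
func pushBlob(ctx context.Context, resolver remotes.Resolver, ref string, provider content.Provider, desc ocispec.Descriptor) error {
	pusher, err := resolver.Pusher(ctx, ref)
	if err != nil {
		return err
	}
	cw, err := pusher.Push(ctx, desc) // wraps ErrAlreadyExists if the blob is on the remote
	if err != nil {
		return err
	}
	defer cw.Close()

	ra, err := provider.ReaderAt(ctx, desc)
	if err != nil {
		return err
	}
	defer ra.Close()

	// content.Copy writes the data and calls Commit with the expected size and digest.
	return content.Copy(ctx, cw, content.NewReader(ra), desc.Size, desc.Digest)
}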


@@ -0,0 +1,425 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"net/http"
"net/url"
"path"
"strconv"
"strings"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/reference"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"golang.org/x/net/context/ctxhttp"
)
var (
// ErrNoToken is returned if a request is successful but the body does not
// contain an authorization token.
ErrNoToken = errors.New("authorization server did not include a token in the response")
// ErrInvalidAuthorization is used when credentials are passed to a server but
// those credentials are rejected.
ErrInvalidAuthorization = errors.New("authorization failed")
)
// Authorizer is used to authorize HTTP requests based on 401 HTTP responses.
// An Authorizer is responsible for caching tokens or credentials used by
// requests.
type Authorizer interface {
// Authorize sets the appropriate `Authorization` header on the given
// request.
//
// If no authorization is found for the request, the request remains
// unmodified. It may also add an `Authorization` header as
// "bearer <some bearer token>"
// "basic <base64 encoded credentials>"
Authorize(context.Context, *http.Request) error
// AddResponses adds a 401 response for the authorizer to consider when
// authorizing requests. The last response should be unauthorized and
// the previous responses are used to consider redirects and retries
// that may have led to the 401.
//
// If response is not handled, returns `ErrNotImplemented`
AddResponses(context.Context, []*http.Response) error
}
// ResolverOptions are used to configure a new Docker registry resolver
type ResolverOptions struct {
// Authorizer is used to authorize registry requests
Authorizer Authorizer
// Credentials provides username and secret given a host.
// If username is empty but a secret is given, that secret
// is interpreted as a long-lived token.
// Deprecated: use Authorizer
Credentials func(string) (string, string, error)
// Host provides the hostname given a namespace.
Host func(string) (string, error)
// PlainHTTP specifies to use plain http and not https
PlainHTTP bool
// Client is the http client to use when making registry requests
Client *http.Client
// Tracker is used to track uploads to the registry. This is used
// since the registry does not have upload tracking and the existing
// mechanism for getting blob upload status is expensive.
Tracker StatusTracker
}
// DefaultHost is the default host function.
func DefaultHost(ns string) (string, error) {
if ns == "docker.io" {
return "registry-1.docker.io", nil
}
return ns, nil
}
type dockerResolver struct {
auth Authorizer
host func(string) (string, error)
plainHTTP bool
client *http.Client
tracker StatusTracker
}
// NewResolver returns a new resolver to a Docker registry
func NewResolver(options ResolverOptions) remotes.Resolver {
if options.Tracker == nil {
options.Tracker = NewInMemoryTracker()
}
if options.Host == nil {
options.Host = DefaultHost
}
if options.Authorizer == nil {
options.Authorizer = NewAuthorizer(options.Client, options.Credentials)
}
return &dockerResolver{
auth: options.Authorizer,
host: options.Host,
plainHTTP: options.PlainHTTP,
client: options.Client,
tracker: options.Tracker,
}
}
var _ remotes.Resolver = &dockerResolver{}
func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocispec.Descriptor, error) {
refspec, err := reference.Parse(ref)
if err != nil {
return "", ocispec.Descriptor{}, err
}
if refspec.Object == "" {
return "", ocispec.Descriptor{}, reference.ErrObjectRequired
}
base, err := r.base(refspec)
if err != nil {
return "", ocispec.Descriptor{}, err
}
fetcher := dockerFetcher{
dockerBase: base,
}
var (
urls []string
dgst = refspec.Digest()
)
if dgst != "" {
if err := dgst.Validate(); err != nil {
// need to fail here, since we can't actually resolve the invalid
// digest.
return "", ocispec.Descriptor{}, err
}
// turns out, we have a valid digest, make a url.
urls = append(urls, fetcher.url("manifests", dgst.String()))
// fallback to blobs on not found.
urls = append(urls, fetcher.url("blobs", dgst.String()))
} else {
urls = append(urls, fetcher.url("manifests", refspec.Object))
}
ctx, err = contextWithRepositoryScope(ctx, refspec, false)
if err != nil {
return "", ocispec.Descriptor{}, err
}
for _, u := range urls {
req, err := http.NewRequest(http.MethodHead, u, nil)
if err != nil {
return "", ocispec.Descriptor{}, err
}
// set headers for all the types we support for resolution.
req.Header.Set("Accept", strings.Join([]string{
images.MediaTypeDockerSchema2Manifest,
images.MediaTypeDockerSchema2ManifestList,
ocispec.MediaTypeImageManifest,
ocispec.MediaTypeImageIndex, "*"}, ", "))
log.G(ctx).Debug("resolving")
resp, err := fetcher.doRequestWithRetries(ctx, req, nil)
if err != nil {
if errors.Cause(err) == ErrInvalidAuthorization {
err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization")
}
return "", ocispec.Descriptor{}, err
}
resp.Body.Close() // don't care about body contents.
if resp.StatusCode > 299 {
if resp.StatusCode == http.StatusNotFound {
continue
}
return "", ocispec.Descriptor{}, errors.Errorf("unexpected status code %v: %v", u, resp.Status)
}
// this is the only point at which we trust the registry. we use the
// content headers to assemble a descriptor for the name. when this becomes
// more robust, we mostly get this information from a secure trust store.
dgstHeader := digest.Digest(resp.Header.Get("Docker-Content-Digest"))
if dgstHeader != "" {
if err := dgstHeader.Validate(); err != nil {
return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
}
dgst = dgstHeader
}
if dgst == "" {
return "", ocispec.Descriptor{}, errors.Errorf("could not resolve digest for %v", ref)
}
var (
size int64
sizeHeader = resp.Header.Get("Content-Length")
)
size, err = strconv.ParseInt(sizeHeader, 10, 64)
if err != nil {
return "", ocispec.Descriptor{}, errors.Wrapf(err, "invalid size header: %q", sizeHeader)
}
if size < 0 {
return "", ocispec.Descriptor{}, errors.Errorf("%q in header not a valid size", sizeHeader)
}
desc := ocispec.Descriptor{
Digest: dgst,
MediaType: resp.Header.Get("Content-Type"), // need to strip disposition?
Size: size,
}
log.G(ctx).WithField("desc.digest", desc.Digest).Debug("resolved")
return ref, desc, nil
}
return "", ocispec.Descriptor{}, errors.Errorf("%v not found", ref)
}
func (r *dockerResolver) Fetcher(ctx context.Context, ref string) (remotes.Fetcher, error) {
refspec, err := reference.Parse(ref)
if err != nil {
return nil, err
}
base, err := r.base(refspec)
if err != nil {
return nil, err
}
return dockerFetcher{
dockerBase: base,
}, nil
}
func (r *dockerResolver) Pusher(ctx context.Context, ref string) (remotes.Pusher, error) {
refspec, err := reference.Parse(ref)
if err != nil {
return nil, err
}
// Manifests can be pushed by digest like any other object, but the passed in
// reference cannot take a digest without the associated content. A tag is allowed
// and will be used to tag pushed manifests.
if refspec.Object != "" && strings.Contains(refspec.Object, "@") {
return nil, errors.New("cannot use digest reference for push locator")
}
base, err := r.base(refspec)
if err != nil {
return nil, err
}
return dockerPusher{
dockerBase: base,
tag: refspec.Object,
tracker: r.tracker,
}, nil
}
type dockerBase struct {
refspec reference.Spec
base url.URL
client *http.Client
auth Authorizer
}
func (r *dockerResolver) base(refspec reference.Spec) (*dockerBase, error) {
var (
err error
base url.URL
)
host := refspec.Hostname()
base.Host = host
if r.host != nil {
base.Host, err = r.host(host)
if err != nil {
return nil, err
}
}
base.Scheme = "https"
if r.plainHTTP || strings.HasPrefix(base.Host, "localhost:") {
base.Scheme = "http"
}
prefix := strings.TrimPrefix(refspec.Locator, host+"/")
base.Path = path.Join("/v2", prefix)
return &dockerBase{
refspec: refspec,
base: base,
client: r.client,
auth: r.auth,
}, nil
}
func (r *dockerBase) url(ps ...string) string {
url := r.base
url.Path = path.Join(url.Path, path.Join(ps...))
return url.String()
}
func (r *dockerBase) authorize(ctx context.Context, req *http.Request) error {
// Set the Authorization header for the host if the authorizer has one
if r.auth != nil {
if err := r.auth.Authorize(ctx, req); err != nil {
return err
}
}
return nil
}
func (r *dockerBase) doRequest(ctx context.Context, req *http.Request) (*http.Response, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", req.URL.String()))
log.G(ctx).WithField("request.headers", req.Header).WithField("request.method", req.Method).Debug("do request")
if err := r.authorize(ctx, req); err != nil {
return nil, errors.Wrap(err, "failed to authorize")
}
resp, err := ctxhttp.Do(ctx, r.client, req)
if err != nil {
return nil, errors.Wrap(err, "failed to do request")
}
log.G(ctx).WithFields(logrus.Fields{
"status": resp.Status,
"response.headers": resp.Header,
}).Debug("fetch response received")
return resp, nil
}
func (r *dockerBase) doRequestWithRetries(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Response, error) {
resp, err := r.doRequest(ctx, req)
if err != nil {
return nil, err
}
responses = append(responses, resp)
req, err = r.retryRequest(ctx, req, responses)
if err != nil {
resp.Body.Close()
return nil, err
}
if req != nil {
resp.Body.Close()
return r.doRequestWithRetries(ctx, req, responses)
}
return resp, err
}
func (r *dockerBase) retryRequest(ctx context.Context, req *http.Request, responses []*http.Response) (*http.Request, error) {
if len(responses) > 5 {
return nil, nil
}
last := responses[len(responses)-1]
if last.StatusCode == http.StatusUnauthorized {
log.G(ctx).WithField("header", last.Header.Get("WWW-Authenticate")).Debug("Unauthorized")
if r.auth != nil {
if err := r.auth.AddResponses(ctx, responses); err == nil {
return copyRequest(req)
} else if !errdefs.IsNotImplemented(err) {
return nil, err
}
}
return nil, nil
} else if last.StatusCode == http.StatusMethodNotAllowed && req.Method == http.MethodHead {
// Support registries which have not properly implemented the HEAD method for
// manifests endpoint
if strings.Contains(req.URL.Path, "/manifests/") {
// TODO: copy request?
req.Method = http.MethodGet
return copyRequest(req)
}
}
// TODO: Handle 50x errors accounting for attempt history
return nil, nil
}
func copyRequest(req *http.Request) (*http.Request, error) {
ireq := *req
if ireq.GetBody != nil {
var err error
ireq.Body, err = ireq.GetBody()
if err != nil {
return nil, err
}
}
return &ireq, nil
}
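
A usage sketch, not part of the diff, tying the resolver and fetcher together for an anonymous pull; the image reference is a placeholder and error handling is minimal:

package example

import (
	"context"
	"io"
	"io/ioutil"

	"github.com/containerd/containerd/remotes/docker"
)

// fetchManifestBytes resolves ref to a descriptor and reads the manifest body.
func fetchManifestBytes(ctx context.Context, ref string) ([]byte, error) {
	// Zero-value options: anonymous pulls via http.DefaultClient; supply
	// Credentials or an Authorizer for private repositories.
	resolver := docker.NewResolver(docker.ResolverOptions{})

	name, desc, err := resolver.Resolve(ctx, ref) // e.g. "docker.io/library/ubuntu:latest"
	if err != nil {
		return nil, err
	}

	fetcher, err := resolver.Fetcher(ctx, name)
	if err != nil {
		return nil, err
	}

	rc, err := fetcher.Fetch(ctx, desc)
	if err != nil {
		return nil, err
	}
	defer rc.Close()

	return ioutil.ReadAll(io.LimitReader(rc, desc.Size))
}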


@@ -0,0 +1,595 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package schema1
import (
"bytes"
"context"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"io/ioutil"
"strconv"
"strings"
"sync"
"time"
"golang.org/x/sync/errgroup"
"github.com/containerd/containerd/archive/compression"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/remotes"
digest "github.com/opencontainers/go-digest"
specs "github.com/opencontainers/image-spec/specs-go"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
const (
manifestSizeLimit = 8e6 // 8MB
labelDockerSchema1EmptyLayer = "containerd.io/docker.schema1.empty-layer"
)
type blobState struct {
diffID digest.Digest
empty bool
}
// Converter converts schema1 manifests to schema2 on fetch
type Converter struct {
contentStore content.Store
fetcher remotes.Fetcher
pulledManifest *manifest
mu sync.Mutex
blobMap map[digest.Digest]blobState
layerBlobs map[digest.Digest]ocispec.Descriptor
}
// NewConverter returns a new converter
func NewConverter(contentStore content.Store, fetcher remotes.Fetcher) *Converter {
return &Converter{
contentStore: contentStore,
fetcher: fetcher,
blobMap: map[digest.Digest]blobState{},
layerBlobs: map[digest.Digest]ocispec.Descriptor{},
}
}
// Handle fetching descriptors for a docker media type
func (c *Converter) Handle(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch desc.MediaType {
case images.MediaTypeDockerSchema1Manifest:
if err := c.fetchManifest(ctx, desc); err != nil {
return nil, err
}
m := c.pulledManifest
if len(m.FSLayers) != len(m.History) {
return nil, errors.New("invalid schema 1 manifest, history and layer mismatch")
}
descs := make([]ocispec.Descriptor, 0, len(c.pulledManifest.FSLayers))
for i := range m.FSLayers {
if _, ok := c.blobMap[c.pulledManifest.FSLayers[i].BlobSum]; !ok {
empty, err := isEmptyLayer([]byte(m.History[i].V1Compatibility))
if err != nil {
return nil, err
}
// Do not attempt to download a known empty blob
if !empty {
descs = append([]ocispec.Descriptor{
{
MediaType: images.MediaTypeDockerSchema2LayerGzip,
Digest: c.pulledManifest.FSLayers[i].BlobSum,
Size: -1,
},
}, descs...)
}
c.blobMap[c.pulledManifest.FSLayers[i].BlobSum] = blobState{
empty: empty,
}
}
}
return descs, nil
case images.MediaTypeDockerSchema2LayerGzip:
if c.pulledManifest == nil {
return nil, errors.New("manifest required for schema 1 blob pull")
}
return nil, c.fetchBlob(ctx, desc)
default:
return nil, fmt.Errorf("%v not supported for schema 1 manifests", desc.MediaType)
}
}
// ConvertOptions provides options on converting a docker schema1 manifest.
type ConvertOptions struct {
// ManifestMediaType specifies the media type of the manifest OCI descriptor.
ManifestMediaType string
// ConfigMediaType specifies the media type of the manifest config OCI
// descriptor.
ConfigMediaType string
}
// ConvertOpt allows configuring a convert operation.
type ConvertOpt func(context.Context, *ConvertOptions) error
// UseDockerSchema2 is used to indicate that a schema1 manifest should be
// converted into the media types for a docker schema2 manifest.
func UseDockerSchema2() ConvertOpt {
return func(ctx context.Context, o *ConvertOptions) error {
o.ManifestMediaType = images.MediaTypeDockerSchema2Manifest
o.ConfigMediaType = images.MediaTypeDockerSchema2Config
return nil
}
}
// Convert a docker manifest to an OCI descriptor
func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.Descriptor, error) {
co := ConvertOptions{
ManifestMediaType: ocispec.MediaTypeImageManifest,
ConfigMediaType: ocispec.MediaTypeImageConfig,
}
for _, opt := range opts {
if err := opt(ctx, &co); err != nil {
return ocispec.Descriptor{}, err
}
}
history, diffIDs, err := c.schema1ManifestHistory()
if err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "schema 1 conversion failed")
}
var img ocispec.Image
if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal image from schema 1 history")
}
img.History = history
img.RootFS = ocispec.RootFS{
Type: "layers",
DiffIDs: diffIDs,
}
b, err := json.MarshalIndent(img, "", " ")
if err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image")
}
config := ocispec.Descriptor{
MediaType: co.ConfigMediaType,
Digest: digest.Canonical.FromBytes(b),
Size: int64(len(b)),
}
layers := make([]ocispec.Descriptor, len(diffIDs))
for i, diffID := range diffIDs {
layers[i] = c.layerBlobs[diffID]
}
manifest := ocispec.Manifest{
Versioned: specs.Versioned{
SchemaVersion: 2,
},
Config: config,
Layers: layers,
}
mb, err := json.MarshalIndent(manifest, "", " ")
if err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image")
}
desc := ocispec.Descriptor{
MediaType: co.ManifestMediaType,
Digest: digest.Canonical.FromBytes(mb),
Size: int64(len(mb)),
}
labels := map[string]string{}
labels["containerd.io/gc.ref.content.0"] = manifest.Config.Digest.String()
for i, ch := range manifest.Layers {
labels[fmt.Sprintf("containerd.io/gc.ref.content.%d", i+1)] = ch.Digest.String()
}
ref := remotes.MakeRefKey(ctx, desc)
if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config")
}
ref = remotes.MakeRefKey(ctx, config)
if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil {
return ocispec.Descriptor{}, errors.Wrap(err, "failed to write config")
}
return desc, nil
}
func (c *Converter) fetchManifest(ctx context.Context, desc ocispec.Descriptor) error {
log.G(ctx).Debug("fetch schema 1")
rc, err := c.fetcher.Fetch(ctx, desc)
if err != nil {
return err
}
b, err := ioutil.ReadAll(io.LimitReader(rc, manifestSizeLimit)) // limit to 8MB
rc.Close()
if err != nil {
return err
}
b, err = stripSignature(b)
if err != nil {
return err
}
var m manifest
if err := json.Unmarshal(b, &m); err != nil {
return err
}
c.pulledManifest = &m
return nil
}
func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) error {
log.G(ctx).Debug("fetch blob")
var (
ref = remotes.MakeRefKey(ctx, desc)
calc = newBlobStateCalculator()
compressMethod = compression.Gzip
)
// size may be unknown, set to zero for content ingest
ingestDesc := desc
if ingestDesc.Size == -1 {
ingestDesc.Size = 0
}
cw, err := content.OpenWriter(ctx, c.contentStore, content.WithRef(ref), content.WithDescriptor(ingestDesc))
if err != nil {
if !errdefs.IsAlreadyExists(err) {
return err
}
reuse, err := c.reuseLabelBlobState(ctx, desc)
if err != nil {
return err
}
if reuse {
return nil
}
ra, err := c.contentStore.ReaderAt(ctx, desc)
if err != nil {
return err
}
defer ra.Close()
r, err := compression.DecompressStream(content.NewReader(ra))
if err != nil {
return err
}
compressMethod = r.GetCompression()
_, err = io.Copy(calc, r)
r.Close()
if err != nil {
return err
}
} else {
defer cw.Close()
rc, err := c.fetcher.Fetch(ctx, desc)
if err != nil {
return err
}
defer rc.Close()
eg, _ := errgroup.WithContext(ctx)
pr, pw := io.Pipe()
eg.Go(func() error {
r, err := compression.DecompressStream(pr)
if err != nil {
return err
}
compressMethod = r.GetCompression()
_, err = io.Copy(calc, r)
r.Close()
pr.CloseWithError(err)
return err
})
eg.Go(func() error {
defer pw.Close()
return content.Copy(ctx, cw, io.TeeReader(rc, pw), ingestDesc.Size, ingestDesc.Digest)
})
if err := eg.Wait(); err != nil {
return err
}
}
if desc.Size == -1 {
info, err := c.contentStore.Info(ctx, desc.Digest)
if err != nil {
return errors.Wrap(err, "failed to get blob info")
}
desc.Size = info.Size
}
if compressMethod == compression.Uncompressed {
log.G(ctx).WithField("id", desc.Digest).Debugf("changed media type for uncompressed schema1 layer blob")
desc.MediaType = images.MediaTypeDockerSchema2Layer
}
state := calc.State()
cinfo := content.Info{
Digest: desc.Digest,
Labels: map[string]string{
"containerd.io/uncompressed": state.diffID.String(),
labelDockerSchema1EmptyLayer: strconv.FormatBool(state.empty),
},
}
if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil {
return errors.Wrap(err, "failed to update uncompressed label")
}
c.mu.Lock()
c.blobMap[desc.Digest] = state
c.layerBlobs[state.diffID] = desc
c.mu.Unlock()
return nil
}
func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) {
cinfo, err := c.contentStore.Info(ctx, desc.Digest)
if err != nil {
return false, errors.Wrap(err, "failed to get blob info")
}
desc.Size = cinfo.Size
diffID, ok := cinfo.Labels["containerd.io/uncompressed"]
if !ok {
return false, nil
}
emptyVal, ok := cinfo.Labels[labelDockerSchema1EmptyLayer]
if !ok {
return false, nil
}
isEmpty, err := strconv.ParseBool(emptyVal)
if err != nil {
log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse bool from label %s: %v", labelDockerSchema1EmptyLayer, isEmpty)
return false, nil
}
bState := blobState{empty: isEmpty}
if bState.diffID, err = digest.Parse(diffID); err != nil {
log.G(ctx).WithField("id", desc.Digest).Warnf("failed to parse digest from label containerd.io/uncompressed: %v", diffID)
return false, nil
}
// NOTE: there is no need to read the blob header to detect the compression
// method, because only two methods (uncompressed and gzip) are possible here.
if bState.diffID == desc.Digest {
desc.MediaType = images.MediaTypeDockerSchema2Layer
} else {
desc.MediaType = images.MediaTypeDockerSchema2LayerGzip
}
c.mu.Lock()
c.blobMap[desc.Digest] = bState
c.layerBlobs[bState.diffID] = desc
c.mu.Unlock()
return true, nil
}
func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest, error) {
if c.pulledManifest == nil {
return nil, nil, errors.New("missing schema 1 manifest for conversion")
}
m := *c.pulledManifest
if len(m.History) == 0 {
return nil, nil, errors.New("no history")
}
history := make([]ocispec.History, len(m.History))
diffIDs := []digest.Digest{}
for i := range m.History {
var h v1History
if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
return nil, nil, errors.Wrap(err, "failed to unmarshal history")
}
blobSum := m.FSLayers[i].BlobSum
state := c.blobMap[blobSum]
history[len(history)-i-1] = ocispec.History{
Author: h.Author,
Comment: h.Comment,
Created: &h.Created,
CreatedBy: strings.Join(h.ContainerConfig.Cmd, " "),
EmptyLayer: state.empty,
}
if !state.empty {
diffIDs = append([]digest.Digest{state.diffID}, diffIDs...)
}
}
return history, diffIDs, nil
}
type fsLayer struct {
BlobSum digest.Digest `json:"blobSum"`
}
type history struct {
V1Compatibility string `json:"v1Compatibility"`
}
type manifest struct {
FSLayers []fsLayer `json:"fsLayers"`
History []history `json:"history"`
}
type v1History struct {
Author string `json:"author,omitempty"`
Created time.Time `json:"created"`
Comment string `json:"comment,omitempty"`
ThrowAway *bool `json:"throwaway,omitempty"`
Size *int `json:"Size,omitempty"` // used before ThrowAway field
ContainerConfig struct {
Cmd []string `json:"Cmd,omitempty"`
} `json:"container_config,omitempty"`
}
// isEmptyLayer returns whether the v1 compatibility history describes an
// empty layer. A return value of true indicates the layer is empty,
// however false does not indicate non-empty.
func isEmptyLayer(compatHistory []byte) (bool, error) {
var h v1History
if err := json.Unmarshal(compatHistory, &h); err != nil {
return false, err
}
if h.ThrowAway != nil {
return *h.ThrowAway, nil
}
if h.Size != nil {
return *h.Size == 0, nil
}
// If no `Size` or `throwaway` field is given, then
// it cannot be determined whether the layer is empty
// from the history, return false
return false, nil
}
type signature struct {
Signatures []jsParsedSignature `json:"signatures"`
}
type jsParsedSignature struct {
Protected string `json:"protected"`
}
type protectedBlock struct {
Length int `json:"formatLength"`
Tail string `json:"formatTail"`
}
// joseBase64UrlDecode decodes the given string using the standard base64 url
// decoder but first adds the appropriate number of trailing '=' characters in
// accordance with the jose specification.
// http://tools.ietf.org/html/draft-ietf-jose-json-web-signature-31#section-2
func joseBase64UrlDecode(s string) ([]byte, error) {
switch len(s) % 4 {
case 0:
case 2:
s += "=="
case 3:
s += "="
default:
return nil, errors.New("illegal base64url string")
}
return base64.URLEncoding.DecodeString(s)
}
func stripSignature(b []byte) ([]byte, error) {
var sig signature
if err := json.Unmarshal(b, &sig); err != nil {
return nil, err
}
if len(sig.Signatures) == 0 {
return nil, errors.New("no signatures")
}
pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected)
if err != nil {
return nil, errors.Wrapf(err, "could not decode %s", sig.Signatures[0].Protected)
}
var protected protectedBlock
if err := json.Unmarshal(pb, &protected); err != nil {
return nil, err
}
if protected.Length > len(b) {
return nil, errors.New("invalid protected length block")
}
tail, err := joseBase64UrlDecode(protected.Tail)
if err != nil {
return nil, errors.Wrap(err, "invalid tail base 64 value")
}
return append(b[:protected.Length], tail...), nil
}
type blobStateCalculator struct {
empty bool
digester digest.Digester
}
func newBlobStateCalculator() *blobStateCalculator {
return &blobStateCalculator{
empty: true,
digester: digest.Canonical.Digester(),
}
}
func (c *blobStateCalculator) Write(p []byte) (int, error) {
if c.empty {
for _, b := range p {
if b != 0x00 {
c.empty = false
break
}
}
}
return c.digester.Hash().Write(p)
}
func (c *blobStateCalculator) State() blobState {
return blobState{
empty: c.empty,
diffID: c.digester.Digest(),
}
}
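
The converter above is meant to be driven as an image handler during pull. A simplified sketch, not in this commit, of that flow: Handle on the schema 1 manifest descriptor yields the non-empty layer descriptors, Handle on each layer fetches and converts it, and Convert finally assembles and stores the schema 2 / OCI manifest. The content store and fetcher are assumed to come from a containerd client and a resolver:

package example

import (
	"context"

	"github.com/containerd/containerd/content"
	"github.com/containerd/containerd/remotes"
	"github.com/containerd/containerd/remotes/docker/schema1"
	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// convertSchema1 pulls the schema 1 manifest desc and returns the descriptor
// of the converted manifest written into the content store.
func convertSchema1(ctx context.Context, store content.Store, fetcher remotes.Fetcher, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
	c := schema1.NewConverter(store, fetcher)

	// First pass: fetch the manifest and learn the layer descriptors.
	layers, err := c.Handle(ctx, desc)
	if err != nil {
		return ocispec.Descriptor{}, err
	}
	// Second pass: fetch each layer blob (sequentially here for simplicity;
	// containerd normally dispatches these through its handler machinery).
	for _, l := range layers {
		if _, err := c.Handle(ctx, l); err != nil {
			return ocispec.Descriptor{}, err
		}
	}
	return c.Convert(ctx)
}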


@@ -0,0 +1,76 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"context"
"net/url"
"sort"
"strings"
"github.com/containerd/containerd/reference"
)
// repositoryScope returns a repository scope string such as "repository:foo/bar:pull"
// for "host/foo/bar:baz".
// When push is true, both pull and push are added to the scope.
func repositoryScope(refspec reference.Spec, push bool) (string, error) {
u, err := url.Parse("dummy://" + refspec.Locator)
if err != nil {
return "", err
}
s := "repository:" + strings.TrimPrefix(u.Path, "/") + ":pull"
if push {
s += ",push"
}
return s, nil
}
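
// An illustrative sketch (the reference below is hypothetical):
//
//	refspec, _ := reference.Parse("registry.example.com/foo/bar:baz")
//	s, _ := repositoryScope(refspec, true)
//	// s == "repository:foo/bar:pull,push"
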
// tokenScopesKey is used for the key for context.WithValue().
// value: []string (e.g. {"repository:foo/bar:pull"})
type tokenScopesKey struct{}
// contextWithRepositoryScope returns a context with tokenScopesKey{} and the repository scope value.
func contextWithRepositoryScope(ctx context.Context, refspec reference.Spec, push bool) (context.Context, error) {
s, err := repositoryScope(refspec, push)
if err != nil {
return nil, err
}
return context.WithValue(ctx, tokenScopesKey{}, []string{s}), nil
}
// getTokenScopes returns deduplicated and sorted scopes from ctx.Value(tokenScopesKey{}) and params["scope"].
func getTokenScopes(ctx context.Context, params map[string]string) []string {
var scopes []string
if x := ctx.Value(tokenScopesKey{}); x != nil {
scopes = append(scopes, x.([]string)...)
}
if scope, ok := params["scope"]; ok {
for _, s := range scopes {
// Note: this comparison is unaware of the scope grammar (https://docs.docker.com/registry/spec/auth/scope/)
// So, "repository:foo/bar:pull,push" != "repository:foo/bar:push,pull", although semantically they are equal.
if s == scope {
// already appended
goto Sort
}
}
scopes = append(scopes, scope)
}
Sort:
sort.Strings(scopes)
return scopes
}
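
// An illustrative sketch of the merge behaviour:
//
//	ctx := context.WithValue(context.Background(), tokenScopesKey{}, []string{"repository:foo/bar:pull"})
//	scopes := getTokenScopes(ctx, map[string]string{"scope": "repository:foo/bar:pull"})
//	// scopes == []string{"repository:foo/bar:pull"} (the duplicate challenge scope is dropped)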

View File

@@ -0,0 +1,67 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"sync"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/pkg/errors"
)
// Status of a content operation
type Status struct {
content.Status
// UploadUUID is used by the Docker registry to reference blob uploads
UploadUUID string
}
// StatusTracker to track status of operations
type StatusTracker interface {
GetStatus(string) (Status, error)
SetStatus(string, Status)
}
type memoryStatusTracker struct {
statuses map[string]Status
m sync.Mutex
}
// NewInMemoryTracker returns a StatusTracker that tracks content status in-memory
func NewInMemoryTracker() StatusTracker {
return &memoryStatusTracker{
statuses: map[string]Status{},
}
}
func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) {
t.m.Lock()
defer t.m.Unlock()
status, ok := t.statuses[ref]
if !ok {
return Status{}, errors.Wrapf(errdefs.ErrNotFound, "status for ref %v", ref)
}
return status, nil
}
func (t *memoryStatusTracker) SetStatus(ref string, status Status) {
t.m.Lock()
t.statuses[ref] = status
t.m.Unlock()
}
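
// An illustrative sketch of standalone use (in practice the tracker is wired
// into the resolver configuration elsewhere in this package):
//
//	tracker := NewInMemoryTracker()
//	tracker.SetStatus("push-ref", Status{UploadUUID: "example-uuid"})
//	st, _ := tracker.GetStatus("push-ref")     // st.UploadUUID == "example-uuid"
//	_, err := tracker.GetStatus("missing-ref") // errdefs.IsNotFound(err) == true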

View File

@@ -0,0 +1,205 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotes
import (
"context"
"fmt"
"io"
"strings"
"sync"
"github.com/containerd/containerd/content"
"github.com/containerd/containerd/errdefs"
"github.com/containerd/containerd/images"
"github.com/containerd/containerd/log"
"github.com/containerd/containerd/platforms"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// MakeRefKey returns a unique reference for the descriptor. This reference can be
// used to look up ongoing processes related to the descriptor. This function
// may look to the context to namespace the reference appropriately.
func MakeRefKey(ctx context.Context, desc ocispec.Descriptor) string {
// TODO(stevvooe): Need better remote key selection here. Should be a
// product of the context, which may include information about the ongoing
// fetch process.
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest:
return "manifest-" + desc.Digest.String()
case images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
return "index-" + desc.Digest.String()
case images.MediaTypeDockerSchema2Layer, images.MediaTypeDockerSchema2LayerGzip,
images.MediaTypeDockerSchema2LayerForeign, images.MediaTypeDockerSchema2LayerForeignGzip,
ocispec.MediaTypeImageLayer, ocispec.MediaTypeImageLayerGzip,
ocispec.MediaTypeImageLayerNonDistributable, ocispec.MediaTypeImageLayerNonDistributableGzip:
return "layer-" + desc.Digest.String()
case images.MediaTypeDockerSchema2Config, ocispec.MediaTypeImageConfig:
return "config-" + desc.Digest.String()
default:
log.G(ctx).Warnf("reference for unknown type: %s", desc.MediaType)
return "unknown-" + desc.Digest.String()
}
}
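
// An illustrative sketch (ctx is any context.Context; the digest below is the
// sha256 of the empty string):
//
//	desc := ocispec.Descriptor{
//		MediaType: ocispec.MediaTypeImageManifest,
//		Digest:    "sha256:e3b0c44298fc1c149afbf4c8996fb92427ae41e4649b934ca495991b7852b855",
//	}
//	key := MakeRefKey(ctx, desc) // "manifest-sha256:e3b0..."
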
// FetchHandler returns a handler that will fetch all content into the ingester
// discovered in a call to Dispatch. Use with ChildrenHandler to do a full
// recursive fetch.
func FetchHandler(ingester content.Ingester, fetcher Fetcher) images.HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
"digest": desc.Digest,
"mediatype": desc.MediaType,
"size": desc.Size,
}))
switch desc.MediaType {
case images.MediaTypeDockerSchema1Manifest:
return nil, fmt.Errorf("%v not supported", desc.MediaType)
default:
err := fetch(ctx, ingester, fetcher, desc)
return nil, err
}
}
}
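
// A sketch of typical wiring, assuming contentStore is a content.Store and
// fetcher comes from Resolver.Fetcher: FetchHandler writes each blob into the
// store before images.ChildrenHandler reads it back to discover references.
//
//	handler := images.Handlers(
//		FetchHandler(contentStore, fetcher),
//		images.ChildrenHandler(contentStore),
//	)
//	err := images.Dispatch(ctx, handler, desc)
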
func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc ocispec.Descriptor) error {
log.G(ctx).Debug("fetch")
cw, err := content.OpenWriter(ctx, ingester, content.WithRef(MakeRefKey(ctx, desc)), content.WithDescriptor(desc))
if err != nil {
if errdefs.IsAlreadyExists(err) {
return nil
}
return err
}
defer cw.Close()
ws, err := cw.Status()
if err != nil {
return err
}
if ws.Offset == desc.Size {
// If writer is already complete, commit and return
err := cw.Commit(ctx, desc.Size, desc.Digest)
if err != nil && !errdefs.IsAlreadyExists(err) {
return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
}
return nil
}
rc, err := fetcher.Fetch(ctx, desc)
if err != nil {
return err
}
defer rc.Close()
return content.Copy(ctx, cw, rc, desc.Size, desc.Digest)
}
// PushHandler returns a handler that will push all content from the provider
// using a writer from the pusher.
func PushHandler(pusher Pusher, provider content.Provider) images.HandlerFunc {
return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
ctx = log.WithLogger(ctx, log.G(ctx).WithFields(logrus.Fields{
"digest": desc.Digest,
"mediatype": desc.MediaType,
"size": desc.Size,
}))
err := push(ctx, provider, pusher, desc)
return nil, err
}
}
func push(ctx context.Context, provider content.Provider, pusher Pusher, desc ocispec.Descriptor) error {
log.G(ctx).Debug("push")
cw, err := pusher.Push(ctx, desc)
if err != nil {
if !errdefs.IsAlreadyExists(err) {
return err
}
return nil
}
defer cw.Close()
ra, err := provider.ReaderAt(ctx, desc)
if err != nil {
return err
}
defer ra.Close()
rd := io.NewSectionReader(ra, 0, desc.Size)
return content.Copy(ctx, cw, rd, desc.Size, desc.Digest)
}
// PushContent pushes content specified by the descriptor from the provider.
//
// Base handlers can be provided which will be called before any push specific
// handlers.
func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, provider content.Provider, platform platforms.MatchComparer, baseHandlers ...images.Handler) error {
var m sync.Mutex
manifestStack := []ocispec.Descriptor{}
filterHandler := images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
switch desc.MediaType {
case images.MediaTypeDockerSchema2Manifest, ocispec.MediaTypeImageManifest,
images.MediaTypeDockerSchema2ManifestList, ocispec.MediaTypeImageIndex:
m.Lock()
manifestStack = append(manifestStack, desc)
m.Unlock()
return nil, images.ErrStopHandler
default:
return nil, nil
}
})
pushHandler := PushHandler(pusher, provider)
handlers := append(baseHandlers,
images.FilterPlatforms(images.ChildrenHandler(provider), platform),
filterHandler,
pushHandler,
)
if err := images.Dispatch(ctx, images.Handlers(handlers...), desc); err != nil {
return err
}
	// Iterate in reverse order as seen; a parent is always uploaded after its children
for i := len(manifestStack) - 1; i >= 0; i-- {
_, err := pushHandler(ctx, manifestStack[i])
if err != nil {
// TODO(estesp): until we have a more complete method for index push, we need to report
// missing dependencies in an index/manifest list by sensing the "400 Bad Request"
// as a marker for this problem
if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") {
return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry")
}
return err
}
}
return nil
}
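
// An illustrative sketch (pusher comes from Resolver.Pusher, contentStore is a
// content.Provider holding the image, and platformMatcher is any
// platforms.MatchComparer):
//
//	err := PushContent(ctx, pusher, manifestDesc, contentStore, platformMatcher)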

View File

@@ -0,0 +1,80 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package remotes
import (
"context"
"io"
"github.com/containerd/containerd/content"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
// Resolver provides remotes based on a locator.
type Resolver interface {
// Resolve attempts to resolve the reference into a name and descriptor.
//
// The argument `ref` should be a scheme-less URI representing the remote.
// Structurally, it has a host and path. The "host" can be used to directly
// reference a specific host or be matched against a specific handler.
//
// The returned name should be used to identify the referenced entity.
	// Depending on the remote namespace, this may be immutable or mutable.
// While the name may differ from ref, it should itself be a valid ref.
//
// If the resolution fails, an error will be returned.
Resolve(ctx context.Context, ref string) (name string, desc ocispec.Descriptor, err error)
// Fetcher returns a new fetcher for the provided reference.
// All content fetched from the returned fetcher will be
// from the namespace referred to by ref.
Fetcher(ctx context.Context, ref string) (Fetcher, error)
// Pusher returns a new pusher for the provided reference
Pusher(ctx context.Context, ref string) (Pusher, error)
}
// Fetcher fetches content
type Fetcher interface {
// Fetch the resource identified by the descriptor.
Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
}
// Pusher pushes content
type Pusher interface {
// Push returns a content writer for the given resource identified
// by the descriptor.
Push(ctx context.Context, d ocispec.Descriptor) (content.Writer, error)
}
// FetcherFunc allows package users to implement a Fetcher with just a
// function.
type FetcherFunc func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error)
// Fetch content
func (fn FetcherFunc) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
return fn(ctx, desc)
}
// PusherFunc allows package users to implement a Pusher with just a
// function.
type PusherFunc func(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error)

// Push content
func (fn PusherFunc) Push(ctx context.Context, desc ocispec.Descriptor) (content.Writer, error) {
	return fn(ctx, desc)
}
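
// An illustrative sketch: an in-memory Fetcher for tests, keyed by digest
// (the blobs map and the extra imports it needs are assumptions for the example):
//
//	var blobs map[digest.Digest][]byte
//	f := FetcherFunc(func(ctx context.Context, desc ocispec.Descriptor) (io.ReadCloser, error) {
//		b, ok := blobs[desc.Digest]
//		if !ok {
//			return nil, errdefs.ErrNotFound
//		}
//		return ioutil.NopCloser(bytes.NewReader(b)), nil
//	})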