vendor: update buildkit to 2f99651
Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>
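
Most of the vendored changes below apply a single pattern: error wrapping moves from github.com/pkg/errors (errors.Wrap, errors.Wrapf, errors.Errorf, errors.WithStack) to the standard library, using fmt.Errorf with the %w verb so that sentinel errors such as errdefs.ErrNotFound stay matchable. A minimal, self-contained sketch of that pattern follows; the sentinel and helper names are hypothetical illustrations, not code from this diff.

package main

import (
	"errors"
	"fmt"
)

// errNotFound stands in for a sentinel like errdefs.ErrNotFound.
var errNotFound = errors.New("not found")

// loadBlob shows the migrated style: fmt.Errorf("...: %w", err) replaces
// errors.Wrapf(err, "...") while keeping the wrapped sentinel reachable.
func loadBlob(dgst string) error {
	return fmt.Errorf("content %v: %w", dgst, errNotFound)
}

func main() {
	err := loadBlob("sha256:deadbeef")
	fmt.Println(err)                         // content sha256:deadbeef: not found
	fmt.Println(errors.Is(err, errNotFound)) // true: %w preserves the error chain
}

Because %w preserves the error chain, checks in the style of errdefs.IsAlreadyExists and errdefs.IsNotFound (which rely on errors.Is) keep working after the migration.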
vendor/github.com/containerd/containerd/api/LICENSE (generated, vendored): 191 changed lines
@@ -1,191 +0,0 @@
-
-Apache License
-Version 2.0, January 2004
-https://www.apache.org/licenses/
-
-TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
-
-1. Definitions.
-
-"License" shall mean the terms and conditions for use, reproduction,
-and distribution as defined by Sections 1 through 9 of this document.
-
-"Licensor" shall mean the copyright owner or entity authorized by
-the copyright owner that is granting the License.
-
-"Legal Entity" shall mean the union of the acting entity and all
-other entities that control, are controlled by, or are under common
-control with that entity. For the purposes of this definition,
-"control" means (i) the power, direct or indirect, to cause the
-direction or management of such entity, whether by contract or
-otherwise, or (ii) ownership of fifty percent (50%) or more of the
-outstanding shares, or (iii) beneficial ownership of such entity.
-
-"You" (or "Your") shall mean an individual or Legal Entity
-exercising permissions granted by this License.
-
-"Source" form shall mean the preferred form for making modifications,
-including but not limited to software source code, documentation
-source, and configuration files.
-
-"Object" form shall mean any form resulting from mechanical
-transformation or translation of a Source form, including but
-not limited to compiled object code, generated documentation,
-and conversions to other media types.
-
-"Work" shall mean the work of authorship, whether in Source or
-Object form, made available under the License, as indicated by a
-copyright notice that is included in or attached to the work
-(an example is provided in the Appendix below).
-
-"Derivative Works" shall mean any work, whether in Source or Object
-form, that is based on (or derived from) the Work and for which the
-editorial revisions, annotations, elaborations, or other modifications
-represent, as a whole, an original work of authorship. For the purposes
-of this License, Derivative Works shall not include works that remain
-separable from, or merely link (or bind by name) to the interfaces of,
-the Work and Derivative Works thereof.
-
-"Contribution" shall mean any work of authorship, including
-the original version of the Work and any modifications or additions
-to that Work or Derivative Works thereof, that is intentionally
-submitted to Licensor for inclusion in the Work by the copyright owner
-or by an individual or Legal Entity authorized to submit on behalf of
-the copyright owner. For the purposes of this definition, "submitted"
-means any form of electronic, verbal, or written communication sent
-to the Licensor or its representatives, including but not limited to
-communication on electronic mailing lists, source code control systems,
-and issue tracking systems that are managed by, or on behalf of, the
-Licensor for the purpose of discussing and improving the Work, but
-excluding communication that is conspicuously marked or otherwise
-designated in writing by the copyright owner as "Not a Contribution."
-
-"Contributor" shall mean Licensor and any individual or Legal Entity
-on behalf of whom a Contribution has been received by Licensor and
-subsequently incorporated within the Work.
-
-2. Grant of Copyright License. Subject to the terms and conditions of
-this License, each Contributor hereby grants to You a perpetual,
-worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-copyright license to reproduce, prepare Derivative Works of,
-publicly display, publicly perform, sublicense, and distribute the
-Work and such Derivative Works in Source or Object form.
-
-3. Grant of Patent License. Subject to the terms and conditions of
-this License, each Contributor hereby grants to You a perpetual,
-worldwide, non-exclusive, no-charge, royalty-free, irrevocable
-(except as stated in this section) patent license to make, have made,
-use, offer to sell, sell, import, and otherwise transfer the Work,
-where such license applies only to those patent claims licensable
-by such Contributor that are necessarily infringed by their
-Contribution(s) alone or by combination of their Contribution(s)
-with the Work to which such Contribution(s) was submitted. If You
-institute patent litigation against any entity (including a
-cross-claim or counterclaim in a lawsuit) alleging that the Work
-or a Contribution incorporated within the Work constitutes direct
-or contributory patent infringement, then any patent licenses
-granted to You under this License for that Work shall terminate
-as of the date such litigation is filed.
-
-4. Redistribution. You may reproduce and distribute copies of the
-Work or Derivative Works thereof in any medium, with or without
-modifications, and in Source or Object form, provided that You
-meet the following conditions:
-
-(a) You must give any other recipients of the Work or
-Derivative Works a copy of this License; and
-
-(b) You must cause any modified files to carry prominent notices
-stating that You changed the files; and
-
-(c) You must retain, in the Source form of any Derivative Works
-that You distribute, all copyright, patent, trademark, and
-attribution notices from the Source form of the Work,
-excluding those notices that do not pertain to any part of
-the Derivative Works; and
-
-(d) If the Work includes a "NOTICE" text file as part of its
-distribution, then any Derivative Works that You distribute must
-include a readable copy of the attribution notices contained
-within such NOTICE file, excluding those notices that do not
-pertain to any part of the Derivative Works, in at least one
-of the following places: within a NOTICE text file distributed
-as part of the Derivative Works; within the Source form or
-documentation, if provided along with the Derivative Works; or,
-within a display generated by the Derivative Works, if and
-wherever such third-party notices normally appear. The contents
-of the NOTICE file are for informational purposes only and
-do not modify the License. You may add Your own attribution
-notices within Derivative Works that You distribute, alongside
-or as an addendum to the NOTICE text from the Work, provided
-that such additional attribution notices cannot be construed
-as modifying the License.
-
-You may add Your own copyright statement to Your modifications and
-may provide additional or different license terms and conditions
-for use, reproduction, or distribution of Your modifications, or
-for any such Derivative Works as a whole, provided Your use,
-reproduction, and distribution of the Work otherwise complies with
-the conditions stated in this License.
-
-5. Submission of Contributions. Unless You explicitly state otherwise,
-any Contribution intentionally submitted for inclusion in the Work
-by You to the Licensor shall be under the terms and conditions of
-this License, without any additional terms or conditions.
-Notwithstanding the above, nothing herein shall supersede or modify
-the terms of any separate license agreement you may have executed
-with Licensor regarding such Contributions.
-
-6. Trademarks. This License does not grant permission to use the trade
-names, trademarks, service marks, or product names of the Licensor,
-except as required for reasonable and customary use in describing the
-origin of the Work and reproducing the content of the NOTICE file.
-
-7. Disclaimer of Warranty. Unless required by applicable law or
-agreed to in writing, Licensor provides the Work (and each
-Contributor provides its Contributions) on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
-implied, including, without limitation, any warranties or conditions
-of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
-PARTICULAR PURPOSE. You are solely responsible for determining the
-appropriateness of using or redistributing the Work and assume any
-risks associated with Your exercise of permissions under this License.
-
-8. Limitation of Liability. In no event and under no legal theory,
-whether in tort (including negligence), contract, or otherwise,
-unless required by applicable law (such as deliberate and grossly
-negligent acts) or agreed to in writing, shall any Contributor be
-liable to You for damages, including any direct, indirect, special,
-incidental, or consequential damages of any character arising as a
-result of this License or out of the use or inability to use the
-Work (including but not limited to damages for loss of goodwill,
-work stoppage, computer failure or malfunction, or any and all
-other commercial damages or losses), even if such Contributor
-has been advised of the possibility of such damages.
-
-9. Accepting Warranty or Additional Liability. While redistributing
-the Work or Derivative Works thereof, You may choose to offer,
-and charge a fee for, acceptance of support, warranty, indemnity,
-or other liability obligations and/or rights consistent with this
-License. However, in accepting such obligations, You may act only
-on Your own behalf and on Your sole responsibility, not on behalf
-of any other Contributor, and only if You agree to indemnify,
-defend, and hold each Contributor harmless for any liability
-incurred by, or claims asserted against, such Contributor by reason
-of your accepting any such warranty or additional liability.
-
-END OF TERMS AND CONDITIONS
-
-Copyright The containerd Authors
-
-Licensed under the Apache License, Version 2.0 (the "License");
-you may not use this file except in compliance with the License.
-You may obtain a copy of the License at
-
-https://www.apache.org/licenses/LICENSE-2.0
-
-Unless required by applicable law or agreed to in writing, software
-distributed under the License is distributed on an "AS IS" BASIS,
-WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
-See the License for the specific language governing permissions and
-limitations under the License.
vendor/github.com/containerd/containerd/content/helpers.go (generated, vendored): 32 changed lines
@@ -18,6 +18,8 @@ package content
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"math/rand"
 	"sync"
@@ -26,7 +28,6 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 var bufPool = sync.Pool{
@@ -76,7 +77,7 @@ func WriteBlob(ctx context.Context, cs Ingester, ref string, r io.Reader, desc o
 	cw, err := OpenWriter(ctx, cs, WithRef(ref), WithDescriptor(desc))
 	if err != nil {
 		if !errdefs.IsAlreadyExists(err) {
-			return errors.Wrap(err, "failed to open writer")
+			return fmt.Errorf("failed to open writer: %w", err)
 		}
 
 		return nil // all ready present
@@ -133,28 +134,28 @@ func OpenWriter(ctx context.Context, cs Ingester, opts ...WriterOpt) (Writer, er
 func Copy(ctx context.Context, cw Writer, r io.Reader, size int64, expected digest.Digest, opts ...Opt) error {
 	ws, err := cw.Status()
 	if err != nil {
-		return errors.Wrap(err, "failed to get status")
+		return fmt.Errorf("failed to get status: %w", err)
 	}
 
 	if ws.Offset > 0 {
 		r, err = seekReader(r, ws.Offset, size)
 		if err != nil {
-			return errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
+			return fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
 		}
 	}
 
 	copied, err := copyWithBuffer(cw, r)
 	if err != nil {
-		return errors.Wrap(err, "failed to copy")
+		return fmt.Errorf("failed to copy: %w", err)
 	}
 	if size != 0 && copied < size-ws.Offset {
 		// Short writes would return its own error, this indicates a read failure
-		return errors.Wrapf(io.ErrUnexpectedEOF, "failed to read expected number of bytes")
+		return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
 	}
 
 	if err := cw.Commit(ctx, size, expected, opts...); err != nil {
 		if !errdefs.IsAlreadyExists(err) {
-			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
+			return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
 		}
 	}
 
@@ -171,11 +172,11 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
 
 	copied, err := copyWithBuffer(cw, io.NewSectionReader(ra, ws.Offset, n))
 	if err != nil {
-		return errors.Wrap(err, "failed to copy")
+		return fmt.Errorf("failed to copy: %w", err)
 	}
 	if copied < n {
 		// Short writes would return its own error, this indicates a read failure
-		return errors.Wrap(io.ErrUnexpectedEOF, "failed to read expected number of bytes")
+		return fmt.Errorf("failed to read expected number of bytes: %w", io.ErrUnexpectedEOF)
 	}
 	return nil
 }
@@ -189,13 +190,13 @@ func CopyReaderAt(cw Writer, ra ReaderAt, n int64) error {
 func CopyReader(cw Writer, r io.Reader) (int64, error) {
 	ws, err := cw.Status()
 	if err != nil {
-		return 0, errors.Wrap(err, "failed to get status")
+		return 0, fmt.Errorf("failed to get status: %w", err)
 	}
 
 	if ws.Offset > 0 {
 		r, err = seekReader(r, ws.Offset, 0)
 		if err != nil {
-			return 0, errors.Wrapf(err, "unable to resume write to %v", ws.Ref)
+			return 0, fmt.Errorf("unable to resume write to %v: %w", ws.Ref, err)
 		}
 	}
 
@@ -211,7 +212,10 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
 	if ok {
 		nn, err := seeker.Seek(offset, io.SeekStart)
 		if nn != offset {
-			return nil, errors.Wrapf(err, "failed to seek to offset %v", offset)
+			if err == nil {
+				err = fmt.Errorf("unexpected seek location without seek error")
+			}
+			return nil, fmt.Errorf("failed to seek to offset %v: %w", offset, err)
 		}
 
 		if err != nil {
@@ -231,10 +235,10 @@ func seekReader(r io.Reader, offset, size int64) (io.Reader, error) {
 	// well then, let's just discard up to the offset
 	n, err := copyWithBuffer(io.Discard, io.LimitReader(r, offset))
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to discard to offset")
+		return nil, fmt.Errorf("failed to discard to offset: %w", err)
 	}
 	if n != offset {
-		return nil, errors.Errorf("unable to discard to offset")
+		return nil, errors.New("unable to discard to offset")
 	}
 
 	return r, nil
vendor/github.com/containerd/containerd/content/local/locks.go (generated, vendored): 6 changed lines
@@ -17,11 +17,11 @@
 package local
 
 import (
+	"fmt"
 	"sync"
 	"time"
 
 	"github.com/containerd/containerd/errdefs"
-	"github.com/pkg/errors"
 )
 
 // Handles locking references
@@ -44,9 +44,9 @@ func tryLock(ref string) error {
 	// Returning the duration may help developers distinguish dead locks (long duration) from
 	// lock contentions (short duration).
 	now := time.Now()
-	return errors.Wrapf(
-		errdefs.ErrUnavailable,
-		"ref %s locked for %s (since %s)", ref, now.Sub(v.since), v.since,
+	return fmt.Errorf(
+		"ref %s locked for %s (since %s): %w", ref, now.Sub(v.since), v.since,
+		errdefs.ErrUnavailable,
 	)
 }
 
vendor/github.com/containerd/containerd/content/local/readerat.go (generated, vendored): 7 changed lines
@@ -17,10 +17,9 @@
 package local
 
 import (
+	"fmt"
 	"os"
 
-	"github.com/pkg/errors"
-
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 )
@@ -40,7 +39,7 @@ func OpenReader(p string) (content.ReaderAt, error) {
 			return nil, err
 		}
 
-		return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
+		return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
 	}
 
 	fp, err := os.Open(p)
@@ -49,7 +48,7 @@ func OpenReader(p string) (content.ReaderAt, error) {
 			return nil, err
 		}
 
-		return nil, errors.Wrap(errdefs.ErrNotFound, "blob not found")
+		return nil, fmt.Errorf("blob not found: %w", errdefs.ErrNotFound)
 	}
 
 	return sizeReaderAt{size: fi.Size(), fp: fp}, nil
vendor/github.com/containerd/containerd/content/local/store.go (generated, vendored): 60 changed lines
@@ -36,7 +36,6 @@ import (
 
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 var bufPool = sync.Pool{
@@ -93,13 +92,13 @@ func NewLabeledStore(root string, ls LabelStore) (content.Store, error) {
 func (s *store) Info(ctx context.Context, dgst digest.Digest) (content.Info, error) {
 	p, err := s.blobPath(dgst)
 	if err != nil {
-		return content.Info{}, errors.Wrapf(err, "calculating blob info path")
+		return content.Info{}, fmt.Errorf("calculating blob info path: %w", err)
 	}
 
 	fi, err := os.Stat(p)
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
+			err = fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound)
 		}
 
 		return content.Info{}, err
@@ -128,12 +127,12 @@ func (s *store) info(dgst digest.Digest, fi os.FileInfo, labels map[string]strin
 func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.ReaderAt, error) {
 	p, err := s.blobPath(desc.Digest)
 	if err != nil {
-		return nil, errors.Wrapf(err, "calculating blob path for ReaderAt")
+		return nil, fmt.Errorf("calculating blob path for ReaderAt: %w", err)
 	}
 
 	reader, err := OpenReader(p)
 	if err != nil {
-		return nil, errors.Wrapf(err, "blob %s expected at %s", desc.Digest, p)
+		return nil, fmt.Errorf("blob %s expected at %s: %w", desc.Digest, p, err)
 	}
 
 	return reader, nil
@@ -146,7 +145,7 @@ func (s *store) ReaderAt(ctx context.Context, desc ocispec.Descriptor) (content.
 func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
 	bp, err := s.blobPath(dgst)
 	if err != nil {
-		return errors.Wrapf(err, "calculating blob path for delete")
+		return fmt.Errorf("calculating blob path for delete: %w", err)
 	}
 
 	if err := os.RemoveAll(bp); err != nil {
@@ -154,7 +153,7 @@ func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
 			return err
 		}
 
-		return errors.Wrapf(errdefs.ErrNotFound, "content %v", dgst)
+		return fmt.Errorf("content %v: %w", dgst, errdefs.ErrNotFound)
 	}
 
 	return nil
@@ -162,18 +161,18 @@ func (s *store) Delete(ctx context.Context, dgst digest.Digest) error {
 
 func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...string) (content.Info, error) {
 	if s.ls == nil {
-		return content.Info{}, errors.Wrapf(errdefs.ErrFailedPrecondition, "update not supported on immutable content store")
+		return content.Info{}, fmt.Errorf("update not supported on immutable content store: %w", errdefs.ErrFailedPrecondition)
 	}
 
 	p, err := s.blobPath(info.Digest)
 	if err != nil {
-		return content.Info{}, errors.Wrapf(err, "calculating blob path for update")
+		return content.Info{}, fmt.Errorf("calculating blob path for update: %w", err)
 	}
 
 	fi, err := os.Stat(p)
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrapf(errdefs.ErrNotFound, "content %v", info.Digest)
+			err = fmt.Errorf("content %v: %w", info.Digest, errdefs.ErrNotFound)
 		}
 
 		return content.Info{}, err
@@ -200,7 +199,7 @@ func (s *store) Update(ctx context.Context, info content.Info, fieldpaths ...str
 				all = true
 				labels = info.Labels
 			default:
-				return content.Info{}, errors.Wrapf(errdefs.ErrInvalidArgument, "cannot update %q field on content info %q", path, info.Digest)
+				return content.Info{}, fmt.Errorf("cannot update %q field on content info %q: %w", path, info.Digest, errdefs.ErrInvalidArgument)
 			}
 		}
 	} else {
@@ -377,7 +376,7 @@ func (s *store) status(ingestPath string) (content.Status, error) {
 	fi, err := os.Stat(dp)
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+			err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
 		}
 		return content.Status{}, err
 	}
@@ -385,19 +384,19 @@ func (s *store) status(ingestPath string) (content.Status, error) {
 	ref, err := readFileString(filepath.Join(ingestPath, "ref"))
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+			err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
 		}
 		return content.Status{}, err
 	}
 
 	startedAt, err := readFileTimestamp(filepath.Join(ingestPath, "startedat"))
 	if err != nil {
-		return content.Status{}, errors.Wrapf(err, "could not read startedat")
+		return content.Status{}, fmt.Errorf("could not read startedat: %w", err)
 	}
 
 	updatedAt, err := readFileTimestamp(filepath.Join(ingestPath, "updatedat"))
 	if err != nil {
-		return content.Status{}, errors.Wrapf(err, "could not read updatedat")
+		return content.Status{}, fmt.Errorf("could not read updatedat: %w", err)
 	}
 
 	// because we don't write updatedat on every write, the mod time may
@@ -460,7 +459,7 @@ func (s *store) Writer(ctx context.Context, opts ...content.WriterOpt) (content.
 	// TODO(AkihiroSuda): we could create a random string or one calculated based on the context
 	// https://github.com/containerd/containerd/issues/2129#issuecomment-380255019
 	if wOpts.Ref == "" {
-		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
+		return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument)
 	}
 	var lockErr error
 	for count := uint64(0); count < 10; count++ {
@@ -494,16 +493,16 @@ func (s *store) resumeStatus(ref string, total int64, digester digest.Digester)
 	path, _, data := s.ingestPaths(ref)
 	status, err := s.status(path)
 	if err != nil {
-		return status, errors.Wrap(err, "failed reading status of resume write")
+		return status, fmt.Errorf("failed reading status of resume write: %w", err)
 	}
 	if ref != status.Ref {
 		// NOTE(stevvooe): This is fairly catastrophic. Either we have some
 		// layout corruption or a hash collision for the ref key.
-		return status, errors.Errorf("ref key does not match: %v != %v", ref, status.Ref)
+		return status, fmt.Errorf("ref key does not match: %v != %v", ref, status.Ref)
 	}
 
 	if total > 0 && status.Total > 0 && total != status.Total {
-		return status, errors.Errorf("provided total differs from status: %v != %v", total, status.Total)
+		return status, fmt.Errorf("provided total differs from status: %v != %v", total, status.Total)
 	}
 
 	// TODO(stevvooe): slow slow slow!!, send to goroutine or use resumable hashes
@@ -527,10 +526,10 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
 	if expected != "" {
 		p, err := s.blobPath(expected)
 		if err != nil {
-			return nil, errors.Wrap(err, "calculating expected blob path for writer")
+			return nil, fmt.Errorf("calculating expected blob path for writer: %w", err)
 		}
 		if _, err := os.Stat(p); err == nil {
-			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", expected)
+			return nil, fmt.Errorf("content %v: %w", expected, errdefs.ErrAlreadyExists)
 		}
 	}
 
@@ -588,11 +587,12 @@ func (s *store) writer(ctx context.Context, ref string, total int64, expected di
 
 	fp, err := os.OpenFile(data, os.O_WRONLY|os.O_CREATE, 0666)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to open data file")
+		return nil, fmt.Errorf("failed to open data file: %w", err)
 	}
 
 	if _, err := fp.Seek(offset, io.SeekStart); err != nil {
-		return nil, errors.Wrap(err, "could not seek to current write offset")
+		fp.Close()
+		return nil, fmt.Errorf("could not seek to current write offset: %w", err)
 	}
 
 	return &writer{
@@ -614,7 +614,7 @@ func (s *store) Abort(ctx context.Context, ref string) error {
 	root := s.ingestRoot(ref)
 	if err := os.RemoveAll(root); err != nil {
 		if os.IsNotExist(err) {
-			return errors.Wrapf(errdefs.ErrNotFound, "ingest ref %q", ref)
+			return fmt.Errorf("ingest ref %q: %w", ref, errdefs.ErrNotFound)
 		}
 
 		return err
@@ -625,7 +625,7 @@ func (s *store) Abort(ctx context.Context, ref string) error {
 
 func (s *store) blobPath(dgst digest.Digest) (string, error) {
 	if err := dgst.Validate(); err != nil {
-		return "", errors.Wrapf(errdefs.ErrInvalidArgument, "cannot calculate blob path from invalid digest: %v", err)
+		return "", fmt.Errorf("cannot calculate blob path from invalid digest: %v: %w", err, errdefs.ErrInvalidArgument)
 	}
 
 	return filepath.Join(s.root, "blobs", dgst.Algorithm().String(), dgst.Hex()), nil
@@ -664,14 +664,14 @@ func readFileTimestamp(p string) (time.Time, error) {
 	b, err := os.ReadFile(p)
 	if err != nil {
 		if os.IsNotExist(err) {
-			err = errors.Wrap(errdefs.ErrNotFound, err.Error())
+			err = fmt.Errorf("%s: %w", err.Error(), errdefs.ErrNotFound)
 		}
 		return time.Time{}, err
 	}
 
 	var t time.Time
 	if err := t.UnmarshalText(b); err != nil {
-		return time.Time{}, errors.Wrapf(err, "could not parse timestamp file %v", p)
+		return time.Time{}, fmt.Errorf("could not parse timestamp file %v: %w", p, err)
 	}
 
 	return t, nil
@@ -689,16 +689,16 @@ func writeToCompletion(path string, data []byte, mode os.FileMode) error {
 	tmp := fmt.Sprintf("%s.tmp", path)
 	f, err := os.OpenFile(tmp, os.O_RDWR|os.O_CREATE|os.O_TRUNC|os.O_SYNC, mode)
 	if err != nil {
-		return errors.Wrap(err, "create tmp file")
+		return fmt.Errorf("create tmp file: %w", err)
 	}
 	_, err = f.Write(data)
 	f.Close()
 	if err != nil {
-		return errors.Wrap(err, "write tmp file")
+		return fmt.Errorf("write tmp file: %w", err)
 	}
 	err = os.Rename(tmp, path)
 	if err != nil {
-		return errors.Wrap(err, "rename tmp file")
+		return fmt.Errorf("rename tmp file: %w", err)
 	}
 	return nil
 }
vendor/github.com/containerd/containerd/content/local/store_bsd.go (generated, vendored): 2 changed lines
@@ -27,7 +27,7 @@ import (
 
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(st.Atimespec.Sec), int64(st.Atimespec.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
+		return time.Unix(st.Atimespec.Unix())
	}
 
 	return fi.ModTime()
vendor/github.com/containerd/containerd/content/local/store_openbsd.go (generated, vendored): 2 changed lines
@@ -27,7 +27,7 @@ import (
 
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
+		return time.Unix(st.Atim.Unix())
	}
 
 	return fi.ModTime()
vendor/github.com/containerd/containerd/content/local/store_unix.go (generated, vendored): 2 changed lines
@@ -27,7 +27,7 @@ import (
 
 func getATime(fi os.FileInfo) time.Time {
 	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
-		return time.Unix(int64(st.Atim.Sec), int64(st.Atim.Nsec)) //nolint: unconvert // int64 conversions ensure the line compiles for 32-bit systems as well.
+		return time.Unix(st.Atim.Unix())
	}
 
 	return fi.ModTime()
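
The three getATime changes above (store_bsd.go, store_openbsd.go, store_unix.go) rely on syscall.Timespec.Unix(), which already returns the (sec, nsec) pair as int64, so the explicit conversions and the nolint comment become unnecessary. A small standalone sketch of the same idea, assuming Linux field names and a hypothetical atime helper:

//go:build linux

package main

import (
	"fmt"
	"os"
	"syscall"
	"time"
)

// atime mirrors the simplified getATime: Timespec.Unix() returns
// (sec int64, nsec int64), which feeds straight into time.Unix.
func atime(fi os.FileInfo) time.Time {
	if st, ok := fi.Sys().(*syscall.Stat_t); ok {
		return time.Unix(st.Atim.Unix())
	}
	return fi.ModTime()
}

func main() {
	fi, err := os.Stat("/etc/hostname")
	if err != nil {
		panic(err)
	}
	fmt.Println("access time:", atime(fi))
}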
vendor/github.com/containerd/containerd/content/local/writer.go (generated, vendored): 27 changed lines
@@ -18,6 +18,8 @@ package local
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"os"
 	"path/filepath"
@@ -28,7 +30,6 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/log"
 	"github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
 )
 
 // writer represents a write transaction against the blob store.
@@ -88,30 +89,30 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
 	w.fp = nil
 
 	if fp == nil {
-		return errors.Wrap(errdefs.ErrFailedPrecondition, "cannot commit on closed writer")
+		return fmt.Errorf("cannot commit on closed writer: %w", errdefs.ErrFailedPrecondition)
 	}
 
 	if err := fp.Sync(); err != nil {
 		fp.Close()
-		return errors.Wrap(err, "sync failed")
+		return fmt.Errorf("sync failed: %w", err)
 	}
 
 	fi, err := fp.Stat()
 	closeErr := fp.Close()
 	if err != nil {
-		return errors.Wrap(err, "stat on ingest file failed")
+		return fmt.Errorf("stat on ingest file failed: %w", err)
 	}
 	if closeErr != nil {
-		return errors.Wrap(err, "failed to close ingest file")
+		return fmt.Errorf("failed to close ingest file: %w", closeErr)
 	}
 
 	if size > 0 && size != fi.Size() {
-		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit size %d, expected %d", fi.Size(), size)
+		return fmt.Errorf("unexpected commit size %d, expected %d: %w", fi.Size(), size, errdefs.ErrFailedPrecondition)
 	}
 
 	dgst := w.digester.Digest()
 	if expected != "" && expected != dgst {
-		return errors.Wrapf(errdefs.ErrFailedPrecondition, "unexpected commit digest %s, expected %s", dgst, expected)
+		return fmt.Errorf("unexpected commit digest %s, expected %s: %w", dgst, expected, errdefs.ErrFailedPrecondition)
 	}
 
 	var (
@@ -127,9 +128,9 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
 	if _, err := os.Stat(target); err == nil {
 		// collision with the target file!
 		if err := os.RemoveAll(w.path); err != nil {
-			log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
+			log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
 		}
-		return errors.Wrapf(errdefs.ErrAlreadyExists, "content %v", dgst)
+		return fmt.Errorf("content %v: %w", dgst, errdefs.ErrAlreadyExists)
 	}
 
 	if err := os.Rename(ingest, target); err != nil {
@@ -142,17 +143,17 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
 
 	commitTime := time.Now()
 	if err := os.Chtimes(target, commitTime, commitTime); err != nil {
-		log.G(ctx).WithField("digest", dgst).Errorf("failed to change file time to commit time")
+		log.G(ctx).WithField("digest", dgst).Error("failed to change file time to commit time")
 	}
 
 	// clean up!!
 	if err := os.RemoveAll(w.path); err != nil {
-		log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Errorf("failed to remove ingest directory")
+		log.G(ctx).WithField("ref", w.ref).WithField("path", w.path).Error("failed to remove ingest directory")
 	}
 
 	if w.s.ls != nil && base.Labels != nil {
 		if err := w.s.ls.Set(dgst, base.Labels); err != nil {
-			log.G(ctx).WithField("digest", dgst).Errorf("failed to set labels")
+			log.G(ctx).WithField("digest", dgst).Error("failed to set labels")
 		}
 	}
 
@@ -165,7 +166,7 @@ func (w *writer) Commit(ctx context.Context, size int64, expected digest.Digest,
 	// NOTE: Windows does not support this operation
 	if runtime.GOOS != "windows" {
 		if err := os.Chmod(target, (fi.Mode()&os.ModePerm)&^0333); err != nil {
-			log.G(ctx).WithField("ref", w.ref).Errorf("failed to make readonly")
+			log.G(ctx).WithField("ref", w.ref).Error("failed to make readonly")
 		}
 	}
 
vendor/github.com/containerd/containerd/content/proxy/content_writer.go (generated, vendored): 12 changed lines
@@ -18,13 +18,13 @@ package proxy
 
 import (
 	"context"
+	"fmt"
 	"io"
 
 	contentapi "github.com/containerd/containerd/api/services/content/v1"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	digest "github.com/opencontainers/go-digest"
-	"github.com/pkg/errors"
 )
 
 type remoteWriter struct {
@@ -57,7 +57,7 @@ func (rw *remoteWriter) Status() (content.Status, error) {
 		Action: contentapi.WriteActionStat,
 	})
 	if err != nil {
-		return content.Status{}, errors.Wrap(errdefs.FromGRPC(err), "error getting writer status")
+		return content.Status{}, fmt.Errorf("error getting writer status: %w", errdefs.FromGRPC(err))
 	}
 
 	return content.Status{
@@ -82,7 +82,7 @@ func (rw *remoteWriter) Write(p []byte) (n int, err error) {
 		Data: p,
 	})
 	if err != nil {
-		return 0, errors.Wrap(errdefs.FromGRPC(err), "failed to send write")
+		return 0, fmt.Errorf("failed to send write: %w", errdefs.FromGRPC(err))
 	}
 
 	n = int(resp.Offset - offset)
@@ -119,15 +119,15 @@ func (rw *remoteWriter) Commit(ctx context.Context, size int64, expected digest.
 		Labels: base.Labels,
 	})
 	if err != nil {
-		return errors.Wrap(errdefs.FromGRPC(err), "commit failed")
+		return fmt.Errorf("commit failed: %w", errdefs.FromGRPC(err))
 	}
 
 	if size != 0 && resp.Offset != size {
-		return errors.Errorf("unexpected size: %v != %v", resp.Offset, size)
+		return fmt.Errorf("unexpected size: %v != %v", resp.Offset, size)
 	}
 
 	if expected != "" && resp.Digest != expected {
-		return errors.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
+		return fmt.Errorf("unexpected digest: %v != %v", resp.Digest, expected)
 	}
 
 	rw.digest = resp.Digest
vendor/github.com/containerd/containerd/errdefs/errors.go (generated, vendored): 5 changed lines
@@ -17,7 +17,7 @@
 // Package errdefs defines the common errors used throughout containerd
 // packages.
 //
-// Use with errors.Wrap and error.Wrapf to add context to an error.
+// Use with fmt.Errorf to add context to an error.
 //
 // To detect an error class, use the IsXXX functions to tell whether an error
 // is of a certain type.
@@ -28,8 +28,7 @@ package errdefs
 
 import (
 	"context"
-
-	"github.com/pkg/errors"
+	"errors"
 )
 
 // Definitions of common error types used throughout containerd. All containerd
vendor/github.com/containerd/containerd/errdefs/grpc.go (generated, vendored): 10 changed lines
@@ -18,9 +18,9 @@ package errdefs
 
 import (
 	"context"
+	"fmt"
 	"strings"
 
-	"github.com/pkg/errors"
 	"google.golang.org/grpc/codes"
 	"google.golang.org/grpc/status"
 )
@@ -68,9 +68,9 @@ func ToGRPC(err error) error {
 // ToGRPCf maps the error to grpc error codes, assembling the formatting string
 // and combining it with the target error string.
 //
-// This is equivalent to errors.ToGRPC(errors.Wrapf(err, format, args...))
+// This is equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
 func ToGRPCf(err error, format string, args ...interface{}) error {
-	return ToGRPC(errors.Wrapf(err, format, args...))
+	return ToGRPC(fmt.Errorf("%s: %w", fmt.Sprintf(format, args...), err))
 }
 
 // FromGRPC returns the underlying error from a grpc service based on the grpc error code
@@ -104,9 +104,9 @@ func FromGRPC(err error) error {
 
 	msg := rebaseMessage(cls, err)
 	if msg != "" {
-		err = errors.Wrap(cls, msg)
+		err = fmt.Errorf("%s: %w", msg, cls)
 	} else {
-		err = errors.WithStack(cls)
+		err = cls
 	}
 
 	return err
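
The rewritten ToGRPCf above keeps its documented behaviour: it formats the message, wraps the original error with %w, and then maps it to a gRPC status. A short usage sketch, assuming the containerd errdefs package; the image reference is a made-up example and not taken from this diff:

package main

import (
	"fmt"

	"github.com/containerd/containerd/errdefs"
	"google.golang.org/grpc/status"
)

func main() {
	// Equivalent to errdefs.ToGRPC(fmt.Errorf("%s: %w",
	// fmt.Sprintf("pull %s", ref), errdefs.ErrNotFound)).
	ref := "docker.io/library/busybox:latest"
	err := errdefs.ToGRPCf(errdefs.ErrNotFound, "pull %s", ref)

	s, _ := status.FromError(err)
	fmt.Println(s.Code())    // NotFound
	fmt.Println(s.Message()) // pull docker.io/library/busybox:latest: not found
}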
vendor/github.com/containerd/containerd/filters/parser.go (generated, vendored): 9 changed lines
@@ -21,7 +21,6 @@ import (
 	"io"
 
 	"github.com/containerd/containerd/errdefs"
-	"github.com/pkg/errors"
 )
 
 /*
@@ -71,7 +70,7 @@ func ParseAll(ss ...string) (Filter, error) {
 	for _, s := range ss {
 		f, err := Parse(s)
 		if err != nil {
-			return nil, errors.Wrap(errdefs.ErrInvalidArgument, err.Error())
+			return nil, fmt.Errorf("%s: %w", err.Error(), errdefs.ErrInvalidArgument)
 		}
 
 		fs = append(fs, f)
@@ -90,7 +89,7 @@ func (p *parser) parse() (Filter, error) {
 
 	ss, err := p.selectors()
 	if err != nil {
-		return nil, errors.Wrap(err, "filters")
+		return nil, fmt.Errorf("filters: %w", err)
 	}
 
 	return ss, nil
@@ -284,9 +283,9 @@ func (pe parseError) Error() string {
 }
 
 func (p *parser) mkerr(pos int, format string, args ...interface{}) error {
-	return errors.Wrap(parseError{
+	return fmt.Errorf("parse error: %w", parseError{
 		input: p.input,
 		pos:   pos,
 		msg:   fmt.Sprintf(format, args...),
-	}, "parse error")
+	})
 }
vendor/github.com/containerd/containerd/filters/quote.go (generated, vendored): 3 changed lines
@@ -17,9 +17,8 @@
 package filters
 
 import (
+	"errors"
 	"unicode/utf8"
-
-	"github.com/pkg/errors"
 )
 
 // NOTE(stevvooe): Most of this code in this file is copied from the stdlib
vendor/github.com/containerd/containerd/images/handlers.go (generated, vendored): 10 changed lines
@@ -18,6 +18,7 @@ package images
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"sort"
 
@@ -25,7 +26,6 @@ import (
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/platforms"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 	"golang.org/x/sync/errgroup"
 	"golang.org/x/sync/semaphore"
 )
@@ -33,17 +33,17 @@ import (
 var (
 	// ErrSkipDesc is used to skip processing of a descriptor and
 	// its descendants.
-	ErrSkipDesc = fmt.Errorf("skip descriptor")
+	ErrSkipDesc = errors.New("skip descriptor")
 
 	// ErrStopHandler is used to signify that the descriptor
 	// has been handled and should not be handled further.
 	// This applies only to a single descriptor in a handler
 	// chain and does not apply to descendant descriptors.
-	ErrStopHandler = fmt.Errorf("stop handler")
+	ErrStopHandler = errors.New("stop handler")
 
 	// ErrEmptyWalk is used when the WalkNotEmpty handlers return no
 	// children (e.g.: they were filtered out).
-	ErrEmptyWalk = fmt.Errorf("image might be filtered out")
+	ErrEmptyWalk = errors.New("image might be filtered out")
 )
 
 // Handler handles image manifests
@@ -308,7 +308,7 @@ func LimitManifests(f HandlerFunc, m platforms.MatchComparer, n int) HandlerFunc
 
 		if n > 0 {
 			if len(children) == 0 {
-				return children, errors.Wrap(errdefs.ErrNotFound, "no match for platform in manifest")
+				return children, fmt.Errorf("no match for platform in manifest: %w", errdefs.ErrNotFound)
 			}
 			if len(children) > n {
 				children = children[:n]
vendor/github.com/containerd/containerd/images/image.go (generated, vendored): 21 changed lines
@@ -29,7 +29,6 @@ import (
 	"github.com/containerd/containerd/platforms"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 // Image provides the model for how containerd views container images.
@@ -115,7 +114,7 @@ func (image *Image) Size(ctx context.Context, provider content.Provider, platfor
 	var size int64
 	return size, Walk(ctx, Handlers(HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
 		if desc.Size < 0 {
-			return nil, errors.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
+			return nil, fmt.Errorf("invalid size %v in %v (%v)", desc.Size, desc.Digest, desc.MediaType)
 		}
 		size += desc.Size
 		return nil, nil
@@ -156,7 +155,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 			}
 
 			if err := validateMediaType(p, desc.MediaType); err != nil {
-				return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest)
+				return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
 			}
 
 			var manifest ocispec.Manifest
@@ -200,7 +199,7 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 			}
 
 			if err := validateMediaType(p, desc.MediaType); err != nil {
-				return nil, errors.Wrapf(err, "manifest: invalid desc %s", desc.Digest)
+				return nil, fmt.Errorf("manifest: invalid desc %s: %w", desc.Digest, err)
 			}
 
 			var idx ocispec.Index
@@ -236,15 +235,15 @@ func Manifest(ctx context.Context, provider content.Provider, image ocispec.Desc
 			}
 			return descs, nil
 		}
-		return nil, errors.Wrapf(errdefs.ErrNotFound, "unexpected media type %v for %v", desc.MediaType, desc.Digest)
+		return nil, fmt.Errorf("unexpected media type %v for %v: %w", desc.MediaType, desc.Digest, errdefs.ErrNotFound)
 	}), image); err != nil {
 		return ocispec.Manifest{}, err
 	}
 
 	if len(m) == 0 {
-		err := errors.Wrapf(errdefs.ErrNotFound, "manifest %v", image.Digest)
+		err := fmt.Errorf("manifest %v: %w", image.Digest, errdefs.ErrNotFound)
 		if wasIndex {
-			err = errors.Wrapf(errdefs.ErrNotFound, "no match for platform in manifest %v", image.Digest)
+			err = fmt.Errorf("no match for platform in manifest %v: %w", image.Digest, errdefs.ErrNotFound)
 		}
 		return ocispec.Manifest{}, err
 	}
@@ -309,7 +308,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip
 			return false, []ocispec.Descriptor{image}, nil, []ocispec.Descriptor{image}, nil
 		}
 
-		return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", image.Digest)
+		return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", image.Digest, err)
 	}
 
 	// TODO(stevvooe): It is possible that referenced conponents could have
@@ -324,7 +323,7 @@ func Check(ctx context.Context, provider content.Provider, image ocispec.Descrip
 			missing = append(missing, desc)
 			continue
 		} else {
-			return false, nil, nil, nil, errors.Wrapf(err, "failed to check image %v", desc.Digest)
+			return false, nil, nil, nil, fmt.Errorf("failed to check image %v: %w", desc.Digest, err)
 		}
 	}
 	ra.Close()
@@ -346,7 +345,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
 		}
 
 		if err := validateMediaType(p, desc.MediaType); err != nil {
-			return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest)
+			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
 		}
 
 		// TODO(stevvooe): We just assume oci manifest, for now. There may be
@@ -365,7 +364,7 @@ func Children(ctx context.Context, provider content.Provider, desc ocispec.Descr
 		}
 
 		if err := validateMediaType(p, desc.MediaType); err != nil {
-			return nil, errors.Wrapf(err, "children: invalid desc %s", desc.Digest)
+			return nil, fmt.Errorf("children: invalid desc %s: %w", desc.Digest, err)
 		}
 
 		var index ocispec.Index
vendor/github.com/containerd/containerd/images/mediatypes.go (generated, vendored): 4 changed lines
@@ -18,12 +18,12 @@ package images
 
 import (
 	"context"
+	"fmt"
 	"sort"
 	"strings"
 
 	"github.com/containerd/containerd/errdefs"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 // mediatype definitions for image components handled in containerd.
@@ -87,7 +87,7 @@ func DiffCompression(ctx context.Context, mediaType string) (string, error) {
 		}
 		return "", nil
 	default:
-		return "", errors.Wrapf(errdefs.ErrNotImplemented, "unrecognised mediatype %s", mediaType)
+		return "", fmt.Errorf("unrecognised mediatype %s: %w", mediaType, errdefs.ErrNotImplemented)
 	}
 }
 
vendor/github.com/containerd/containerd/labels/labels.go (generated, vendored): 4 changed lines
@@ -19,7 +19,3 @@ package labels
 // LabelUncompressed is added to compressed layer contents.
 // The value is digest of the uncompressed content.
 const LabelUncompressed = "containerd.io/uncompressed"
-
-// LabelSharedNamespace is added to a namespace to allow that namespaces
-// contents to be shared.
-const LabelSharedNamespace = "containerd.io/namespace.shareable"
vendor/github.com/containerd/containerd/labels/validate.go (generated, vendored): 5 changed lines
@@ -17,8 +17,9 @@
 package labels
 
 import (
+	"fmt"
+
 	"github.com/containerd/containerd/errdefs"
-	"github.com/pkg/errors"
 )
 
 const (
@@ -31,7 +32,7 @@ func Validate(k, v string) error {
 		if len(k) > 10 {
 			k = k[:10]
 		}
-		return errors.Wrapf(errdefs.ErrInvalidArgument, "label key and value greater than maximum size (%d bytes), key: %s", maxSize, k)
+		return fmt.Errorf("label key and value greater than maximum size (%d bytes), key: %s: %w", maxSize, k, errdefs.ErrInvalidArgument)
 	}
 	return nil
 }
vendor/github.com/containerd/containerd/platforms/cpuinfo.go (generated, vendored): 6 changed lines
@@ -18,6 +18,7 @@ package platforms
 
 import (
 	"bufio"
+	"fmt"
 	"os"
 	"runtime"
 	"strings"
@@ -25,7 +26,6 @@ import (
 
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/log"
-	"github.com/pkg/errors"
 )
 
 // Present the ARM instruction set architecture, eg: v7, v8
@@ -48,7 +48,7 @@ func cpuVariant() string {
 // by ourselves. We can just parse these information from /proc/cpuinfo
 func getCPUInfo(pattern string) (info string, err error) {
 	if !isLinuxOS(runtime.GOOS) {
-		return "", errors.Wrapf(errdefs.ErrNotImplemented, "getCPUInfo for OS %s", runtime.GOOS)
+		return "", fmt.Errorf("getCPUInfo for OS %s: %w", runtime.GOOS, errdefs.ErrNotImplemented)
 	}
 
 	cpuinfo, err := os.Open("/proc/cpuinfo")
@@ -75,7 +75,7 @@ func getCPUInfo(pattern string) (info string, err error) {
 		return "", err
 	}
 
-	return "", errors.Wrapf(errdefs.ErrNotFound, "getCPUInfo for pattern: %s", pattern)
+	return "", fmt.Errorf("getCPUInfo for pattern: %s: %w", pattern, errdefs.ErrNotFound)
 }
 
 func getCPUVariant() string {
vendor/github.com/containerd/containerd/platforms/platforms.go (generated, vendored): 10 changed lines
@@ -107,6 +107,7 @@
 package platforms
 
 import (
+	"fmt"
 	"path"
 	"regexp"
 	"runtime"
@@ -115,7 +116,6 @@ import (
 
 	"github.com/containerd/containerd/errdefs"
 	specs "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 var (
@@ -167,14 +167,14 @@ func (m *matcher) String() string {
 func Parse(specifier string) (specs.Platform, error) {
 	if strings.Contains(specifier, "*") {
 		// TODO(stevvooe): need to work out exact wildcard handling
-		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: wildcards not yet supported", specifier)
+		return specs.Platform{}, fmt.Errorf("%q: wildcards not yet supported: %w", specifier, errdefs.ErrInvalidArgument)
 	}
 
 	parts := strings.Split(specifier, "/")
 
 	for _, part := range parts {
 		if !specifierRe.MatchString(part) {
-			return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q is an invalid component of %q: platform specifier component must match %q", part, specifier, specifierRe.String())
+			return specs.Platform{}, fmt.Errorf("%q is an invalid component of %q: platform specifier component must match %q: %w", part, specifier, specifierRe.String(), errdefs.ErrInvalidArgument)
 		}
 	}
 
@@ -206,7 +206,7 @@ func Parse(specifier string) (specs.Platform, error) {
 			return p, nil
 		}
 
-		return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: unknown operating system or architecture", specifier)
+		return specs.Platform{}, fmt.Errorf("%q: unknown operating system or architecture: %w", specifier, errdefs.ErrInvalidArgument)
 	case 2:
 		// In this case, we treat as a regular os/arch pair. We don't care
 		// about whether or not we know of the platform.
@@ -228,7 +228,7 @@ func Parse(specifier string) (specs.Platform, error) {
 		return p, nil
 	}
 
-	return specs.Platform{}, errors.Wrapf(errdefs.ErrInvalidArgument, "%q: cannot parse platform specifier", specifier)
+	return specs.Platform{}, fmt.Errorf("%q: cannot parse platform specifier: %w", specifier, errdefs.ErrInvalidArgument)
 }
 
 // MustParse is like Parses but panics if the specifier cannot be parsed.
vendor/github.com/containerd/containerd/remotes/docker/auth/fetch.go (generated, vendored): 33 lines changed

@@ -19,6 +19,8 @@ package auth
 import (
 	"context"
 	"encoding/json"
+	"errors"
+	"fmt"
 	"net/http"
 	"net/url"
 	"strings"
@@ -27,7 +29,6 @@ import (
 	"github.com/containerd/containerd/log"
 	remoteserrors "github.com/containerd/containerd/remotes/errors"
 	"github.com/containerd/containerd/version"
-	"github.com/pkg/errors"
 	"golang.org/x/net/context/ctxhttp"
 )
 
@@ -46,7 +47,7 @@ func GenerateTokenOptions(ctx context.Context, host, username, secret string, c
 
 	realmURL, err := url.Parse(realm)
 	if err != nil {
-		return TokenOptions{}, errors.Wrap(err, "invalid token auth challenge realm")
+		return TokenOptions{}, fmt.Errorf("invalid token auth challenge realm: %w", err)
 	}
 
 	to := TokenOptions{
@@ -73,6 +74,15 @@ type TokenOptions struct {
 	Scopes   []string
 	Username string
 	Secret   string
+
+	// FetchRefreshToken enables fetching a refresh token (aka "identity token", "offline token") along with the bearer token.
+	//
+	// For HTTP GET mode (FetchToken), FetchRefreshToken sets `offline_token=true` in the request.
+	// https://docs.docker.com/registry/spec/auth/token/#requesting-a-token
+	//
+	// For HTTP POST mode (FetchTokenWithOAuth), FetchRefreshToken sets `access_type=offline` in the request.
+	// https://docs.docker.com/registry/spec/auth/oauth/#getting-a-token
+	FetchRefreshToken bool
 }
 
 // OAuthTokenResponse is response from fetching token with a OAuth POST request
@@ -101,6 +111,9 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.
 		form.Set("username", to.Username)
 		form.Set("password", to.Secret)
 	}
+	if to.FetchRefreshToken {
+		form.Set("access_type", "offline")
+	}
 
 	req, err := http.NewRequest("POST", to.Realm, strings.NewReader(form.Encode()))
 	if err != nil {
@@ -121,18 +134,18 @@ func FetchTokenWithOAuth(ctx context.Context, client *http.Client, headers http.
 	defer resp.Body.Close()
 
 	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
-		return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp))
+		return nil, remoteserrors.NewUnexpectedStatusErr(resp)
 	}
 
 	decoder := json.NewDecoder(resp.Body)
 
 	var tr OAuthTokenResponse
 	if err = decoder.Decode(&tr); err != nil {
-		return nil, errors.Wrap(err, "unable to decode token response")
+		return nil, fmt.Errorf("unable to decode token response: %w", err)
 	}
 
 	if tr.AccessToken == "" {
-		return nil, errors.WithStack(ErrNoToken)
+		return nil, ErrNoToken
 	}
 
 	return &tr, nil
@@ -175,6 +188,10 @@ func FetchToken(ctx context.Context, client *http.Client, headers http.Header, t
 		req.SetBasicAuth(to.Username, to.Secret)
 	}
 
+	if to.FetchRefreshToken {
+		reqParams.Add("offline_token", "true")
+	}
+
 	req.URL.RawQuery = reqParams.Encode()
 
 	resp, err := ctxhttp.Do(ctx, client, req)
@@ -184,14 +201,14 @@ func FetchToken(ctx context.Context, client *http.Client, headers http.Header, t
 	defer resp.Body.Close()
 
 	if resp.StatusCode < 200 || resp.StatusCode >= 400 {
-		return nil, errors.WithStack(remoteserrors.NewUnexpectedStatusErr(resp))
+		return nil, remoteserrors.NewUnexpectedStatusErr(resp)
 	}
 
 	decoder := json.NewDecoder(resp.Body)
 
 	var tr FetchTokenResponse
 	if err = decoder.Decode(&tr); err != nil {
-		return nil, errors.Wrap(err, "unable to decode token response")
+		return nil, fmt.Errorf("unable to decode token response: %w", err)
 	}
 
 	// `access_token` is equivalent to `token` and if both are specified
@@ -202,7 +219,7 @@ func FetchToken(ctx context.Context, client *http.Client, headers http.Header, t
 	}
 
 	if tr.Token == "" {
-		return nil, errors.WithStack(ErrNoToken)
+		return nil, ErrNoToken
 	}
 
 	return &tr, nil
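For context, a hedged usage sketch of the token helpers touched in this file. TokenOptions, FetchToken, the Realm/Scopes fields and the new FetchRefreshToken flag appear in the hunks above; the Service field and all concrete values are assumptions for illustration only.

package main

import (
    "context"
    "fmt"
    "net/http"

    "github.com/containerd/containerd/remotes/docker/auth"
)

func main() {
    to := auth.TokenOptions{
        Realm:             "https://auth.example.com/token", // placeholder realm
        Service:           "registry.example.com",           // placeholder service (field assumed)
        Scopes:            []string{"repository:library/busybox:pull"},
        FetchRefreshToken: true, // GET mode: adds offline_token=true to the request
    }

    // FetchToken performs the HTTP GET token request shown above.
    resp, err := auth.FetchToken(context.Background(), http.DefaultClient, http.Header{}, to)
    if err != nil {
        panic(err)
    }
    fmt.Println("got bearer token:", resp.Token != "")
    fmt.Println("got refresh token:", resp.RefreshToken != "")
}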
vendor/github.com/containerd/containerd/remotes/docker/authorizer.go (generated, vendored): 85 lines changed

@@ -19,6 +19,7 @@ package docker
 import (
 	"context"
 	"encoding/base64"
+	"errors"
 	"fmt"
 	"net/http"
 	"strings"
@@ -28,7 +29,6 @@ import (
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/remotes/docker/auth"
 	remoteerrors "github.com/containerd/containerd/remotes/errors"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 )
 
@@ -37,10 +37,12 @@ type dockerAuthorizer struct {
 
 	client *http.Client
 	header http.Header
-	mu     sync.Mutex
+	mu     sync.RWMutex
 
 	// indexed by host name
 	handlers map[string]*authHandler
+
+	onFetchRefreshToken OnFetchRefreshToken
 }
 
 // NewAuthorizer creates a Docker authorizer using the provided function to
@@ -51,9 +53,10 @@ func NewAuthorizer(client *http.Client, f func(string) (string, string, error))
 }
 
 type authorizerConfig struct {
-	credentials func(string) (string, string, error)
-	client      *http.Client
-	header      http.Header
+	credentials         func(string) (string, string, error)
+	client              *http.Client
+	header              http.Header
+	onFetchRefreshToken OnFetchRefreshToken
 }
 
 // AuthorizerOpt configures an authorizer
@@ -80,6 +83,16 @@ func WithAuthHeader(hdr http.Header) AuthorizerOpt {
 	}
 }
 
+// OnFetchRefreshToken is called on fetching request token.
+type OnFetchRefreshToken func(ctx context.Context, refreshToken string, req *http.Request)
+
+// WithFetchRefreshToken enables fetching "refresh token" (aka "identity token", "offline token").
+func WithFetchRefreshToken(f OnFetchRefreshToken) AuthorizerOpt {
+	return func(opt *authorizerConfig) {
+		opt.onFetchRefreshToken = f
+	}
+}
+
 // NewDockerAuthorizer creates an authorizer using Docker's registry
 // authentication spec.
 // See https://docs.docker.com/registry/spec/auth/
@@ -94,10 +107,11 @@ func NewDockerAuthorizer(opts ...AuthorizerOpt) Authorizer {
 	}
 
 	return &dockerAuthorizer{
-		credentials: ao.credentials,
-		client:      ao.client,
-		header:      ao.header,
-		handlers:    make(map[string]*authHandler),
+		credentials:         ao.credentials,
+		client:              ao.client,
+		header:              ao.header,
+		handlers:            make(map[string]*authHandler),
+		onFetchRefreshToken: ao.onFetchRefreshToken,
 	}
 }
 
@@ -109,12 +123,21 @@ func (a *dockerAuthorizer) Authorize(ctx context.Context, req *http.Request) err
 		return nil
 	}
 
-	auth, err := ah.authorize(ctx)
+	auth, refreshToken, err := ah.authorize(ctx)
 	if err != nil {
 		return err
 	}
 
 	req.Header.Set("Authorization", auth)
+
+	if refreshToken != "" {
+		a.mu.RLock()
+		onFetchRefreshToken := a.onFetchRefreshToken
+		a.mu.RUnlock()
+		if onFetchRefreshToken != nil {
+			onFetchRefreshToken(ctx, refreshToken, req)
+		}
+	}
 	return nil
 }
 
@@ -161,6 +184,7 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
 		if err != nil {
 			return err
 		}
+		common.FetchRefreshToken = a.onFetchRefreshToken != nil
 
 		a.handlers[host] = newAuthHandler(a.client, a.header, c.Scheme, common)
 		return nil
@@ -181,14 +205,15 @@ func (a *dockerAuthorizer) AddResponses(ctx context.Context, responses []*http.R
 			}
 		}
 	}
-	return errors.Wrap(errdefs.ErrNotImplemented, "failed to find supported auth scheme")
+	return fmt.Errorf("failed to find supported auth scheme: %w", errdefs.ErrNotImplemented)
 }
 
 // authResult is used to control limit rate.
 type authResult struct {
 	sync.WaitGroup
-	token string
-	err   error
+	token        string
+	refreshToken string
+	err          error
 }
 
 // authHandler is used to handle auth request per registry server.
@@ -220,29 +245,29 @@ func newAuthHandler(client *http.Client, hdr http.Header, scheme auth.Authentica
 	}
 }
 
-func (ah *authHandler) authorize(ctx context.Context) (string, error) {
+func (ah *authHandler) authorize(ctx context.Context) (string, string, error) {
 	switch ah.scheme {
 	case auth.BasicAuth:
 		return ah.doBasicAuth(ctx)
 	case auth.BearerAuth:
 		return ah.doBearerAuth(ctx)
 	default:
-		return "", errors.Wrapf(errdefs.ErrNotImplemented, "failed to find supported auth scheme: %s", string(ah.scheme))
+		return "", "", fmt.Errorf("failed to find supported auth scheme: %s: %w", string(ah.scheme), errdefs.ErrNotImplemented)
 	}
 }
 
-func (ah *authHandler) doBasicAuth(ctx context.Context) (string, error) {
+func (ah *authHandler) doBasicAuth(ctx context.Context) (string, string, error) {
 	username, secret := ah.common.Username, ah.common.Secret
 
 	if username == "" || secret == "" {
-		return "", fmt.Errorf("failed to handle basic auth because missing username or secret")
+		return "", "", fmt.Errorf("failed to handle basic auth because missing username or secret")
 	}
 
 	auth := base64.StdEncoding.EncodeToString([]byte(username + ":" + secret))
-	return fmt.Sprintf("Basic %s", auth), nil
+	return fmt.Sprintf("Basic %s", auth), "", nil
 }
 
-func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err error) {
+func (ah *authHandler) doBearerAuth(ctx context.Context) (token, refreshToken string, err error) {
 	// copy common tokenOptions
 	to := ah.common
 
@@ -255,7 +280,7 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err erro
 	if r, exist := ah.scopedTokens[scoped]; exist {
 		ah.Unlock()
 		r.Wait()
-		return r.token, r.err
+		return r.token, r.refreshToken, r.err
 	}
 
 	// only one fetch token job
@@ -266,14 +291,16 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err erro
 
 	defer func() {
 		token = fmt.Sprintf("Bearer %s", token)
-		r.token, r.err = token, err
+		r.token, r.refreshToken, r.err = token, refreshToken, err
 		r.Done()
 	}()
 
 	// fetch token for the resource scope
 	if to.Secret != "" {
 		defer func() {
-			err = errors.Wrap(err, "failed to fetch oauth token")
+			if err != nil {
+				err = fmt.Errorf("failed to fetch oauth token: %w", err)
+			}
 		}()
 		// credential information is provided, use oauth POST endpoint
 		// TODO: Allow setting client_id
@@ -287,25 +314,25 @@ func (ah *authHandler) doBearerAuth(ctx context.Context) (token string, err erro
 			if (errStatus.StatusCode == 405 && to.Username != "") || errStatus.StatusCode == 404 || errStatus.StatusCode == 401 {
 				resp, err := auth.FetchToken(ctx, ah.client, ah.header, to)
 				if err != nil {
-					return "", err
+					return "", "", err
 				}
-				return resp.Token, nil
+				return resp.Token, resp.RefreshToken, nil
 			}
 			log.G(ctx).WithFields(logrus.Fields{
 				"status": errStatus.Status,
 				"body":   string(errStatus.Body),
 			}).Debugf("token request failed")
 		}
-		return "", err
+		return "", "", err
 	}
-	return resp.AccessToken, nil
+	return resp.AccessToken, resp.RefreshToken, nil
 	}
 	// do request anonymously
 	resp, err := auth.FetchToken(ctx, ah.client, ah.header, to)
 	if err != nil {
-		return "", errors.Wrap(err, "failed to fetch anonymous token")
+		return "", "", fmt.Errorf("failed to fetch anonymous token: %w", err)
 	}
-	return resp.Token, nil
+	return resp.Token, resp.RefreshToken, nil
 }
 
 func invalidAuthorization(c auth.Challenge, responses []*http.Response) error {
@@ -319,7 +346,7 @@ func invalidAuthorization(c auth.Challenge, responses []*http.Response) error {
 		return nil
 	}
 
-	return errors.Wrapf(ErrInvalidAuthorization, "server message: %s", errStr)
+	return fmt.Errorf("server message: %s: %w", errStr, ErrInvalidAuthorization)
 }
 
 func sameRequest(r1, r2 *http.Request) bool {
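A hedged sketch of how a client could wire the new refresh-token callback into NewDockerAuthorizer. Only NewDockerAuthorizer, WithFetchRefreshToken and the OnFetchRefreshToken signature come from the diff above; the persist function is a hypothetical callback supplied by the caller.

package example

import (
    "context"
    "net/http"

    "github.com/containerd/containerd/remotes/docker"
)

// newAuthorizer builds a registry authorizer that surfaces refresh tokens to
// the caller, e.g. for storing them in a credentials store (persist is a
// hypothetical callback, not part of containerd's API).
func newAuthorizer(persist func(host, refreshToken string)) docker.Authorizer {
    return docker.NewDockerAuthorizer(
        docker.WithFetchRefreshToken(func(ctx context.Context, refreshToken string, req *http.Request) {
            persist(req.URL.Host, refreshToken)
        }),
    )
}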
vendor/github.com/containerd/containerd/remotes/docker/converter.go (generated, vendored): 9 lines changed

@@ -28,7 +28,6 @@ import (
 	"github.com/containerd/containerd/remotes"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 // LegacyConfigMediaType should be replaced by OCI image spec.
@@ -52,12 +51,12 @@ func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Desc
 	// read manifest data
 	mb, err := content.ReadBlob(ctx, store, desc)
 	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to read index data")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to read index data: %w", err)
 	}
 
 	var manifest ocispec.Manifest
 	if err := json.Unmarshal(mb, &manifest); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal data into manifest")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal data into manifest: %w", err)
 	}
 
 	// check config media type
@@ -68,7 +67,7 @@ func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Desc
 	manifest.Config.MediaType = images.MediaTypeDockerSchema2Config
 	data, err := json.MarshalIndent(manifest, "", " ")
 	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal manifest")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to marshal manifest: %w", err)
 	}
 
 	// update manifest with gc labels
@@ -82,7 +81,7 @@ func ConvertManifest(ctx context.Context, store content.Store, desc ocispec.Desc
 
 	ref := remotes.MakeRefKey(ctx, desc)
 	if err := content.WriteBlob(ctx, store, ref, bytes.NewReader(data), desc, content.WithLabels(labels)); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to update content")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to update content: %w", err)
 	}
 	return desc, nil
 }
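For orientation, a hedged sketch of calling ConvertManifest (the function edited above) and propagating its error in the same %w style. The content store and descriptor are assumed inputs from the caller.

package example

import (
    "context"
    "fmt"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/remotes/docker"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// convertLegacyConfig rewrites a legacy Docker config media type in the
// content store and returns the (possibly updated) descriptor.
func convertLegacyConfig(ctx context.Context, store content.Store, desc ocispec.Descriptor) (ocispec.Descriptor, error) {
    newDesc, err := docker.ConvertManifest(ctx, store, desc)
    if err != nil {
        return ocispec.Descriptor{}, fmt.Errorf("convert manifest %s: %w", desc.Digest, err)
    }
    return newDesc, nil
}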
vendor/github.com/containerd/containerd/remotes/docker/fetcher.go (generated, vendored): 22 lines changed

@@ -19,6 +19,7 @@ package docker
 import (
 	"context"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -29,7 +30,6 @@ import (
 	"github.com/containerd/containerd/images"
 	"github.com/containerd/containerd/log"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 type dockerFetcher struct {
@@ -41,7 +41,7 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
 
 	hosts := r.filterHosts(HostCapabilityPull)
 	if len(hosts) == 0 {
-		return nil, errors.Wrap(errdefs.ErrNotFound, "no pull hosts")
+		return nil, fmt.Errorf("no pull hosts: %w", errdefs.ErrNotFound)
 	}
 
 	ctx, err := ContextWithRepositoryScope(ctx, r.refspec, false)
@@ -141,9 +141,9 @@ func (r dockerFetcher) Fetch(ctx context.Context, desc ocispec.Descriptor) (io.R
 	}
 
 	if errdefs.IsNotFound(firstErr) {
-		firstErr = errors.Wrapf(errdefs.ErrNotFound,
-			"could not fetch content descriptor %v (%v) from remote",
-			desc.Digest, desc.MediaType)
+		firstErr = fmt.Errorf("could not fetch content descriptor %v (%v) from remote: %w",
+			desc.Digest, desc.MediaType, errdefs.ErrNotFound,
+		)
 	}
 
 	return nil, firstErr
@@ -178,19 +178,19 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string,
 		// implementation.
 
 		if resp.StatusCode == http.StatusNotFound {
-			return nil, errors.Wrapf(errdefs.ErrNotFound, "content at %v not found", req.String())
+			return nil, fmt.Errorf("content at %v not found: %w", req.String(), errdefs.ErrNotFound)
 		}
 		var registryErr Errors
 		if err := json.NewDecoder(resp.Body).Decode(&registryErr); err != nil || registryErr.Len() < 1 {
-			return nil, errors.Errorf("unexpected status code %v: %v", req.String(), resp.Status)
+			return nil, fmt.Errorf("unexpected status code %v: %v", req.String(), resp.Status)
 		}
-		return nil, errors.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error())
+		return nil, fmt.Errorf("unexpected status code %v: %s - Server message: %s", req.String(), resp.Status, registryErr.Error())
 	}
 	if offset > 0 {
 		cr := resp.Header.Get("content-range")
 		if cr != "" {
 			if !strings.HasPrefix(cr, fmt.Sprintf("bytes %d-", offset)) {
-				return nil, errors.Errorf("unhandled content range in response: %v", cr)
+				return nil, fmt.Errorf("unhandled content range in response: %v", cr)
 
 			}
 		} else {
@@ -202,10 +202,10 @@ func (r dockerFetcher) open(ctx context.Context, req *request, mediatype string,
 			// Could use buffer pool here but this case should be rare
 			n, err := io.Copy(io.Discard, io.LimitReader(resp.Body, offset))
 			if err != nil {
-				return nil, errors.Wrap(err, "failed to discard to offset")
+				return nil, fmt.Errorf("failed to discard to offset: %w", err)
 			}
 			if n != offset {
-				return nil, errors.Errorf("unable to discard to offset")
+				return nil, errors.New("unable to discard to offset")
 			}
 
 		}
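Because the fetcher now wraps errdefs.ErrNotFound with %w (see the hunks above), callers can keep using errdefs.IsNotFound to tell a missing blob apart from transport errors. A hedged sketch; the fetcher and descriptor are assumed inputs.

package example

import (
    "context"
    "io"

    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/remotes"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// fetchIfPresent returns (nil, false, nil) when every configured pull host
// reported the content as missing, and propagates any other failure.
func fetchIfPresent(ctx context.Context, f remotes.Fetcher, desc ocispec.Descriptor) (io.ReadCloser, bool, error) {
    rc, err := f.Fetch(ctx, desc)
    if errdefs.IsNotFound(err) {
        return nil, false, nil
    }
    if err != nil {
        return nil, false, err
    }
    return rc, true, nil
}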
vendor/github.com/containerd/containerd/remotes/docker/httpreadseeker.go (generated, vendored): 20 lines changed

@@ -18,11 +18,11 @@ package docker
 
 import (
 	"bytes"
+	"fmt"
 	"io"
 
 	"github.com/containerd/containerd/errdefs"
 	"github.com/containerd/containerd/log"
-	"github.com/pkg/errors"
 )
 
 const maxRetry = 3
@@ -69,7 +69,7 @@ func (hrs *httpReadSeeker) Read(p []byte) (n int, err error) {
 		}
 		if hrs.rc != nil {
 			if clsErr := hrs.rc.Close(); clsErr != nil {
-				log.L.WithError(clsErr).Errorf("httpReadSeeker: failed to close ReadCloser")
+				log.L.WithError(clsErr).Error("httpReadSeeker: failed to close ReadCloser")
 			}
 			hrs.rc = nil
 		}
@@ -94,7 +94,7 @@ func (hrs *httpReadSeeker) Close() error {
 
 func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
 	if hrs.closed {
-		return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: closed")
+		return 0, fmt.Errorf("Fetcher.Seek: closed: %w", errdefs.ErrUnavailable)
 	}
 
 	abs := hrs.offset
@@ -105,21 +105,21 @@ func (hrs *httpReadSeeker) Seek(offset int64, whence int) (int64, error) {
 		abs += offset
 	case io.SeekEnd:
 		if hrs.size == -1 {
-			return 0, errors.Wrap(errdefs.ErrUnavailable, "Fetcher.Seek: unknown size, cannot seek from end")
+			return 0, fmt.Errorf("Fetcher.Seek: unknown size, cannot seek from end: %w", errdefs.ErrUnavailable)
 		}
 		abs = hrs.size + offset
 	default:
-		return 0, errors.Wrap(errdefs.ErrInvalidArgument, "Fetcher.Seek: invalid whence")
+		return 0, fmt.Errorf("Fetcher.Seek: invalid whence: %w", errdefs.ErrInvalidArgument)
 	}
 
 	if abs < 0 {
-		return 0, errors.Wrapf(errdefs.ErrInvalidArgument, "Fetcher.Seek: negative offset")
+		return 0, fmt.Errorf("Fetcher.Seek: negative offset: %w", errdefs.ErrInvalidArgument)
 	}
 
 	if abs != hrs.offset {
 		if hrs.rc != nil {
 			if err := hrs.rc.Close(); err != nil {
-				log.L.WithError(err).Errorf("Fetcher.Seek: failed to close ReadCloser")
+				log.L.WithError(err).Error("Fetcher.Seek: failed to close ReadCloser")
 			}
 
 			hrs.rc = nil
@@ -140,17 +140,17 @@ func (hrs *httpReadSeeker) reader() (io.Reader, error) {
 	// only try to reopen the body request if we are seeking to a value
 	// less than the actual size.
 	if hrs.open == nil {
-		return nil, errors.Wrapf(errdefs.ErrNotImplemented, "cannot open")
+		return nil, fmt.Errorf("cannot open: %w", errdefs.ErrNotImplemented)
 	}
 
 	rc, err := hrs.open(hrs.offset)
 	if err != nil {
-		return nil, errors.Wrapf(err, "httpReadSeeker: failed open")
+		return nil, fmt.Errorf("httpReadSeeker: failed open: %w", err)
 	}
 
 	if hrs.rc != nil {
 		if err := hrs.rc.Close(); err != nil {
-			log.L.WithError(err).Errorf("httpReadSeeker: failed to close ReadCloser")
+			log.L.WithError(err).Error("httpReadSeeker: failed to close ReadCloser")
 		}
 	}
 	hrs.rc = rc
vendor/github.com/containerd/containerd/remotes/docker/pusher.go (generated, vendored): 31 lines changed

@@ -18,6 +18,8 @@ package docker
 
 import (
 	"context"
+	"errors"
+	"fmt"
 	"io"
 	"net/http"
 	"net/url"
@@ -32,7 +34,6 @@ import (
 	remoteserrors "github.com/containerd/containerd/remotes/errors"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 )
 
 type dockerPusher struct {
@@ -55,7 +56,7 @@ func (p dockerPusher) Writer(ctx context.Context, opts ...content.WriterOpt) (co
 		}
 	}
 	if wOpts.Ref == "" {
-		return nil, errors.Wrap(errdefs.ErrInvalidArgument, "ref must not be empty")
+		return nil, fmt.Errorf("ref must not be empty: %w", errdefs.ErrInvalidArgument)
 	}
 	return p.push(ctx, wOpts.Desc, wOpts.Ref, true)
 }
@@ -76,22 +77,22 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
 	status, err := p.tracker.GetStatus(ref)
 	if err == nil {
 		if status.Committed && status.Offset == status.Total {
-			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "ref %v", ref)
+			return nil, fmt.Errorf("ref %v: %w", ref, errdefs.ErrAlreadyExists)
 		}
 		if unavailableOnFail {
 			// Another push of this ref is happening elsewhere. The rest of function
 			// will continue only when `errdefs.IsNotFound(err) == true` (i.e. there
 			// is no actively-tracked ref already).
-			return nil, errors.Wrap(errdefs.ErrUnavailable, "push is on-going")
+			return nil, fmt.Errorf("push is on-going: %w", errdefs.ErrUnavailable)
 		}
 		// TODO: Handle incomplete status
 	} else if !errdefs.IsNotFound(err) {
-		return nil, errors.Wrap(err, "failed to get status")
+		return nil, fmt.Errorf("failed to get status: %w", err)
 	}
 
 	hosts := p.filterHosts(HostCapabilityPush)
 	if len(hosts) == 0 {
-		return nil, errors.Wrap(errdefs.ErrNotFound, "no push hosts")
+		return nil, fmt.Errorf("no push hosts: %w", errdefs.ErrNotFound)
 	}
 
 	var (
@@ -143,7 +144,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
 					},
 				})
 				resp.Body.Close()
-				return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
+				return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists)
 			}
 		} else if resp.StatusCode != http.StatusNotFound {
 			err := remoteserrors.NewUnexpectedStatusErr(resp)
@@ -205,7 +206,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
 					Offset: desc.Size,
 				},
 			})
-			return nil, errors.Wrapf(errdefs.ErrAlreadyExists, "content %v on remote", desc.Digest)
+			return nil, fmt.Errorf("content %v on remote: %w", desc.Digest, errdefs.ErrAlreadyExists)
 		default:
 			err := remoteserrors.NewUnexpectedStatusErr(resp)
 			log.G(ctx).WithField("resp", resp).WithField("body", string(err.(remoteserrors.ErrUnexpectedStatus).Body)).Debug("unexpected response")
@@ -221,7 +222,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
 		if strings.HasPrefix(location, "/") {
 			lurl, err = url.Parse(lhost.Scheme + "://" + lhost.Host + location)
 			if err != nil {
-				return nil, errors.Wrapf(err, "unable to parse location %v", location)
+				return nil, fmt.Errorf("unable to parse location %v: %w", location, err)
 			}
 		} else {
 			if !strings.Contains(location, "://") {
@@ -229,7 +230,7 @@ func (p dockerPusher) push(ctx context.Context, desc ocispec.Descriptor, ref str
 			}
 			lurl, err = url.Parse(location)
 			if err != nil {
-				return nil, errors.Wrapf(err, "unable to parse location %v", location)
+				return nil, fmt.Errorf("unable to parse location %v: %w", location, err)
 			}
 
 			if lurl.Host != lhost.Host || lhost.Scheme != lurl.Scheme {
@@ -374,7 +375,7 @@ func (pw *pushWriter) Digest() digest.Digest {
 func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Digest, opts ...content.Opt) error {
 	// Check whether read has already thrown an error
 	if _, err := pw.pipe.Write([]byte{}); err != nil && err != io.ErrClosedPipe {
-		return errors.Wrap(err, "pipe error before commit")
+		return fmt.Errorf("pipe error before commit: %w", err)
 	}
 
 	if err := pw.pipe.Close(); err != nil {
@@ -397,11 +398,11 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
 
 	status, err := pw.tracker.GetStatus(pw.ref)
 	if err != nil {
-		return errors.Wrap(err, "failed to get status")
+		return fmt.Errorf("failed to get status: %w", err)
 	}
 
 	if size > 0 && size != status.Offset {
-		return errors.Errorf("unexpected size %d, expected %d", status.Offset, size)
+		return fmt.Errorf("unexpected size %d, expected %d", status.Offset, size)
 	}
 
 	if expected == "" {
@@ -410,11 +411,11 @@ func (pw *pushWriter) Commit(ctx context.Context, size int64, expected digest.Di
 
 	actual, err := digest.Parse(resp.Header.Get("Docker-Content-Digest"))
 	if err != nil {
-		return errors.Wrap(err, "invalid content digest in response")
+		return fmt.Errorf("invalid content digest in response: %w", err)
 	}
 
 	if actual != expected {
-		return errors.Errorf("got digest %s, expected %s", actual, expected)
+		return fmt.Errorf("got digest %s, expected %s", actual, expected)
 	}
 
 	status.Committed = true
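The pusher wraps errdefs.ErrAlreadyExists when the registry already has the content, so callers typically treat that error as success. A hedged sketch; the pusher and descriptor are assumed inputs, and the caller is expected to copy the blob and Commit on the returned writer.

package example

import (
    "context"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/errdefs"
    "github.com/containerd/containerd/remotes"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// openPush starts an upload for desc; ok is false when the remote already has
// the content and nothing needs to be sent.
func openPush(ctx context.Context, p remotes.Pusher, desc ocispec.Descriptor) (w content.Writer, ok bool, err error) {
    w, err = p.Push(ctx, desc)
    if errdefs.IsAlreadyExists(err) {
        return nil, false, nil
    }
    if err != nil {
        return nil, false, err
    }
    return w, true, nil
}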
vendor/github.com/containerd/containerd/remotes/docker/registry.go (generated, vendored): 3 lines changed

@@ -17,10 +17,9 @@
 package docker
 
 import (
+	"errors"
 	"net"
 	"net/http"
-
-	"github.com/pkg/errors"
 )
 
 // HostCapabilities represent the capabilities of the registry
vendor/github.com/containerd/containerd/remotes/docker/resolver.go (generated, vendored): 25 lines changed

@@ -18,6 +18,7 @@ package docker
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"net/http"
@@ -34,7 +35,6 @@ import (
 	"github.com/containerd/containerd/version"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/net/context/ctxhttp"
 )
@@ -254,7 +254,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
 
 	hosts := base.filterHosts(caps)
 	if len(hosts) == 0 {
-		return "", ocispec.Descriptor{}, errors.Wrap(errdefs.ErrNotFound, "no resolve hosts")
+		return "", ocispec.Descriptor{}, fmt.Errorf("no resolve hosts: %w", errdefs.ErrNotFound)
 	}
 
 	ctx, err = ContextWithRepositoryScope(ctx, refspec, false)
@@ -279,7 +279,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
 			resp, err := req.doWithRetries(ctx, nil)
 			if err != nil {
 				if errors.Is(err, ErrInvalidAuthorization) {
-					err = errors.Wrapf(err, "pull access denied, repository does not exist or may require authorization")
+					err = fmt.Errorf("pull access denied, repository does not exist or may require authorization: %w", err)
 				}
 				// Store the error for referencing later
 				if firstErr == nil {
@@ -298,11 +298,11 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
 			if resp.StatusCode > 399 {
 				// Set firstErr when encountering the first non-404 status code.
 				if firstErr == nil {
-					firstErr = errors.Errorf("pulling from host %s failed with status code %v: %v", host.Host, u, resp.Status)
+					firstErr = fmt.Errorf("pulling from host %s failed with status code %v: %v", host.Host, u, resp.Status)
 				}
 				continue // try another host
 			}
-			return "", ocispec.Descriptor{}, errors.Errorf("pulling from host %s failed with unexpected status code %v: %v", host.Host, u, resp.Status)
+			return "", ocispec.Descriptor{}, fmt.Errorf("pulling from host %s failed with unexpected status code %v: %v", host.Host, u, resp.Status)
 		}
 		size := resp.ContentLength
 		contentType := getManifestMediaType(resp)
@@ -318,7 +318,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
 
 			if dgstHeader != "" && size != -1 {
 				if err := dgstHeader.Validate(); err != nil {
-					return "", ocispec.Descriptor{}, errors.Wrapf(err, "%q in header not a valid digest", dgstHeader)
+					return "", ocispec.Descriptor{}, fmt.Errorf("%q in header not a valid digest: %w", dgstHeader, err)
 				}
 				dgst = dgstHeader
 			}
@@ -366,7 +366,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
 		// Prevent resolving to excessively large manifests
 		if size > MaxManifestSize {
 			if firstErr == nil {
-				firstErr = errors.Wrapf(errdefs.ErrNotFound, "rejecting %d byte manifest for %s", size, ref)
+				firstErr = fmt.Errorf("rejecting %d byte manifest for %s: %w", size, ref, errdefs.ErrNotFound)
 			}
 			continue
 		}
@@ -387,7 +387,7 @@ func (r *dockerResolver) Resolve(ctx context.Context, ref string) (string, ocisp
 	// means that either no registries were given or each registry returned 404.
 
 	if firstErr == nil {
-		firstErr = errors.Wrap(errdefs.ErrNotFound, ref)
+		firstErr = fmt.Errorf("%s: %w", ref, errdefs.ErrNotFound)
 	}
 
 	return "", ocispec.Descriptor{}, firstErr
@@ -547,7 +547,7 @@ func (r *request) do(ctx context.Context) (*http.Response, error) {
 	ctx = log.WithLogger(ctx, log.G(ctx).WithField("url", u))
 	log.G(ctx).WithFields(requestFields(req)).Debug("do request")
 	if err := r.authorize(ctx, req); err != nil {
-		return nil, errors.Wrap(err, "failed to authorize")
+		return nil, fmt.Errorf("failed to authorize: %w", err)
 	}
 
 	var client = &http.Client{}
@@ -559,13 +559,16 @@ func (r *request) do(ctx context.Context) (*http.Response, error) {
 			if len(via) >= 10 {
 				return errors.New("stopped after 10 redirects")
 			}
-			return errors.Wrap(r.authorize(ctx, req), "failed to authorize redirect")
+			if err := r.authorize(ctx, req); err != nil {
+				return fmt.Errorf("failed to authorize redirect: %w", err)
+			}
+			return nil
 		}
 	}
 
 	resp, err := ctxhttp.Do(ctx, client, req)
 	if err != nil {
-		return nil, errors.Wrap(err, "failed to do request")
+		return nil, fmt.Errorf("failed to do request: %w", err)
 	}
 	log.G(ctx).WithFields(responseFields(resp)).Debug("fetch response received")
 	return resp, nil
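The redirect handling above is restructured from a single errors.Wrap call into an explicit error check. A self-contained sketch of the same policy, with authorize standing in for whatever attaches the Authorization header (a placeholder, not containerd API):

package example

import (
    "errors"
    "fmt"
    "net/http"
)

// clientWithAuthorizedRedirects re-authorizes every redirected request and
// gives up after ten hops, mirroring the CheckRedirect logic shown above.
func clientWithAuthorizedRedirects(authorize func(*http.Request) error) *http.Client {
    return &http.Client{
        CheckRedirect: func(req *http.Request, via []*http.Request) error {
            if len(via) >= 10 {
                return errors.New("stopped after 10 redirects")
            }
            if err := authorize(req); err != nil {
                return fmt.Errorf("failed to authorize redirect: %w", err)
            }
            return nil
        },
    }
}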
vendor/github.com/containerd/containerd/remotes/docker/schema1/converter.go (generated, vendored): 29 lines changed

@@ -21,6 +21,7 @@ import (
 	"context"
 	"encoding/base64"
 	"encoding/json"
+	"errors"
 	"fmt"
 	"io"
 	"strconv"
@@ -28,8 +29,6 @@ import (
 	"sync"
 	"time"
 
-	"golang.org/x/sync/errgroup"
-
 	"github.com/containerd/containerd/archive/compression"
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
@@ -39,7 +38,7 @@ import (
 	digest "github.com/opencontainers/go-digest"
 	specs "github.com/opencontainers/image-spec/specs-go"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
+	"golang.org/x/sync/errgroup"
 )
 
 const (
@@ -158,12 +157,12 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De
 
 	history, diffIDs, err := c.schema1ManifestHistory()
 	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "schema 1 conversion failed")
+		return ocispec.Descriptor{}, fmt.Errorf("schema 1 conversion failed: %w", err)
 	}
 
 	var img ocispec.Image
 	if err := json.Unmarshal([]byte(c.pulledManifest.History[0].V1Compatibility), &img); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to unmarshal image from schema 1 history")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to unmarshal image from schema 1 history: %w", err)
 	}
 
 	img.History = history
@@ -174,7 +173,7 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De
 
 	b, err := json.MarshalIndent(img, "", " ")
 	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err)
 	}
 
 	config := ocispec.Descriptor{
@@ -198,7 +197,7 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De
 
 	mb, err := json.MarshalIndent(manifest, "", " ")
 	if err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to marshal image")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to marshal image: %w", err)
 	}
 
 	desc := ocispec.Descriptor{
@@ -215,12 +214,12 @@ func (c *Converter) Convert(ctx context.Context, opts ...ConvertOpt) (ocispec.De
 
 	ref := remotes.MakeRefKey(ctx, desc)
 	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(mb), desc, content.WithLabels(labels)); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image manifest")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to write image manifest: %w", err)
 	}
 
 	ref = remotes.MakeRefKey(ctx, config)
 	if err := content.WriteBlob(ctx, c.contentStore, ref, bytes.NewReader(b), config); err != nil {
-		return ocispec.Descriptor{}, errors.Wrap(err, "failed to write image config")
+		return ocispec.Descriptor{}, fmt.Errorf("failed to write image config: %w", err)
 	}
 
 	return desc, nil
@@ -349,7 +348,7 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro
 	if desc.Size == -1 {
 		info, err := c.contentStore.Info(ctx, desc.Digest)
 		if err != nil {
-			return errors.Wrap(err, "failed to get blob info")
+			return fmt.Errorf("failed to get blob info: %w", err)
 		}
 		desc.Size = info.Size
 	}
@@ -370,7 +369,7 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro
 	}
 
 	if _, err := c.contentStore.Update(ctx, cinfo, "labels.containerd.io/uncompressed", fmt.Sprintf("labels.%s", labelDockerSchema1EmptyLayer)); err != nil {
-		return errors.Wrap(err, "failed to update uncompressed label")
+		return fmt.Errorf("failed to update uncompressed label: %w", err)
 	}
 
 	c.mu.Lock()
@@ -384,7 +383,7 @@ func (c *Converter) fetchBlob(ctx context.Context, desc ocispec.Descriptor) erro
 func (c *Converter) reuseLabelBlobState(ctx context.Context, desc ocispec.Descriptor) (bool, error) {
 	cinfo, err := c.contentStore.Info(ctx, desc.Digest)
 	if err != nil {
-		return false, errors.Wrap(err, "failed to get blob info")
+		return false, fmt.Errorf("failed to get blob info: %w", err)
 	}
 	desc.Size = cinfo.Size
 
@@ -441,7 +440,7 @@ func (c *Converter) schema1ManifestHistory() ([]ocispec.History, []digest.Digest
 	for i := range m.History {
 		var h v1History
 		if err := json.Unmarshal([]byte(m.History[i].V1Compatibility), &h); err != nil {
-			return nil, nil, errors.Wrap(err, "failed to unmarshal history")
+			return nil, nil, fmt.Errorf("failed to unmarshal history: %w", err)
 		}
 
 		blobSum := m.FSLayers[i].BlobSum
@@ -553,7 +552,7 @@ func stripSignature(b []byte) ([]byte, error) {
 	}
 	pb, err := joseBase64UrlDecode(sig.Signatures[0].Protected)
 	if err != nil {
-		return nil, errors.Wrapf(err, "could not decode %s", sig.Signatures[0].Protected)
+		return nil, fmt.Errorf("could not decode %s: %w", sig.Signatures[0].Protected, err)
 	}
 
 	var protected protectedBlock
@@ -567,7 +566,7 @@ func stripSignature(b []byte) ([]byte, error) {
 
 	tail, err := joseBase64UrlDecode(protected.Tail)
 	if err != nil {
-		return nil, errors.Wrap(err, "invalid tail base 64 value")
+		return nil, fmt.Errorf("invalid tail base 64 value: %w", err)
 	}
 
 	return append(b[:protected.Length], tail...), nil
vendor/github.com/containerd/containerd/remotes/docker/status.go (generated, vendored): 4 lines changed

@@ -17,12 +17,12 @@
 package docker
 
 import (
+	"fmt"
 	"sync"
 
 	"github.com/containerd/containerd/content"
 	"github.com/containerd/containerd/errdefs"
 	"github.com/moby/locker"
-	"github.com/pkg/errors"
 )
 
 // Status of a content operation
@@ -67,7 +67,7 @@ func (t *memoryStatusTracker) GetStatus(ref string) (Status, error) {
 	defer t.m.Unlock()
 	status, ok := t.statuses[ref]
 	if !ok {
-		return Status{}, errors.Wrapf(errdefs.ErrNotFound, "status for ref %v", ref)
+		return Status{}, fmt.Errorf("status for ref %v: %w", ref, errdefs.ErrNotFound)
 	}
 	return status, nil
 }
vendor/github.com/containerd/containerd/remotes/handlers.go (generated, vendored): 47 lines changed

@@ -18,6 +18,7 @@ package remotes
 
 import (
 	"context"
+	"errors"
 	"fmt"
 	"io"
 	"strings"
@@ -29,7 +30,6 @@ import (
 	"github.com/containerd/containerd/log"
 	"github.com/containerd/containerd/platforms"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"golang.org/x/sync/semaphore"
 )
@@ -127,13 +127,13 @@ func fetch(ctx context.Context, ingester content.Ingester, fetcher Fetcher, desc
 		// most likely a poorly configured registry/web front end which responded with no
 		// Content-Length header; unable (not to mention useless) to commit a 0-length entry
 		// into the content store. Error out here otherwise the error sent back is confusing
-		return errors.Wrapf(errdefs.ErrInvalidArgument, "unable to fetch descriptor (%s) which reports content size of zero", desc.Digest)
+		return fmt.Errorf("unable to fetch descriptor (%s) which reports content size of zero: %w", desc.Digest, errdefs.ErrInvalidArgument)
 	}
 	if ws.Offset == desc.Size {
 		// If writer is already complete, commit and return
 		err := cw.Commit(ctx, desc.Size, desc.Digest)
 		if err != nil && !errdefs.IsAlreadyExists(err) {
-			return errors.Wrapf(err, "failed commit on ref %q", ws.Ref)
+			return fmt.Errorf("failed commit on ref %q: %w", ws.Ref, err)
 		}
 		return nil
 	}
@@ -243,8 +243,8 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st
 			// as a marker for this problem
 			if (manifestStack[i].MediaType == ocispec.MediaTypeImageIndex ||
 				manifestStack[i].MediaType == images.MediaTypeDockerSchema2ManifestList) &&
-				errors.Cause(err) != nil && strings.Contains(errors.Cause(err).Error(), "400 Bad Request") {
-				return errors.Wrap(err, "manifest list/index references to blobs and/or manifests are missing in your target registry")
+				errors.Unwrap(err) != nil && strings.Contains(errors.Unwrap(err).Error(), "400 Bad Request") {
+				return fmt.Errorf("manifest list/index references to blobs and/or manifests are missing in your target registry: %w", err)
 			}
 			return err
 		}
@@ -253,6 +253,43 @@ func PushContent(ctx context.Context, pusher Pusher, desc ocispec.Descriptor, st
 	return nil
 }
 
+// SkipNonDistributableBlobs returns a handler that skips blobs that have a media type that is "non-distributeable".
+// An example of this kind of content would be a Windows base layer, which is not supposed to be redistributed.
+//
+// This is based on the media type of the content:
+//   - application/vnd.oci.image.layer.nondistributable
+//   - application/vnd.docker.image.rootfs.foreign
+func SkipNonDistributableBlobs(f images.HandlerFunc) images.HandlerFunc {
+	return func(ctx context.Context, desc ocispec.Descriptor) ([]ocispec.Descriptor, error) {
+		if images.IsNonDistributable(desc.MediaType) {
+			log.G(ctx).WithField("digest", desc.Digest).WithField("mediatype", desc.MediaType).Debug("Skipping non-distributable blob")
+			return nil, images.ErrSkipDesc
+		}
+
+		if images.IsLayerType(desc.MediaType) {
+			return nil, nil
+		}
+
+		children, err := f(ctx, desc)
+		if err != nil {
+			return nil, err
+		}
+		if len(children) == 0 {
+			return nil, nil
+		}
+
+		out := make([]ocispec.Descriptor, 0, len(children))
+		for _, child := range children {
+			if !images.IsNonDistributable(child.MediaType) {
+				out = append(out, child)
+			} else {
+				log.G(ctx).WithField("digest", child.Digest).WithField("mediatype", child.MediaType).Debug("Skipping non-distributable blob")
+			}
+		}
+		return out, nil
+	}
+}
+
 // FilterManifestByPlatformHandler allows Handler to handle non-target
 // platform's manifest and configuration data.
 func FilterManifestByPlatformHandler(f images.HandlerFunc, m platforms.Matcher) images.HandlerFunc {
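A hedged usage sketch for the new SkipNonDistributableBlobs handler added above: wrap the usual children handler so foreign/non-distributable layers are neither walked nor pushed. The content provider is an assumed input.

package example

import (
    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/images"
    "github.com/containerd/containerd/remotes"
)

// pushHandler walks an image's manifests and filters out non-distributable
// descriptors before they reach the pusher.
func pushHandler(provider content.Provider) images.HandlerFunc {
    return remotes.SkipNonDistributableBlobs(images.ChildrenHandler(provider))
}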
@@ -18,6 +18,7 @@ package contentserver
 
 import (
 	"context"
+	"fmt"
 	"io"
 	"sync"
 
@@ -28,7 +29,6 @@ import (
 	ptypes "github.com/gogo/protobuf/types"
 	digest "github.com/opencontainers/go-digest"
 	ocispec "github.com/opencontainers/image-spec/specs-go/v1"
-	"github.com/pkg/errors"
 	"github.com/sirupsen/logrus"
 	"google.golang.org/grpc"
 	"google.golang.org/grpc/codes"
@@ -383,7 +383,7 @@ func (s *service) Write(session api.Content_WriteServer) (err error) {
 
 	if req.Offset == 0 && ws.Offset > 0 {
 		if err := wr.Truncate(req.Offset); err != nil {
-			return errors.Wrapf(err, "truncate failed")
+			return fmt.Errorf("truncate failed: %w", err)
 		}
 		msg.Offset = req.Offset
 	}
vendor/github.com/containerd/containerd/version/version.go (generated, vendored): 2 lines changed

@@ -23,7 +23,7 @@ var (
 	Package = "github.com/containerd/containerd"
 
 	// Version holds the complete version number. Filled in at linking time.
-	Version = "1.6.0-beta.3+unknown"
+	Version = "1.6.0-rc.1+unknown"
 
 	// Revision is filled with the VCS (e.g. git) revision being used to build
 	// the program at linking time.
vendor/github.com/containerd/continuity/sysx/nodata_unix.go (generated, vendored): 4 lines changed

@@ -1,5 +1,5 @@
-//go:build darwin || freebsd || openbsd
-// +build darwin freebsd openbsd
+//go:build !(linux || solaris || windows)
+// +build !linux,!solaris,!windows
 
 /*
    Copyright The containerd Authors.