OptKeyNameCanonical ImageExporterOptKey = "name-canonical"
diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go
index cd2256b5..4d2b02f5 100644
--- a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go
+++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/parse.go
@@ -32,7 +32,7 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) {
return ps, nil
}
- p := platforms.DefaultSpec()
+ var p ocispecs.Platform
if imgConfig, ok := meta[ExporterImageConfigKey]; ok {
var img ocispecs.Image
err := json.Unmarshal(imgConfig, &img)
@@ -51,6 +51,8 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) {
} else if img.OS != "" || img.Architecture != "" {
return Platforms{}, errors.Errorf("invalid image config: os and architecture must be specified together")
}
+ } else {
+ p = platforms.DefaultSpec()
}
p = platforms.Normalize(p)
pk := platforms.FormatAll(p)
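
The hunk above makes ParsePlatforms consult the host's default platform only when the exporter metadata carries no image config. A minimal sketch of that fallback order follows; platformFromImageConfig is a hypothetical helper that condenses the vendored logic, and the sample config is made up.

package main

import (
	"encoding/json"
	"fmt"

	"github.com/containerd/platforms"
	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// platformFromImageConfig prefers the platform recorded in the image config
// and only falls back to the build host's default when the config says nothing.
func platformFromImageConfig(imgConfig []byte) ocispecs.Platform {
	var p ocispecs.Platform
	var img ocispecs.Image
	if err := json.Unmarshal(imgConfig, &img); err == nil && img.OS != "" && img.Architecture != "" {
		p = ocispecs.Platform{OS: img.OS, Architecture: img.Architecture, Variant: img.Variant}
	} else {
		p = platforms.DefaultSpec() // no usable config: use the host platform
	}
	return platforms.Normalize(p)
}

func main() {
	cfg := []byte(`{"os":"linux","architecture":"arm64"}`)
	fmt.Println(platforms.FormatAll(platformFromImageConfig(cfg))) // linux/arm64
}
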
diff --git a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go
index 056485b6..d7cc98b9 100644
--- a/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go
+++ b/vendor/github.com/moby/buildkit/exporter/containerimage/exptypes/types.go
@@ -9,6 +9,7 @@ import (
const (
ExporterConfigDigestKey = "config.digest"
+ ExporterImageNameKey = "image.name"
ExporterImageDigestKey = "containerimage.digest"
ExporterImageConfigKey = "containerimage.config"
ExporterImageConfigDigestKey = "containerimage.config.digest"
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go
index 4f1e1300..b757e754 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/directives.go
@@ -148,14 +148,16 @@ func parseDirective(key string, dt []byte, anyFormat bool) (string, string, []Ra
}
// use json directive, and search for { "key": "..." }
- jsonDirective := map[string]string{}
+ jsonDirective := map[string]any{}
if err := json.Unmarshal(dt, &jsonDirective); err == nil {
- if v, ok := jsonDirective[key]; ok {
- loc := []Range{{
- Start: Position{Line: line},
- End: Position{Line: line},
- }}
- return v, v, loc, true
+ if vAny, ok := jsonDirective[key]; ok {
+ if v, ok := vAny.(string); ok {
+ loc := []Range{{
+ Start: Position{Line: line},
+ End: Position{Line: line},
+ }}
+ return v, v, loc, true
+ }
}
}
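
Switching the directive map to map[string]any (with a string type assertion) means a JSON directive document no longer fails to parse just because one of its values is non-string; only non-string values for the requested key are ignored. A small stdlib-only sketch of the pattern, using a hypothetical lookup helper and made-up directive content:

package main

import (
	"encoding/json"
	"fmt"
)

// lookupStringDirective follows the pattern above: tolerate non-string values
// in the directive document and only accept a string for the requested key.
func lookupStringDirective(dt []byte, key string) (string, bool) {
	jsonDirective := map[string]any{}
	if err := json.Unmarshal(dt, &jsonDirective); err != nil {
		return "", false
	}
	if vAny, ok := jsonDirective[key]; ok {
		if v, ok := vAny.(string); ok {
			return v, true
		}
	}
	return "", false
}

func main() {
	dt := []byte(`{"syntax": "docker/dockerfile:1", "experimental": true}`)
	fmt.Println(lookupStringDirective(dt, "syntax"))       // docker/dockerfile:1 true
	fmt.Println(lookupStringDirective(dt, "experimental")) // "" false (non-string value)
}
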
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go
index f8d891c7..10497816 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/line_parsers.go
@@ -281,7 +281,7 @@ func parseJSON(rest string) (*Node, map[string]bool, error) {
return nil, nil, errDockerfileNotJSONArray
}
- var myJSON []interface{}
+ var myJSON []any
if err := json.Unmarshal([]byte(rest), &myJSON); err != nil {
return nil, nil, err
}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
index 1b0a9622..f1667a10 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/parser.go
@@ -220,7 +220,7 @@ func init() {
// based on the command and command arguments. A Node is created from the
// result of the dispatch.
func newNodeFromLine(line string, d *directives, comments []string) (*Node, error) {
- cmd, flags, args, err := splitCommand(line)
+ cmd, flags, args, err := splitCommand(line, d)
if err != nil {
return nil, err
}
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go
index d1c87522..87b73f24 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerfile/parser/split_command.go
@@ -7,7 +7,7 @@ import (
// splitCommand takes a single line of text and parses out the cmd and args,
// which are used for dispatching to more exact parsing functions.
-func splitCommand(line string) (string, []string, string, error) {
+func splitCommand(line string, d *directives) (string, []string, string, error) {
var args string
var flags []string
@@ -16,7 +16,7 @@ func splitCommand(line string) (string, []string, string, error) {
if len(cmdline) == 2 {
var err error
- args, flags, err = extractBuilderFlags(cmdline[1])
+ args, flags, err = extractBuilderFlags(cmdline[1], d)
if err != nil {
return "", nil, "", err
}
@@ -25,7 +25,7 @@ func splitCommand(line string) (string, []string, string, error) {
return cmdline[0], flags, strings.TrimSpace(args), nil
}
-func extractBuilderFlags(line string) (string, []string, error) {
+func extractBuilderFlags(line string, d *directives) (string, []string, error) {
// Parses the BuilderFlags and returns the remaining part of the line
const (
@@ -87,7 +87,7 @@ func extractBuilderFlags(line string) (string, []string, error) {
phase = inQuote
continue
}
- if ch == '\\' {
+ if ch == d.escapeToken {
if pos+1 == len(line) {
continue // just skip \ at end
}
@@ -104,7 +104,7 @@ func extractBuilderFlags(line string) (string, []string, error) {
phase = inWord
continue
}
- if ch == '\\' {
+ if ch == d.escapeToken {
if pos+1 == len(line) {
phase = inWord
continue // just skip \ at end
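
Threading the directives through splitCommand and extractBuilderFlags lets flag parsing honor a Dockerfile's escape directive (for example `# escape=` followed by a backtick on Windows) instead of hard-coding the backslash. The standalone sketch below shows why the token matters; unescape is a hypothetical helper, not the vendored parser.

package main

import "fmt"

// unescape drops the escape token and keeps the escaped character, the same
// treatment extractBuilderFlags applies while scanning flag values.
func unescape(s string, escapeToken rune) string {
	var out []rune
	runes := []rune(s)
	for i := 0; i < len(runes); i++ {
		if runes[i] == escapeToken && i+1 < len(runes) {
			i++ // skip the escape token, keep what follows
		}
		out = append(out, runes[i])
	}
	return string(out)
}

func main() {
	const flag = `--mount=target=C:\tools`
	fmt.Println(unescape(flag, '\\')) // --mount=target=C:tools  (backslash swallowed)
	fmt.Println(unescape(flag, '`'))  // --mount=target=C:\tools (escape token is ` instead)
}
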
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/build.go b/vendor/github.com/moby/buildkit/frontend/dockerui/build.go
index cc93dfaf..fe2dbd05 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerui/build.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerui/build.go
@@ -4,6 +4,7 @@ import (
"context"
"encoding/json"
"fmt"
+ "slices"
"github.com/containerd/platforms"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
@@ -54,23 +55,16 @@ func (bc *Client) Build(ctx context.Context, fn BuildFunc) (*ResultBuilder, erro
}
}
- p := platforms.DefaultSpec()
+ var p ocispecs.Platform
if tp != nil {
p = *tp
+ } else {
+ p = platforms.DefaultSpec()
}
- // in certain conditions we allow input platform to be extended from base image
- if p.OS == "windows" && img.OS == p.OS {
- if p.OSVersion == "" && img.OSVersion != "" {
- p.OSVersion = img.OSVersion
- }
- if p.OSFeatures == nil && len(img.OSFeatures) > 0 {
- p.OSFeatures = append([]string{}, img.OSFeatures...)
- }
- }
-
- p = platforms.Normalize(p)
k := platforms.FormatAll(p)
+ p = extendWindowsPlatform(p, img.Platform)
+ p = platforms.Normalize(p)
if bc.MultiPlatformRequested {
res.AddRef(k, ref)
@@ -126,3 +120,16 @@ func (rb *ResultBuilder) EachPlatform(ctx context.Context, fn func(ctx context.C
}
return eg.Wait()
}
+
+func extendWindowsPlatform(p, imgP ocispecs.Platform) ocispecs.Platform {
+ // in certain conditions we allow input platform to be extended from base image
+ if p.OS == "windows" && imgP.OS == p.OS {
+ if p.OSVersion == "" && imgP.OSVersion != "" {
+ p.OSVersion = imgP.OSVersion
+ }
+ if p.OSFeatures == nil && len(imgP.OSFeatures) > 0 {
+ p.OSFeatures = slices.Clone(imgP.OSFeatures)
+ }
+ }
+ return p
+}
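
A quick illustration of the extracted helper: a requested Windows platform inherits OSVersion (and OSFeatures) from the base image only when it does not set them itself. The mirror below trims the helper to OSVersion and uses made-up version values.

package main

import (
	"fmt"

	ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)

// extendWindowsPlatform mirrors the vendored helper, trimmed to OSVersion.
func extendWindowsPlatform(p, imgP ocispecs.Platform) ocispecs.Platform {
	if p.OS == "windows" && imgP.OS == p.OS && p.OSVersion == "" && imgP.OSVersion != "" {
		p.OSVersion = imgP.OSVersion
	}
	return p
}

func main() {
	req := ocispecs.Platform{OS: "windows", Architecture: "amd64"}
	base := ocispecs.Platform{OS: "windows", Architecture: "amd64", OSVersion: "10.0.20348.2700"}
	fmt.Println(extendWindowsPlatform(req, base).OSVersion) // 10.0.20348.2700
}
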
diff --git a/vendor/github.com/moby/buildkit/frontend/dockerui/config.go b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go
index ee8a2667..e16c6e05 100644
--- a/vendor/github.com/moby/buildkit/frontend/dockerui/config.go
+++ b/vendor/github.com/moby/buildkit/frontend/dockerui/config.go
@@ -148,9 +148,11 @@ func (bc *Client) BuildOpts() client.BuildOpts {
func (bc *Client) init() error {
opts := bc.bopts.Opts
- defaultBuildPlatform := platforms.Normalize(platforms.DefaultSpec())
+ var defaultBuildPlatform ocispecs.Platform
if workers := bc.bopts.Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 {
defaultBuildPlatform = workers[0].Platforms[0]
+ } else {
+ defaultBuildPlatform = platforms.Normalize(platforms.DefaultSpec())
}
buildPlatforms := []ocispecs.Platform{defaultBuildPlatform}
targetPlatforms := []ocispecs.Platform{}
@@ -459,9 +461,11 @@ func (bc *Client) NamedContext(name string, opt ContextOpt) (*NamedContext, erro
}
name = strings.TrimSuffix(reference.FamiliarString(named), ":latest")
- pp := platforms.DefaultSpec()
+ var pp ocispecs.Platform
if opt.Platform != nil {
pp = *opt.Platform
+ } else {
+ pp = platforms.DefaultSpec()
}
pname := name + "::" + platforms.FormatAll(platforms.Normalize(pp))
nc, err := bc.namedContext(name, pname, opt)
diff --git a/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go b/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go
index 45a4c0ee..3b1e980e 100644
--- a/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go
+++ b/vendor/github.com/moby/buildkit/frontend/subrequests/lint/lint.go
@@ -134,7 +134,7 @@ func (results *LintResults) ToResult(scb SourceInfoMap) (*client.Result, error)
if len(results.Warnings) > 0 || results.Error != nil {
status = 1
}
- res.AddMeta("result.statuscode", []byte(fmt.Sprintf("%d", status)))
+ res.AddMeta("result.statuscode", fmt.Appendf(nil, "%d", status))
res.AddMeta("version", []byte(SubrequestLintDefinition.Version))
return res, nil
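
fmt.Appendf (Go 1.19+) formats directly into a byte slice, so the []byte(fmt.Sprintf(...)) round trip becomes a single call with one less allocation. The same substitution appears in several files below; a trivial demonstration:

package main

import "fmt"

func main() {
	status := 1
	a := []byte(fmt.Sprintf("%d", status)) // old: format to a string, then copy into a []byte
	b := fmt.Appendf(nil, "%d", status)    // new: append the formatted bytes directly
	fmt.Println(string(a) == string(b))    // true
}
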
diff --git a/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go b/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go
index 0280e96d..54e458e5 100644
--- a/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go
+++ b/vendor/github.com/moby/buildkit/session/auth/authprovider/authprovider.go
@@ -45,7 +45,7 @@ type DockerAuthProviderConfig struct {
TLSConfigs map[string]*AuthTLSConfig
// ExpireCachedAuth is a function that returns true if the auth config should be refreshed
// instead of using a pre-cached result.
- // If nil then the cached result will expire after 10 minutes.
+ // If nil then the cached result will expire after 4 minutes and 50 seconds.
// The function is called with the time the cached auth config was created
// and the server URL the auth config is for.
ExpireCachedAuth func(created time.Time, serverURL string) bool
@@ -59,7 +59,8 @@ type authConfigCacheEntry struct {
func NewDockerAuthProvider(cfg DockerAuthProviderConfig) session.Attachable {
if cfg.ExpireCachedAuth == nil {
cfg.ExpireCachedAuth = func(created time.Time, _ string) bool {
- return time.Since(created) > 10*time.Minute
+ // Tokens for Google Artifact Registry via Workload Identity expire after 5 minutes.
+ return time.Since(created) > 4*time.Minute+50*time.Second
}
}
return &authProvider{
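
Callers that are not affected by the short-lived Artifact Registry tokens can still opt back into a longer cache window by supplying their own ExpireCachedAuth. A hedged sketch; loading the config via github.com/docker/cli/cli/config is an assumption about the caller, not something this diff requires.

package main

import (
	"os"
	"time"

	"github.com/docker/cli/cli/config"
	"github.com/moby/buildkit/session/auth/authprovider"
)

func main() {
	// Load ~/.docker/config.json; warnings are written to stderr.
	cfg := config.LoadDefaultConfigFile(os.Stderr)

	// Restore the previous 10-minute cache window.
	_ = authprovider.NewDockerAuthProvider(authprovider.DockerAuthProviderConfig{
		ConfigFile: cfg,
		ExpireCachedAuth: func(created time.Time, _ string) bool {
			return time.Since(created) > 10*time.Minute
		},
	})
}
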
diff --git a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go
index 1f276c77..edf6ce41 100644
--- a/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go
+++ b/vendor/github.com/moby/buildkit/session/filesync/diffcopy.go
@@ -17,8 +17,8 @@ import (
type Stream interface {
Context() context.Context
- SendMsg(m interface{}) error
- RecvMsg(m interface{}) error
+ SendMsg(m any) error
+ RecvMsg(m any) error
}
func newStreamWriter(stream grpc.ClientStream) io.WriteCloser {
diff --git a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go
index 25e3ddee..019c2bbf 100644
--- a/vendor/github.com/moby/buildkit/session/grpchijack/dial.go
+++ b/vendor/github.com/moby/buildkit/session/grpchijack/dial.go
@@ -32,8 +32,8 @@ func Dialer(api controlapi.ControlClient) session.Dialer {
type stream interface {
Context() context.Context
- SendMsg(m interface{}) error
- RecvMsg(m interface{}) error
+ SendMsg(m any) error
+ RecvMsg(m any) error
}
func streamToConn(stream stream) (net.Conn, <-chan struct{}) {
diff --git a/vendor/github.com/moby/buildkit/session/sshforward/copy.go b/vendor/github.com/moby/buildkit/session/sshforward/copy.go
index 804debd1..902063c5 100644
--- a/vendor/github.com/moby/buildkit/session/sshforward/copy.go
+++ b/vendor/github.com/moby/buildkit/session/sshforward/copy.go
@@ -9,8 +9,8 @@ import (
)
type Stream interface {
- SendMsg(m interface{}) error
- RecvMsg(m interface{}) error
+ SendMsg(m any) error
+ RecvMsg(m any) error
}
func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error {
diff --git a/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go b/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go
index 2af434d8..96815302 100644
--- a/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go
+++ b/vendor/github.com/moby/buildkit/session/upload/uploadprovider/provider.go
@@ -63,23 +63,20 @@ type writer struct {
grpc.ServerStream
}
-func (w *writer) Write(dt []byte) (int, error) {
- // avoid sending too big messages on grpc stream
+func (w *writer) Write(dt []byte) (n int, err error) {
const maxChunkSize = 3 * 1024 * 1024
- if len(dt) > maxChunkSize {
- n1, err := w.Write(dt[:maxChunkSize])
- if err != nil {
- return n1, err
+ for len(dt) > 0 {
+ data := dt
+ if len(data) > maxChunkSize {
+ data = data[:maxChunkSize]
}
- dt = dt[maxChunkSize:]
- var n2 int
- if n2, err := w.Write(dt); err != nil {
- return n1 + n2, err
+
+ msg := &upload.BytesMessage{Data: data}
+ if err := w.SendMsg(msg); err != nil {
+ return n, err
}
- return n1 + n2, nil
+ n += len(data)
+ dt = dt[len(data):]
}
- if err := w.SendMsg(&upload.BytesMessage{Data: dt}); err != nil {
- return 0, err
- }
- return len(dt), nil
+ return n, nil
}
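
The rewritten Write chunks a large payload into at most 3 MiB gRPC messages with a plain loop instead of recursing; the old recursive version also under-reported the byte count on success because the inner n2 was shadowed in the if statement. The stdlib-only sketch below shows the same slicing pattern against a hypothetical send callback.

package main

import "fmt"

// writeChunked sends dt in slices of at most maxChunkSize bytes and reports
// how many bytes were handed to send.
func writeChunked(dt []byte, maxChunkSize int, send func([]byte) error) (n int, err error) {
	for len(dt) > 0 {
		data := dt
		if len(data) > maxChunkSize {
			data = data[:maxChunkSize]
		}
		if err := send(data); err != nil {
			return n, err
		}
		n += len(data)
		dt = dt[len(data):]
	}
	return n, nil
}

func main() {
	payload := make([]byte, 10)
	n, _ := writeChunked(payload, 4, func(chunk []byte) error {
		fmt.Println("sent", len(chunk), "bytes") // 4, 4, then 2
		return nil
	})
	fmt.Println("total", n) // total 10
}
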
diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/op.go b/vendor/github.com/moby/buildkit/solver/errdefs/op.go
index 6fe44976..519636b0 100644
--- a/vendor/github.com/moby/buildkit/solver/errdefs/op.go
+++ b/vendor/github.com/moby/buildkit/solver/errdefs/op.go
@@ -12,7 +12,7 @@ func (e *OpError) Unwrap() error {
return e.error
}
-func WithOp(err error, anyOp interface{}, opDesc map[string]string) error {
+func WithOp(err error, anyOp any, opDesc map[string]string) error {
op, ok := anyOp.(*pb.Op)
if err == nil || !ok {
return err
diff --git a/vendor/github.com/moby/buildkit/solver/errdefs/source.go b/vendor/github.com/moby/buildkit/solver/errdefs/source.go
index 06e3cd86..c599f667 100644
--- a/vendor/github.com/moby/buildkit/solver/errdefs/source.go
+++ b/vendor/github.com/moby/buildkit/solver/errdefs/source.go
@@ -98,10 +98,7 @@ func (s *Source) Print(w io.Writer) error {
func containsLine(rr []*pb.Range, l int) bool {
for _, r := range rr {
- e := r.End.Line
- if e < r.Start.Line {
- e = r.Start.Line
- }
+ e := max(r.End.Line, r.Start.Line)
if r.Start.Line <= int32(l) && e >= int32(l) {
return true
}
@@ -112,10 +109,7 @@ func containsLine(rr []*pb.Range, l int) bool {
func getStartEndLine(rr []*pb.Range) (start int, end int, ok bool) {
first := true
for _, r := range rr {
- e := r.End.Line
- if e < r.Start.Line {
- e = r.Start.Line
- }
+ e := max(r.End.Line, r.Start.Line)
if first || int(r.Start.Line) < start {
start = int(r.Start.Line)
}
diff --git a/vendor/github.com/moby/buildkit/solver/pb/attr.go b/vendor/github.com/moby/buildkit/solver/pb/attr.go
index 85e7cce6..473ac05e 100644
--- a/vendor/github.com/moby/buildkit/solver/pb/attr.go
+++ b/vendor/github.com/moby/buildkit/solver/pb/attr.go
@@ -20,6 +20,8 @@ const AttrHTTPFilename = "http.filename"
const AttrHTTPPerm = "http.perm"
const AttrHTTPUID = "http.uid"
const AttrHTTPGID = "http.gid"
+const AttrHTTPAuthHeaderSecret = "http.authheadersecret"
+const AttrHTTPHeaderPrefix = "http.header."
const AttrImageResolveMode = "image.resolvemode"
const AttrImageResolveModeDefault = "default"
diff --git a/vendor/github.com/moby/buildkit/solver/pb/caps.go b/vendor/github.com/moby/buildkit/solver/pb/caps.go
index 173791e4..16b546d7 100644
--- a/vendor/github.com/moby/buildkit/solver/pb/caps.go
+++ b/vendor/github.com/moby/buildkit/solver/pb/caps.go
@@ -31,9 +31,12 @@ const (
CapSourceGitSubdir apicaps.CapID = "source.git.subdir"
CapSourceHTTP apicaps.CapID = "source.http"
+ CapSourceHTTPAuth apicaps.CapID = "source.http.auth"
CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum"
CapSourceHTTPPerm apicaps.CapID = "source.http.perm"
- CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid"
+	// NOTE: "soruce" is a historical typo, kept for backward compatibility.
+ CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid"
+ CapSourceHTTPHeader apicaps.CapID = "source.http.header"
CapSourceOCILayout apicaps.CapID = "source.ocilayout"
@@ -230,7 +233,7 @@ func init() {
})
Caps.Init(apicaps.Cap{
- ID: CapSourceOCILayout,
+ ID: CapSourceHTTPAuth,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
@@ -241,6 +244,18 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
+ Caps.Init(apicaps.Cap{
+ ID: CapSourceHTTPHeader,
+ Enabled: true,
+ Status: apicaps.CapStatusExperimental,
+ })
+
+ Caps.Init(apicaps.Cap{
+ ID: CapSourceOCILayout,
+ Enabled: true,
+ Status: apicaps.CapStatusExperimental,
+ })
+
Caps.Init(apicaps.Cap{
ID: CapBuildOpLLBFileName,
Enabled: true,
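
CapSourceHTTPAuth and CapSourceHTTPHeader are registered as experimental, so a frontend would normally check for them before emitting LLB that sets http.authheadersecret or http.header.* attributes. A hedged sketch, assuming the gateway client API (BuildOpts().LLBCaps) is available in this vendor tree:

package capcheck

import (
	"github.com/moby/buildkit/frontend/gateway/client"
	"github.com/moby/buildkit/solver/pb"
)

// SupportsHTTPAuthAndHeaders reports whether the connected buildkitd
// advertises both new HTTP source capabilities.
func SupportsHTTPAuthAndHeaders(c client.Client) bool {
	caps := c.BuildOpts().LLBCaps
	return caps.Supports(pb.CapSourceHTTPAuth) == nil &&
		caps.Supports(pb.CapSourceHTTPHeader) == nil
}
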
diff --git a/vendor/github.com/moby/buildkit/solver/pb/platform.go b/vendor/github.com/moby/buildkit/solver/pb/platform.go
index b72bb4a1..9030615c 100644
--- a/vendor/github.com/moby/buildkit/solver/pb/platform.go
+++ b/vendor/github.com/moby/buildkit/solver/pb/platform.go
@@ -1,6 +1,8 @@
package pb
import (
+ "slices"
+
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
@@ -12,7 +14,7 @@ func (p *Platform) Spec() ocispecs.Platform {
OSVersion: p.OSVersion,
}
if p.OSFeatures != nil {
- result.OSFeatures = append([]string{}, p.OSFeatures...)
+ result.OSFeatures = slices.Clone(p.OSFeatures)
}
return result
}
@@ -25,7 +27,7 @@ func PlatformFromSpec(p ocispecs.Platform) *Platform {
OSVersion: p.OSVersion,
}
if p.OSFeatures != nil {
- result.OSFeatures = append([]string{}, p.OSFeatures...)
+ result.OSFeatures = slices.Clone(p.OSFeatures)
}
return result
}
diff --git a/vendor/github.com/moby/buildkit/util/contentutil/source.go b/vendor/github.com/moby/buildkit/util/contentutil/source.go
index a12d60d1..4ee563b1 100644
--- a/vendor/github.com/moby/buildkit/util/contentutil/source.go
+++ b/vendor/github.com/moby/buildkit/util/contentutil/source.go
@@ -2,6 +2,7 @@ package contentutil
import (
"net/url"
+ "slices"
"strings"
"github.com/containerd/containerd/v2/core/content"
@@ -24,11 +25,8 @@ func HasSource(info content.Info, refspec reference.Spec) (bool, error) {
return false, nil
}
- for _, repo := range strings.Split(repoLabel, ",") {
- // the target repo is not a candidate
- if repo == target {
- return true, nil
- }
+ if slices.Contains(strings.Split(repoLabel, ","), target) {
+ return true, nil
}
return false, nil
}
diff --git a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go
index faf8411b..2ed11a95 100644
--- a/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go
+++ b/vendor/github.com/moby/buildkit/util/flightcontrol/flightcontrol.go
@@ -4,6 +4,7 @@ import (
"context"
"io"
"math/rand"
+ "slices"
"sort"
"sync"
"time"
@@ -211,7 +212,7 @@ func (c *call[T]) Err() error {
}
}
-func (c *call[T]) Value(key interface{}) interface{} {
+func (c *call[T]) Value(key any) any {
if key == contextKey {
return c.progressState
}
@@ -353,7 +354,7 @@ func (ps *progressState) close(pw progress.Writer) {
for i, w := range ps.writers {
if w == rw {
w.Close()
- ps.writers = append(ps.writers[:i], ps.writers[i+1:]...)
+ ps.writers = slices.Delete(ps.writers, i, i+1)
break
}
}
diff --git a/vendor/github.com/moby/buildkit/util/gitutil/git_cli.go b/vendor/github.com/moby/buildkit/util/gitutil/git_cli.go
index 23ff8aa4..39d43979 100644
--- a/vendor/github.com/moby/buildkit/util/gitutil/git_cli.go
+++ b/vendor/github.com/moby/buildkit/util/gitutil/git_cli.go
@@ -6,6 +6,7 @@ import (
"io"
"os"
"os/exec"
+ "slices"
"strings"
"github.com/pkg/errors"
@@ -120,7 +121,7 @@ func NewGitCLI(opts ...Option) *GitCLI {
// with the given options applied on top.
func (cli *GitCLI) New(opts ...Option) *GitCLI {
clone := *cli
- clone.args = append([]string{}, cli.args...)
+ clone.args = slices.Clone(cli.args)
for _, opt := range opts {
opt(&clone)
diff --git a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go b/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go
index a5920789..a4c77495 100644
--- a/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go
+++ b/vendor/github.com/moby/buildkit/util/grpcerrors/intercept.go
@@ -10,7 +10,7 @@ import (
"google.golang.org/grpc"
)
-func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) {
+func UnaryServerInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
resp, err = handler(ctx, req)
oldErr := err
if err != nil {
@@ -29,7 +29,7 @@ func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.Una
return resp, err
}
-func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
+func StreamServerInterceptor(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
err := ToGRPC(ss.Context(), handler(srv, ss))
if err != nil {
stack.Helper()
@@ -37,7 +37,7 @@ func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.S
return err
}
-func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
+func UnaryClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
err := FromGRPC(invoker(ctx, method, req, reply, cc, opts...))
if err != nil {
stack.Helper()
diff --git a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go
index 673f9ab5..e6b53ec7 100644
--- a/vendor/github.com/moby/buildkit/util/progress/multiwriter.go
+++ b/vendor/github.com/moby/buildkit/util/progress/multiwriter.go
@@ -16,7 +16,7 @@ type MultiWriter struct {
mu sync.Mutex
items []*Progress
writers map[rawProgressWriter]struct{}
- meta map[string]interface{}
+ meta map[string]any
}
var _ rawProgressWriter = &MultiWriter{}
@@ -24,7 +24,7 @@ var _ rawProgressWriter = &MultiWriter{}
func NewMultiWriter(opts ...WriterOption) *MultiWriter {
mw := &MultiWriter{
writers: map[rawProgressWriter]struct{}{},
- meta: map[string]interface{}{},
+ meta: map[string]any{},
}
for _, o := range opts {
o(mw)
@@ -70,7 +70,7 @@ func (ps *MultiWriter) Delete(pw Writer) {
ps.mu.Unlock()
}
-func (ps *MultiWriter) Write(id string, v interface{}) error {
+func (ps *MultiWriter) Write(id string, v any) error {
p := &Progress{
ID: id,
Timestamp: time.Now(),
@@ -83,7 +83,7 @@ func (ps *MultiWriter) Write(id string, v interface{}) error {
func (ps *MultiWriter) WriteRawProgress(p *Progress) error {
meta := p.meta
if len(ps.meta) > 0 {
- meta = map[string]interface{}{}
+ meta = map[string]any{}
maps.Copy(meta, p.meta)
for k, v := range ps.meta {
if _, ok := meta[k]; !ok {
diff --git a/vendor/github.com/moby/buildkit/util/progress/progress.go b/vendor/github.com/moby/buildkit/util/progress/progress.go
index 89b8a7da..f723bc93 100644
--- a/vendor/github.com/moby/buildkit/util/progress/progress.go
+++ b/vendor/github.com/moby/buildkit/util/progress/progress.go
@@ -67,7 +67,7 @@ func WithProgress(ctx context.Context, pw Writer) context.Context {
return context.WithValue(ctx, contextKey, pw)
}
-func WithMetadata(key string, val interface{}) WriterOption {
+func WithMetadata(key string, val any) WriterOption {
return func(w Writer) {
if pw, ok := w.(*progressWriter); ok {
pw.meta[key] = val
@@ -84,7 +84,7 @@ type Controller interface {
}
type Writer interface {
- Write(id string, value interface{}) error
+ Write(id string, value any) error
Close() error
}
@@ -95,8 +95,8 @@ type Reader interface {
type Progress struct {
ID string
Timestamp time.Time
- Sys interface{}
- meta map[string]interface{}
+ Sys any
+ meta map[string]any
}
type Status struct {
@@ -207,7 +207,7 @@ func pipe() (*progressReader, *progressWriter, func(error)) {
}
func newWriter(pw *progressWriter) *progressWriter {
- meta := make(map[string]interface{})
+ meta := make(map[string]any)
maps.Copy(meta, pw.meta)
pw = &progressWriter{
reader: pw.reader,
@@ -220,10 +220,10 @@ func newWriter(pw *progressWriter) *progressWriter {
type progressWriter struct {
done bool
reader *progressReader
- meta map[string]interface{}
+ meta map[string]any
}
-func (pw *progressWriter) Write(id string, v interface{}) error {
+func (pw *progressWriter) Write(id string, v any) error {
if pw.done {
return errors.Errorf("writing %s to closed progress writer", id)
}
@@ -238,7 +238,7 @@ func (pw *progressWriter) Write(id string, v interface{}) error {
func (pw *progressWriter) WriteRawProgress(p *Progress) error {
meta := p.meta
if len(pw.meta) > 0 {
- meta = map[string]interface{}{}
+ meta = map[string]any{}
maps.Copy(meta, p.meta)
for k, v := range pw.meta {
if _, ok := meta[k]; !ok {
@@ -267,14 +267,14 @@ func (pw *progressWriter) Close() error {
return nil
}
-func (p *Progress) Meta(key string) (interface{}, bool) {
+func (p *Progress) Meta(key string) (any, bool) {
v, ok := p.meta[key]
return v, ok
}
type noOpWriter struct{}
-func (pw *noOpWriter) Write(_ string, _ interface{}) error {
+func (pw *noOpWriter) Write(_ string, _ any) error {
return nil
}
diff --git a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go
index c6ec68e6..89cf57f9 100644
--- a/vendor/github.com/moby/buildkit/util/progress/progressui/display.go
+++ b/vendor/github.com/moby/buildkit/util/progress/progressui/display.go
@@ -765,7 +765,7 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) {
} else if sec < 100 {
prec = 2
}
- v.logs = append(v.logs, []byte(fmt.Sprintf("%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt)))
+ v.logs = append(v.logs, fmt.Appendf(nil, "%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))
}
i++
})
@@ -787,7 +787,7 @@ func (t *trace) printErrorLogs(f io.Writer) {
}
// printer keeps last logs buffer
if v.logsBuffer != nil {
- for i := 0; i < v.logsBuffer.Len(); i++ {
+ for range v.logsBuffer.Len() {
if v.logsBuffer.Value != nil {
fmt.Fprintln(f, string(v.logsBuffer.Value.([]byte)))
}
@@ -1071,7 +1071,7 @@ func (disp *ttyDisplay) print(d displayInfo, width, height int, all bool) {
}
// override previous content
if diff := disp.lineCount - lineCount; diff > 0 {
- for i := 0; i < diff; i++ {
+ for range diff {
fmt.Fprintln(disp.c, strings.Repeat(" ", width))
}
fmt.Fprint(disp.c, aec.EmptyBuilder.Up(uint(diff)).Column(0).ANSI)
diff --git a/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go b/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go
index 69f39a30..63b9253b 100644
--- a/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go
+++ b/vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go
@@ -33,7 +33,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc {
}
}
if logger != nil {
- logger([]byte(fmt.Sprintf("error: %v\n", err.Error())))
+ logger(fmt.Appendf(nil, "error: %v\n", err.Error()))
}
} else {
return descs, nil
@@ -43,7 +43,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc {
return nil, err
}
if logger != nil {
- logger([]byte(fmt.Sprintf("retrying in %v\n", backoff)))
+ logger(fmt.Appendf(nil, "retrying in %v\n", backoff))
}
time.Sleep(backoff)
backoff *= 2
diff --git a/vendor/github.com/moby/buildkit/util/system/path.go b/vendor/github.com/moby/buildkit/util/system/path.go
index 14f02b49..fdd41038 100644
--- a/vendor/github.com/moby/buildkit/util/system/path.go
+++ b/vendor/github.com/moby/buildkit/util/system/path.go
@@ -2,7 +2,6 @@ package system
import (
"path"
- "path/filepath"
"strings"
"github.com/pkg/errors"
@@ -46,7 +45,7 @@ func NormalizePath(parent, newPath, inputOS string, keepSlash bool) (string, err
}
var err error
- parent, err = CheckSystemDriveAndRemoveDriveLetter(parent, inputOS)
+ parent, err = CheckSystemDriveAndRemoveDriveLetter(parent, inputOS, keepSlash)
if err != nil {
return "", errors.Wrap(err, "removing drive letter")
}
@@ -61,7 +60,7 @@ func NormalizePath(parent, newPath, inputOS string, keepSlash bool) (string, err
newPath = parent
}
- newPath, err = CheckSystemDriveAndRemoveDriveLetter(newPath, inputOS)
+ newPath, err = CheckSystemDriveAndRemoveDriveLetter(newPath, inputOS, keepSlash)
if err != nil {
return "", errors.Wrap(err, "removing drive letter")
}
@@ -137,7 +136,7 @@ func IsAbs(pth, inputOS string) bool {
if inputOS == "" {
inputOS = "linux"
}
- cleanedPath, err := CheckSystemDriveAndRemoveDriveLetter(pth, inputOS)
+ cleanedPath, err := CheckSystemDriveAndRemoveDriveLetter(pth, inputOS, false)
if err != nil {
return false
}
@@ -174,7 +173,7 @@ func IsAbs(pth, inputOS string) bool {
// There is no sane way to support this without adding a lot of complexity
// which I am not sure is worth it.
// \\.\C$\a --> Fail
-func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string, error) {
+func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string, keepSlash bool) (string, error) {
if inputOS == "" {
inputOS = "linux"
}
@@ -193,9 +192,10 @@ func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string,
}
parts := strings.SplitN(path, ":", 2)
+
// Path does not have a drive letter. Just return it.
if len(parts) < 2 {
- return ToSlash(filepath.Clean(path), inputOS), nil
+ return ToSlash(cleanPath(path, inputOS, keepSlash), inputOS), nil
}
// We expect all paths to be in C:
@@ -220,5 +220,30 @@ func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string,
//
// We must return the second element of the split path, as is, without attempting to convert
// it to an absolute path. We have no knowledge of the CWD; that is treated elsewhere.
- return ToSlash(filepath.Clean(parts[1]), inputOS), nil
+ return ToSlash(cleanPath(parts[1], inputOS, keepSlash), inputOS), nil
+}
+
+// cleanPath is an adaptation of filepath.Clean with an option to retain the
+// trailing slash, on either platform.
+// See https://github.com/moby/buildkit/issues/5249
+func cleanPath(origPath, inputOS string, keepSlash bool) string {
+ // so as to handle cases like \\a\\b\\..\\c\\
+ // on Linux, when inputOS is Windows
+ origPath = ToSlash(origPath, inputOS)
+
+ if !keepSlash {
+ return path.Clean(origPath)
+ }
+
+ cleanedPath := path.Clean(origPath)
+ // Windows supports both \\ and / as path separator.
+ hasTrailingSlash := strings.HasSuffix(origPath, "/")
+ if inputOS == "windows" {
+ hasTrailingSlash = hasTrailingSlash || strings.HasSuffix(origPath, "\\")
+ }
+
+ if len(cleanedPath) > 1 && hasTrailingSlash {
+ return cleanedPath + "/"
+ }
+ return cleanedPath
}
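
A behavioural sketch of the new keepSlash flag through the exported entry point; the expected outputs follow from cleanPath above and moby/buildkit#5249, and the inputs are made up.

package main

import (
	"fmt"

	"github.com/moby/buildkit/util/system"
)

func main() {
	p, _ := system.CheckSystemDriveAndRemoveDriveLetter(`C:\a\b\..\c\`, "windows", false)
	fmt.Println(p) // /a/c   (trailing slash cleaned away, as before)

	p, _ = system.CheckSystemDriveAndRemoveDriveLetter(`C:\a\b\..\c\`, "windows", true)
	fmt.Println(p) // /a/c/  (trailing slash retained)
}
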
diff --git a/vendor/github.com/moby/buildkit/util/system/path_unix.go b/vendor/github.com/moby/buildkit/util/system/path_unix.go
new file mode 100644
index 00000000..55391783
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/system/path_unix.go
@@ -0,0 +1,17 @@
+//go:build !windows
+
+package system
+
+import "path/filepath"
+
+// IsAbsolutePath is a thin wrapper around filepath.IsAbs.
+// It exists here only for symmetry with the Windows implementation.
+func IsAbsolutePath(path string) bool {
+ return filepath.IsAbs(path)
+}
+
+// GetAbsolutePath is a no-op on non-Windows platforms; it returns the
+// path unchanged.
+func GetAbsolutePath(path string) string {
+ return path
+}
diff --git a/vendor/github.com/moby/buildkit/util/system/path_windows.go b/vendor/github.com/moby/buildkit/util/system/path_windows.go
new file mode 100644
index 00000000..726d1f2d
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/system/path_windows.go
@@ -0,0 +1,29 @@
+package system
+
+import (
+ "path/filepath"
+ "strings"
+)
+
+// DefaultSystemVolumeName is the default system volume label on Windows
+const DefaultSystemVolumeName = "C:"
+
+// IsAbsolutePath prepends the default system volume label to a path that
+// starts with a path separator (and is therefore presumed absolute), then
+// calls filepath.IsAbs.
+func IsAbsolutePath(path string) bool {
+ path = filepath.Clean(path)
+ if strings.HasPrefix(path, "\\") {
+ path = DefaultSystemVolumeName + path
+ }
+ return filepath.IsAbs(path)
+}
+
+// GetAbsolutePath returns an absolute path rooted at C:\ (the default
+// system volume) on Windows.
+func GetAbsolutePath(path string) string {
+ path = filepath.Clean(path)
+ if len(path) >= 2 && strings.EqualFold(path[:2], DefaultSystemVolumeName) {
+ return path
+ }
+ return DefaultSystemVolumeName + path
+}
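
path_windows.go only builds on Windows, so the sketch below mirrors GetAbsolutePath's logic in a lower-case copy purely to show the expected results; it is an illustration, not the vendored code.

package main

import (
	"fmt"
	"path/filepath"
	"strings"
)

const defaultSystemVolumeName = "C:" // mirrors DefaultSystemVolumeName above

// getAbsolutePath roots a presumed-absolute path on the default system volume.
func getAbsolutePath(path string) string {
	path = filepath.Clean(path)
	if len(path) >= 2 && strings.EqualFold(path[:2], defaultSystemVolumeName) {
		return path
	}
	return defaultSystemVolumeName + path
}

func main() {
	fmt.Println(getAbsolutePath(`\tools`))   // C:\tools
	fmt.Println(getAbsolutePath(`C:\tools`)) // C:\tools (already rooted, unchanged)
}
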
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/client.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/client.go
new file mode 100644
index 00000000..dd6f8372
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/client.go
@@ -0,0 +1,248 @@
+package client
+
+import (
+ "context"
+ "crypto/tls"
+ "net"
+ "net/http"
+ "net/url"
+ "strings"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+// DummyHost is a hostname used for local communication.
+//
+// It acts as a valid formatted hostname for local connections (such as "unix://"
+// or "npipe://") which do not require a hostname. It should never be resolved,
+// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2]
+// and [RFC 6761, Section 6.3]).
+//
+// [RFC 7230, Section 5.4] defines that an empty header must be used for such
+// cases:
+//
+// If the authority component is missing or undefined for the target URI,
+// then a client MUST send a Host header field with an empty field-value.
+//
+// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not
+// allow an empty header to be used, and requires req.URL.Scheme to be either
+// "http" or "https".
+//
+// For further details, refer to:
+//
+// - https://github.com/docker/engine-api/issues/189
+// - https://github.com/golang/go/issues/13624
+// - https://github.com/golang/go/issues/61076
+// - https://github.com/moby/moby/issues/45935
+//
+// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2
+// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3
+// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4
+// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569
+const DummyHost = "api.moby.localhost"
+
+// DefaultVersion is the pinned version of the docker API we utilize.
+const DefaultVersion = "1.47"
+
+// Client is the API client that performs all operations
+// against a docker server.
+type Client struct {
+ // scheme sets the scheme for the client
+ scheme string
+ // host holds the server address to connect to
+ host string
+ // proto holds the client protocol i.e. unix.
+ proto string
+ // addr holds the client address.
+ addr string
+ // basePath holds the path to prepend to the requests.
+ basePath string
+ // client used to send and receive http requests.
+ client *http.Client
+ // version of the server to talk to.
+ version string
+
+ // When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections).
+ // Store the original transport as the http.Client transport will be wrapped with tracing libs.
+ baseTransport *http.Transport
+}
+
+// ErrRedirect is the error returned by CheckRedirect when the request is non-GET.
+var ErrRedirect = errors.New("unexpected redirect in response")
+
+// CheckRedirect specifies the policy for dealing with redirect responses. It
+// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for
+// non-GET requests. It returns an [ErrRedirect] for non-GET requests, otherwise
+// returns a [http.ErrUseLastResponse], which is special-cased by http.Client
+// to use the last response.
+//
+// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308)
+// in the client. The client (and by extension API client) can be made to send
+// a request like "POST /containers//start" where what would normally be in the
+// name section of the URL is empty. This triggers an HTTP 301 from the daemon.
+//
+// In go 1.8 this 301 is converted to a GET request, and ends up getting
+// a 404 from the daemon. This behavior change manifests in the client in that
+// before, the 301 was not followed and the client did not generate an error,
+// but now results in a message like "Error response from daemon: page not found".
+func CheckRedirect(_ *http.Request, via []*http.Request) error {
+ if via[0].Method == http.MethodGet {
+ return http.ErrUseLastResponse
+ }
+ return ErrRedirect
+}
+
+// NewClientWithOpts initializes a new API client with a default HTTPClient, and
+// default API host and version. It also initializes the custom HTTP headers to
+// add to each request.
+//
+// It takes an optional list of [Opt] functional arguments, which are applied in
+// the order they're provided, which allows modifying the defaults when creating
+// the client. For example, the following initializes a client that configures
+// itself with values from environment variables ([FromEnv]), and has automatic
+// API version negotiation enabled ([WithAPIVersionNegotiation]).
+//
+// cli, err := client.NewClientWithOpts(
+// client.FromEnv,
+// client.WithAPIVersionNegotiation(),
+// )
+func NewClientWithOpts(ops ...Opt) (*Client, error) {
+ hostURL, err := ParseHostURL(DefaultDockerHost)
+ if err != nil {
+ return nil, err
+ }
+
+ client, err := defaultHTTPClient(hostURL)
+ if err != nil {
+ return nil, err
+ }
+ c := &Client{
+ host: DefaultDockerHost,
+ version: DefaultVersion,
+ client: client,
+ proto: hostURL.Scheme,
+ addr: hostURL.Host,
+ }
+
+ for _, op := range ops {
+ if err := op(c); err != nil {
+ return nil, err
+ }
+ }
+
+ if tr, ok := c.client.Transport.(*http.Transport); ok {
+ // Store the base transport before we wrap it in tracing libs below
+ // This is used, as an example, to close idle connections when the client is closed
+ c.baseTransport = tr
+ }
+
+ if c.scheme == "" {
+ // TODO(stevvooe): This isn't really the right way to write clients in Go.
+ // `NewClient` should probably only take an `*http.Client` and work from there.
+ // Unfortunately, the model of having a host-ish/url-thingy as the connection
+ // string has us confusing protocol and transport layers. We continue doing
+ // this to avoid breaking existing clients but this should be addressed.
+ if c.tlsConfig() != nil {
+ c.scheme = "https"
+ } else {
+ c.scheme = "http"
+ }
+ }
+ return c, nil
+}
+
+func (cli *Client) tlsConfig() *tls.Config {
+ if cli.baseTransport == nil {
+ return nil
+ }
+ return cli.baseTransport.TLSClientConfig
+}
+
+func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) {
+ // Necessary to prevent long-lived processes using the
+ // client from leaking connections due to idle connections
+ // not being released.
+ transport := &http.Transport{
+ MaxIdleConns: 6,
+ IdleConnTimeout: 30 * time.Second,
+ }
+ if err := configureTransport(transport, hostURL.Scheme, hostURL.Host); err != nil {
+ return nil, err
+ }
+ return &http.Client{
+ Transport: transport,
+ CheckRedirect: CheckRedirect,
+ }, nil
+}
+
+// Close the transport used by the client
+func (cli *Client) Close() error {
+ if cli.baseTransport != nil {
+ cli.baseTransport.CloseIdleConnections()
+ return nil
+ }
+ return nil
+}
+
+// ParseHostURL parses a url string, validates the string is a host url, and
+// returns the parsed URL
+func ParseHostURL(host string) (*url.URL, error) {
+ proto, addr, ok := strings.Cut(host, "://")
+ if !ok || addr == "" {
+ return nil, errors.Errorf("unable to parse docker host `%s`", host)
+ }
+
+ var basePath string
+ if proto == "tcp" {
+ parsed, err := url.Parse("tcp://" + addr)
+ if err != nil {
+ return nil, err
+ }
+ addr = parsed.Host
+ basePath = parsed.Path
+ }
+ return &url.URL{
+ Scheme: proto,
+ Host: addr,
+ Path: basePath,
+ }, nil
+}
+
+func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) {
+ if cli.baseTransport == nil || cli.baseTransport.DialContext == nil {
+ return nil
+ }
+
+ if cli.baseTransport.TLSClientConfig != nil {
+ // When using a tls config we don't use the configured dialer but instead a fallback dialer...
+ // Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn
+ // I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit.
+ return nil
+ }
+ return cli.baseTransport.DialContext
+}
+
+// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header,
+// that can be used for proxying the daemon connection. It is used by
+// ["docker dial-stdio"].
+//
+// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014
+func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
+ return func(ctx context.Context) (net.Conn, error) {
+ if dialFn := cli.dialerFromTransport(); dialFn != nil {
+ return dialFn(ctx, cli.proto, cli.addr)
+ }
+ switch cli.proto {
+ case "unix":
+ return net.Dial(cli.proto, cli.addr)
+ case "npipe":
+ return DialPipe(cli.addr, 32*time.Second)
+ default:
+ if tlsConfig := cli.tlsConfig(); tlsConfig != nil {
+ return tls.Dial(cli.proto, cli.addr, tlsConfig)
+ }
+ return net.Dial(cli.proto, cli.addr)
+ }
+ }
+}
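
A hedged usage sketch for this trimmed test client, using only identifiers that appear in the vendored package below (NewClientWithOpts, WithHost, DefaultDockerHost, Ping, Close); the daemon address is the package default, not a project requirement.

package main

import (
	"context"
	"log"

	"github.com/moby/buildkit/util/testutil/dockerd/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.WithHost(client.DefaultDockerHost))
	if err != nil {
		log.Fatal(err)
	}
	defer cli.Close()

	if err := cli.Ping(context.Background()); err != nil {
		log.Fatal(err)
	}
	log.Println("daemon is reachable")
}
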
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/client_unix.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/client_unix.go
new file mode 100644
index 00000000..e5b921b4
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/client_unix.go
@@ -0,0 +1,7 @@
+//go:build !windows
+
+package client
+
+// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
+// (EnvOverrideHost) environment variable is unset or empty.
+const DefaultDockerHost = "unix:///var/run/docker.sock"
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/client_windows.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/client_windows.go
new file mode 100644
index 00000000..19b954b2
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/client_windows.go
@@ -0,0 +1,5 @@
+package client
+
+// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
+// (EnvOverrideHost) environment variable is unset or empty.
+const DefaultDockerHost = "npipe:////./pipe/docker_engine"
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/errors.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/errors.go
new file mode 100644
index 00000000..84d79781
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/errors.go
@@ -0,0 +1,13 @@
+package client
+
+import (
+ "github.com/pkg/errors"
+)
+
+// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
+func ErrorConnectionFailed(host string) error {
+ if host == "" {
+ return errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
+ }
+ return errors.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host)
+}
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/hijack.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/hijack.go
new file mode 100644
index 00000000..36e5e473
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/hijack.go
@@ -0,0 +1,118 @@
+package client
+
+import (
+ "bufio"
+ "context"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+// DialHijack returns a hijacked connection with negotiated protocol proto.
+func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) {
+ req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
+ if err != nil {
+ return nil, err
+ }
+
+ conn, _, err := cli.setupHijackConn(req, proto)
+ return conn, err
+}
+
+func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) {
+ ctx := req.Context()
+ req.Header.Set("Connection", "Upgrade")
+ req.Header.Set("Upgrade", proto)
+
+ dialer := cli.Dialer()
+ conn, err := dialer(ctx)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
+ }
+ defer func() {
+ if retErr != nil {
+ conn.Close()
+ }
+ }()
+
+ // When we set up a TCP connection for hijack, there could be long periods
+ // of inactivity (a long running command with no output) that in certain
+ // network setups may cause ECONNTIMEOUT, leaving the client in an unknown
+ // state. Setting TCP KeepAlive on the socket connection will prohibit
+ // ECONNTIMEOUT unless the socket connection truly is broken
+ if tcpConn, ok := conn.(*net.TCPConn); ok {
+ _ = tcpConn.SetKeepAlive(true)
+ _ = tcpConn.SetKeepAlivePeriod(30 * time.Second)
+ }
+
+ hc := &hijackedConn{conn, bufio.NewReader(conn)}
+
+ // Server hijacks the connection, error 'connection closed' expected
+ resp, err := hc.RoundTrip(req)
+ if err != nil {
+ return nil, "", err
+ }
+ if resp.StatusCode != http.StatusSwitchingProtocols {
+ _ = resp.Body.Close()
+ return nil, "", errors.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
+ }
+
+ if hc.r.Buffered() > 0 {
+ // If there is buffered content, wrap the connection. We return an
+ // object that implements CloseWrite if the underlying connection
+ // implements it.
+ if _, ok := hc.Conn.(CloseWriter); ok {
+ conn = &hijackedConnCloseWriter{hc}
+ } else {
+ conn = hc
+ }
+ } else {
+ hc.r.Reset(nil)
+ }
+
+ mediaType := resp.Header.Get("Content-Type")
+ return conn, mediaType, nil
+}
+
+// CloseWriter is an interface implemented by connections that can close
+// their write side, stopping further writes.
+type CloseWriter interface {
+ CloseWrite() error
+}
+
+// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case
+// that a) there was already buffered data in the http layer when Hijack() was
+// called, and b) the underlying net.Conn does *not* implement CloseWrite().
+// hijackedConn does not implement CloseWrite() either.
+type hijackedConn struct {
+ net.Conn
+ r *bufio.Reader
+}
+
+func (c *hijackedConn) RoundTrip(req *http.Request) (*http.Response, error) {
+ if err := req.Write(c.Conn); err != nil {
+ return nil, err
+ }
+ return http.ReadResponse(c.r, req)
+}
+
+func (c *hijackedConn) Read(b []byte) (int, error) {
+ return c.r.Read(b)
+}
+
+// hijackedConnCloseWriter is a hijackedConn which additionally implements
+// CloseWrite(). It is returned by setupHijackConn in the case that a) there
+// was already buffered data in the http layer when Hijack() was called, and b)
+// the underlying net.Conn *does* implement CloseWrite().
+type hijackedConnCloseWriter struct {
+ *hijackedConn
+}
+
+var _ CloseWriter = &hijackedConnCloseWriter{}
+
+func (c *hijackedConnCloseWriter) CloseWrite() error {
+ conn := c.Conn.(CloseWriter)
+ return conn.CloseWrite()
+}
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/options.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/options.go
new file mode 100644
index 00000000..6ae8bb81
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/options.go
@@ -0,0 +1,28 @@
+package client
+
+import (
+ "net/http"
+
+ "github.com/pkg/errors"
+)
+
+// Opt is a configuration option to initialize a [Client].
+type Opt func(*Client) error
+
+// WithHost overrides the client host with the specified one.
+func WithHost(host string) Opt {
+ return func(c *Client) error {
+ hostURL, err := ParseHostURL(host)
+ if err != nil {
+ return err
+ }
+ c.host = host
+ c.proto = hostURL.Scheme
+ c.addr = hostURL.Host
+ c.basePath = hostURL.Path
+ if transport, ok := c.client.Transport.(*http.Transport); ok {
+ return configureTransport(transport, c.proto, c.addr)
+ }
+ return errors.Errorf("cannot apply host to transport: %T", c.client.Transport)
+ }
+}
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/ping.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/ping.go
new file mode 100644
index 00000000..9dffb3fe
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/ping.go
@@ -0,0 +1,25 @@
+package client
+
+import (
+ "context"
+ "net/http"
+ "path"
+)
+
+type PingResponse struct{}
+
+func (cli *Client) Ping(ctx context.Context) error {
+ // Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest()
+ // because ping requests are used during API version negotiation, so we want
+ // to hit the non-versioned /_ping endpoint, not /v1.xx/_ping
+ req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil)
+ if err != nil {
+ return err
+ }
+ serverResp, err := cli.doRequest(req)
+ if err != nil {
+ return err
+ }
+ defer ensureReaderClosed(serverResp)
+ return cli.checkResponseErr(serverResp)
+}
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/request.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/request.go
new file mode 100644
index 00000000..47dc9ec0
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/request.go
@@ -0,0 +1,162 @@
+//nolint:forbidigo
+package client
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "io"
+ "net"
+ "net/http"
+ "net/url"
+ "os"
+ "strings"
+
+ "github.com/pkg/errors"
+)
+
+func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) {
+ req, err := http.NewRequestWithContext(ctx, method, path, body)
+ if err != nil {
+ return nil, err
+ }
+ req = cli.addHeaders(req, headers)
+ req.URL.Scheme = cli.scheme
+ req.URL.Host = cli.addr
+
+ if cli.proto == "unix" || cli.proto == "npipe" {
+ // Override host header for non-tcp connections.
+ req.Host = DummyHost
+ }
+
+ if body != nil && req.Header.Get("Content-Type") == "" {
+ req.Header.Set("Content-Type", "text/plain")
+ }
+ return req, nil
+}
+
+func (cli *Client) doRequest(req *http.Request) (*http.Response, error) {
+ resp, err := cli.client.Do(req)
+ if err != nil {
+ if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
+ return nil, errors.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
+ }
+
+ if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
+ return nil, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")
+ }
+
+ // Don't decorate context sentinel errors; users may be comparing to
+ // them directly.
+ if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
+ return nil, err
+ }
+
+ if uErr, ok := err.(*url.Error); ok {
+ if nErr, ok := uErr.Err.(*net.OpError); ok {
+ if os.IsPermission(nErr.Err) {
+ return nil, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
+ }
+ }
+ }
+
+ if nErr, ok := err.(net.Error); ok {
+ // FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)?
+ if nErr.Timeout() {
+ return nil, ErrorConnectionFailed(cli.host)
+ }
+ if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") {
+ return nil, ErrorConnectionFailed(cli.host)
+ }
+ }
+
+ // Although there's not a strongly typed error for this in go-winio,
+ // lots of people are using the default configuration for the docker
+ // daemon on Windows where the daemon is listening on a named pipe
+	// `//./pipe/docker_engine`, and the client must be running elevated.
+ // Give users a clue rather than the not-overly useful message
+ // such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
+ // open //./pipe/docker_engine: The system cannot find the file specified.`.
+ // Note we can't string compare "The system cannot find the file specified" as
+ // this is localised - for example in French the error would be
+ // `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
+ if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
+ // Checks if client is running with elevated privileges
+ if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil {
+ err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect")
+ } else {
+ _ = f.Close()
+ err = errors.Wrap(err, "this error may indicate that the docker daemon is not running")
+ }
+ }
+
+ return nil, errors.Wrap(err, "error during connect")
+ }
+ return resp, nil
+}
+
+func (cli *Client) checkResponseErr(serverResp *http.Response) error {
+ if serverResp == nil {
+ return nil
+ }
+ if serverResp.StatusCode >= 200 && serverResp.StatusCode < 400 {
+ return nil
+ }
+
+ var body []byte
+ var err error
+ var reqURL string
+ if serverResp.Request != nil {
+ reqURL = serverResp.Request.URL.String()
+ }
+ statusMsg := serverResp.Status
+ if statusMsg == "" {
+ statusMsg = http.StatusText(serverResp.StatusCode)
+ }
+ if serverResp.Body != nil {
+ bodyMax := 1 * 1024 * 1024 // 1 MiB
+ bodyR := &io.LimitedReader{
+ R: serverResp.Body,
+ N: int64(bodyMax),
+ }
+ body, err = io.ReadAll(bodyR)
+ if err != nil {
+ return err
+ }
+ if bodyR.N == 0 {
+ return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", statusMsg, bodyMax, reqURL)
+ }
+ }
+ if len(body) == 0 {
+ return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", statusMsg, reqURL)
+ }
+
+ var daemonErr error
+ if serverResp.Header.Get("Content-Type") == "application/json" {
+ var errorResponse struct {
+ Message string `json:"message"`
+ }
+ if err := json.Unmarshal(body, &errorResponse); err != nil {
+ return errors.Wrap(err, "Error reading JSON")
+ }
+ daemonErr = errors.New(strings.TrimSpace(errorResponse.Message))
+ } else {
+ daemonErr = errors.New(strings.TrimSpace(string(body)))
+ }
+ return errors.Wrap(daemonErr, "Error response from daemon")
+}
+
+func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request {
+ for k, v := range headers {
+ req.Header[http.CanonicalHeaderKey(k)] = v
+ }
+ return req
+}
+
+func ensureReaderClosed(response *http.Response) {
+ if response.Body != nil {
+ // Drain up to 512 bytes and close the body to let the Transport reuse the connection
+ _, _ = io.CopyN(io.Discard, response.Body, 512)
+ _ = response.Body.Close()
+ }
+}
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/sockets.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/sockets.go
new file mode 100644
index 00000000..d3aeb5bf
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/sockets.go
@@ -0,0 +1,32 @@
+package client
+
+import (
+ "net"
+ "net/http"
+ "time"
+)
+
+const defaultTimeout = 10 * time.Second
+
+// configureTransport configures the specified [http.Transport] according to the specified proto
+// and addr.
+//
+// If the proto is unix (using a unix socket to communicate) or npipe the compression is disabled.
+// For other protos, compression is enabled. If you want to manually enable or
+// disable compression, do so _after_ any call to configureTransport on the
+// same [http.Transport].
+func configureTransport(tr *http.Transport, proto, addr string) error {
+ switch proto {
+ case "unix":
+ return configureUnixTransport(tr, proto, addr)
+ case "npipe":
+ return configureNpipeTransport(tr, proto, addr)
+ default:
+ tr.Proxy = http.ProxyFromEnvironment
+ tr.DisableCompression = false
+ tr.DialContext = (&net.Dialer{
+ Timeout: defaultTimeout,
+ }).DialContext
+ }
+ return nil
+}
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/sockets_unix.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/sockets_unix.go
new file mode 100644
index 00000000..04cd0022
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/sockets_unix.go
@@ -0,0 +1,40 @@
+//go:build !windows
+
+package client
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "syscall"
+ "time"
+
+ "github.com/pkg/errors"
+)
+
+const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
+
+func configureUnixTransport(tr *http.Transport, proto, addr string) error {
+ if len(addr) > maxUnixSocketPathSize {
+ return errors.Errorf("unix socket path %q is too long", addr)
+ }
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ dialer := &net.Dialer{
+ Timeout: defaultTimeout,
+ }
+ tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return dialer.DialContext(ctx, proto, addr)
+ }
+ return nil
+}
+
+func configureNpipeTransport(_ *http.Transport, _, _ string) error {
+ return errors.New("protocol not available")
+}
+
+// DialPipe connects to a Windows named pipe.
+// This is not supported on other OSes.
+func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
+ return nil, syscall.EAFNOSUPPORT
+}
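
For context on how configureTransport and configureUnixTransport are meant to be used, here is a sketch of wiring an *http.Client over a unix socket in the same way, with compression disabled for local IPC. The socket path is an assumption for illustration.

    package main

    import (
        "context"
        "net"
        "net/http"
        "time"
    )

    func main() {
        const sock = "/var/run/docker.sock" // assumed socket path

        tr := &http.Transport{
            // Local IPC gains nothing from gzip, mirroring configureUnixTransport.
            DisableCompression: true,
            DialContext: func(ctx context.Context, _, _ string) (net.Conn, error) {
                d := &net.Dialer{Timeout: 10 * time.Second}
                return d.DialContext(ctx, "unix", sock)
            },
        }
        client := &http.Client{Transport: tr, Timeout: 30 * time.Second}
        _ = client // e.g. client.Get("http://localhost/_ping")
    }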
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/sockets_windows.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/sockets_windows.go
new file mode 100644
index 00000000..111e3e23
--- /dev/null
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/client/sockets_windows.go
@@ -0,0 +1,29 @@
+package client
+
+import (
+ "context"
+ "net"
+ "net/http"
+ "time"
+
+ "github.com/Microsoft/go-winio"
+ "github.com/pkg/errors"
+)
+
+func configureUnixTransport(_ *http.Transport, _, _ string) error {
+ return errors.New("protocol not available")
+}
+
+func configureNpipeTransport(tr *http.Transport, _, addr string) error {
+ // No need for compression in local communications.
+ tr.DisableCompression = true
+ tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
+ return winio.DialPipeContext(ctx, addr)
+ }
+ return nil
+}
+
+// DialPipe connects to a Windows named pipe.
+func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
+ return winio.DialPipe(addr, &timeout)
+}
diff --git a/vendor/github.com/moby/buildkit/util/testutil/dockerd/daemon.go b/vendor/github.com/moby/buildkit/util/testutil/dockerd/daemon.go
index f4684216..c2523adf 100644
--- a/vendor/github.com/moby/buildkit/util/testutil/dockerd/daemon.go
+++ b/vendor/github.com/moby/buildkit/util/testutil/dockerd/daemon.go
@@ -16,12 +16,12 @@ import (
)
type LogT interface {
- Logf(string, ...interface{})
+ Logf(string, ...any)
}
type nopLog struct{}
-func (nopLog) Logf(string, ...interface{}) {}
+func (nopLog) Logf(string, ...any) {}
const (
shortLen = 12
@@ -73,7 +73,7 @@ func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) {
dockerdBinary: DefaultDockerdBinary,
Log: nopLog{},
sockPath: filepath.Join(sockRoot, id+".sock"),
- envs: append([]string{}, os.Environ()...),
+ envs: os.Environ(),
}
for _, op := range ops {
diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/run.go b/vendor/github.com/moby/buildkit/util/testutil/integration/run.go
index 2c081029..fefbb167 100644
--- a/vendor/github.com/moby/buildkit/util/testutil/integration/run.go
+++ b/vendor/github.com/moby/buildkit/util/testutil/integration/run.go
@@ -5,6 +5,7 @@ import (
"context"
"fmt"
"maps"
+ "math"
"math/rand"
"os"
"os/exec"
@@ -12,6 +13,7 @@ import (
"reflect"
"runtime"
"sort"
+ "strconv"
"strings"
"sync"
"testing"
@@ -58,7 +60,7 @@ type Sandbox interface {
PrintLogs(*testing.T)
ClearLogs()
NewRegistry() (string, error)
- Value(string) interface{} // chosen matrix value
+ Value(string) any // chosen matrix value
Name() string
CDISpecDir() string
}
@@ -129,10 +131,10 @@ func List() []Worker {
// tests.
type TestOpt func(*testConf)
-func WithMatrix(key string, m map[string]interface{}) TestOpt {
+func WithMatrix(key string, m map[string]any) TestOpt {
return func(tc *testConf) {
if tc.matrix == nil {
- tc.matrix = map[string]map[string]interface{}{}
+ tc.matrix = map[string]map[string]any{}
}
tc.matrix[key] = m
}
@@ -148,7 +150,7 @@ func WithMirroredImages(m map[string]string) TestOpt {
}
type testConf struct {
- matrix map[string]map[string]interface{}
+ matrix map[string]map[string]any
mirroredImages map[string]string
}
@@ -161,6 +163,29 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) {
t.Skip("skipping integration tests")
}
+ var sliceSplit int
+ if filter, ok := lookupTestFilter(); ok {
+ parts := strings.Split(filter, "/")
+ if len(parts) >= 2 {
+ const prefix = "slice="
+ if strings.HasPrefix(parts[1], prefix) {
+ conf := strings.TrimPrefix(parts[1], prefix)
+ offsetS, totalS, ok := strings.Cut(conf, "-")
+ if !ok {
+ t.Fatalf("invalid slice=%q", conf)
+ }
+ offset, err := strconv.Atoi(offsetS)
+ require.NoError(t, err)
+ total, err := strconv.Atoi(totalS)
+ require.NoError(t, err)
+ if offset < 1 || total < 1 || offset > total {
+ t.Fatalf("invalid slice=%q", conf)
+ }
+ sliceSplit = total
+ }
+ }
+ }
+
var tc testConf
for _, o := range opt {
o(&tc)
@@ -182,9 +207,14 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) {
})
for _, br := range list {
- for _, tc := range testCases {
+ for i, tc := range testCases {
for _, mv := range matrix {
fn := tc.Name()
+ if sliceSplit > 0 {
+ pageLimit := int(math.Ceil(float64(len(testCases)) / float64(sliceSplit)))
+ sliceName := fmt.Sprintf("slice=%d-%d/", i/pageLimit+1, sliceSplit)
+ fn = sliceName + fn
+ }
name := fn + "/worker=" + br.Name() + mv.functionSuffix()
func(fn, testName string, br Worker, tc Test, mv matrixValue) {
ok := t.Run(testName, func(t *testing.T) {
@@ -221,7 +251,7 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) {
}
}
-func getFunctionName(i interface{}) string {
+func getFunctionName(i any) string {
fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
dot := strings.LastIndex(fullname, ".") + 1
return strings.Title(fullname[dot:]) //nolint:staticcheck // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support
@@ -330,37 +360,80 @@ func lazyMirrorRunnerFunc(t *testing.T, images map[string]string) func() string
var mirror string
return func() string {
once.Do(func() {
- host, cleanup, err := runMirror(t, images)
+ m, err := RunMirror()
require.NoError(t, err)
- t.Cleanup(func() { _ = cleanup() })
- mirror = host
+ require.NoError(t, m.AddImages(t, images))
+ t.Cleanup(func() { _ = m.Close() })
+ mirror = m.Host
})
return mirror
}
}
-func runMirror(t *testing.T, mirroredImages map[string]string) (host string, _ func() error, err error) {
+type Mirror struct {
+ Host string
+ dir string
+ cleanup func() error
+}
+
+func (m *Mirror) lock() (*flock.Flock, error) {
+ if m.dir == "" {
+ return nil, nil
+ }
+ if err := os.MkdirAll(m.dir, 0700); err != nil {
+ return nil, err
+ }
+ lock := flock.New(filepath.Join(m.dir, "lock"))
+ if err := lock.Lock(); err != nil {
+ return nil, err
+ }
+ return lock, nil
+}
+
+func (m *Mirror) Close() error {
+ if m.cleanup != nil {
+ return m.cleanup()
+ }
+ return nil
+}
+
+func (m *Mirror) AddImages(t *testing.T, images map[string]string) (err error) {
+ lock, err := m.lock()
+ if err != nil {
+ return err
+ }
+ defer func() {
+ if lock != nil {
+ lock.Unlock()
+ }
+ }()
+
+ if err := copyImagesLocal(t, m.Host, images); err != nil {
+ return err
+ }
+ return nil
+}
+
+func RunMirror() (_ *Mirror, err error) {
mirrorDir := os.Getenv("BUILDKIT_REGISTRY_MIRROR_DIR")
- var lock *flock.Flock
- if mirrorDir != "" {
- if err := os.MkdirAll(mirrorDir, 0700); err != nil {
- return "", nil, err
- }
- lock = flock.New(filepath.Join(mirrorDir, "lock"))
- if err := lock.Lock(); err != nil {
- return "", nil, err
- }
- defer func() {
- if err != nil {
- lock.Unlock()
- }
- }()
+ m := &Mirror{
+ dir: mirrorDir,
}
- mirror, cleanup, err := NewRegistry(mirrorDir)
+ lock, err := m.lock()
if err != nil {
- return "", nil, err
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ lock.Unlock()
+ }
+ }()
+
+ host, cleanup, err := NewRegistry(mirrorDir)
+ if err != nil {
+ return nil, err
}
defer func() {
if err != nil {
@@ -368,17 +441,16 @@ func runMirror(t *testing.T, mirroredImages map[string]string) (host string, _ f
}
}()
- if err := copyImagesLocal(t, mirror, mirroredImages); err != nil {
- return "", nil, err
- }
+ m.Host = host
+ m.cleanup = cleanup
- if mirrorDir != "" {
+ if lock != nil {
if err := lock.Unlock(); err != nil {
- return "", nil, err
+ return nil, err
}
}
- return mirror, cleanup, err
+ return m, err
}
type matrixValue struct {
@@ -400,10 +472,10 @@ func (mv matrixValue) functionSuffix() string {
type matrixValueChoice struct {
name string
- value interface{}
+ value any
}
-func newMatrixValue(key, name string, v interface{}) matrixValue {
+func newMatrixValue(key, name string, v any) matrixValue {
return matrixValue{
fn: []string{key},
values: map[string]matrixValueChoice{
@@ -463,3 +535,13 @@ func UnixOrWindows[T any](unix, windows T) T {
}
return unix
}
+
+func lookupTestFilter() (string, bool) {
+ const prefix = "-test.run="
+ for _, arg := range os.Args {
+ if strings.HasPrefix(arg, prefix) {
+ return strings.TrimPrefix(arg, prefix), true
+ }
+ }
+ return "", false
+}
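
The run.go changes above add test sharding: a leading "slice=<offset>-<total>" component in the -test.run filter is picked up via lookupTestFilter, and each test case index is assigned to a page of size ceil(len(testCases)/total). Below is a small sketch of that assignment plus how a CI job would pick one shard; the test name in the command is an assumption.

    package main

    import (
        "fmt"
        "math"
    )

    func main() {
        const numTests, shards = 10, 3
        // Same math as in Run: pageLimit tests per shard, test i goes to shard i/pageLimit+1.
        pageLimit := int(math.Ceil(float64(numTests) / float64(shards)))
        for i := 0; i < numTests; i++ {
            fmt.Printf("testCases[%d] -> slice=%d-%d\n", i, i/pageLimit+1, shards)
        }
        // A CI job then selects a single shard, e.g.:
        //   go test -run 'TestIntegration/slice=2-3' ./...
    }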
diff --git a/vendor/github.com/moby/buildkit/util/testutil/integration/sandbox.go b/vendor/github.com/moby/buildkit/util/testutil/integration/sandbox.go
index 76c5e110..791d43ab 100644
--- a/vendor/github.com/moby/buildkit/util/testutil/integration/sandbox.go
+++ b/vendor/github.com/moby/buildkit/util/testutil/integration/sandbox.go
@@ -86,7 +86,7 @@ func (sb *sandbox) Cmd(args ...string) *exec.Cmd {
return cmd
}
-func (sb *sandbox) Value(k string) interface{} {
+func (sb *sandbox) Value(k string) any {
return sb.mv.values[k].value
}
@@ -197,7 +197,7 @@ func RootlessSupported(uid int) bool {
return true
}
-func PrintLogs(logs map[string]*bytes.Buffer, f func(args ...interface{})) {
+func PrintLogs(logs map[string]*bytes.Buffer, f func(args ...any)) {
for name, l := range logs {
f(name)
s := bufio.NewScanner(l)
diff --git a/vendor/github.com/moby/buildkit/util/testutil/workers/backend.go b/vendor/github.com/moby/buildkit/util/testutil/workers/backend.go
index 2b2aa26c..4534ac9f 100644
--- a/vendor/github.com/moby/buildkit/util/testutil/workers/backend.go
+++ b/vendor/github.com/moby/buildkit/util/testutil/workers/backend.go
@@ -2,6 +2,7 @@ package workers
import (
"os"
+ "slices"
"strings"
)
@@ -52,23 +53,14 @@ func (b backend) ExtraEnv() []string {
func (b backend) Supports(feature string) bool {
if enabledFeatures := os.Getenv("BUILDKIT_TEST_ENABLE_FEATURES"); enabledFeatures != "" {
- for _, enabledFeature := range strings.Split(enabledFeatures, ",") {
- if feature == enabledFeature {
- return true
- }
+ if slices.Contains(strings.Split(enabledFeatures, ","), feature) {
+ return true
}
}
if disabledFeatures := os.Getenv("BUILDKIT_TEST_DISABLE_FEATURES"); disabledFeatures != "" {
- for _, disabledFeature := range strings.Split(disabledFeatures, ",") {
- if feature == disabledFeature {
- return false
- }
- }
- }
- for _, unsupportedFeature := range b.unsupportedFeatures {
- if feature == unsupportedFeature {
+ if slices.Contains(strings.Split(disabledFeatures, ","), feature) {
return false
}
}
- return true
+ return !slices.Contains(b.unsupportedFeatures, feature)
}
diff --git a/vendor/github.com/moby/buildkit/util/testutil/workers/dockerd.go b/vendor/github.com/moby/buildkit/util/testutil/workers/dockerd.go
index 1a7da3c1..ba557580 100644
--- a/vendor/github.com/moby/buildkit/util/testutil/workers/dockerd.go
+++ b/vendor/github.com/moby/buildkit/util/testutil/workers/dockerd.go
@@ -10,9 +10,9 @@ import (
"strings"
"time"
- "github.com/docker/docker/client"
"github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/moby/buildkit/util/testutil/dockerd"
+ "github.com/moby/buildkit/util/testutil/dockerd/client"
"github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
@@ -252,7 +252,7 @@ func waitForAPI(ctx context.Context, apiClient *client.Client, d time.Duration)
step := 50 * time.Millisecond
i := 0
for {
- if _, err := apiClient.Ping(ctx); err == nil {
+ if err := apiClient.Ping(ctx); err == nil {
break
}
i++
diff --git a/vendor/github.com/moby/buildkit/util/tracing/tracing.go b/vendor/github.com/moby/buildkit/util/tracing/tracing.go
index 2df1c462..038a7a17 100644
--- a/vendor/github.com/moby/buildkit/util/tracing/tracing.go
+++ b/vendor/github.com/moby/buildkit/util/tracing/tracing.go
@@ -5,7 +5,11 @@ import (
"fmt"
"net/http"
"net/http/httptrace"
+ "slices"
+ "github.com/moby/buildkit/util/bklog"
+ "github.com/moby/buildkit/util/stack"
+ "github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/attribute"
@@ -14,11 +18,6 @@ import (
semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
"go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop"
-
- "github.com/pkg/errors"
-
- "github.com/moby/buildkit/util/bklog"
- "github.com/moby/buildkit/util/stack"
)
// StartSpan starts a new span as a child of the span in context.
@@ -43,10 +42,8 @@ func hasStacktrace(err error) bool {
case interface{ Unwrap() error }:
return hasStacktrace(e.Unwrap())
case interface{ Unwrap() []error }:
- for _, ue := range e.Unwrap() {
- if hasStacktrace(ue) {
- return true
- }
+ if slices.ContainsFunc(e.Unwrap(), hasStacktrace) {
+ return true
}
}
return false
diff --git a/vendor/github.com/moby/sys/user/idtools.go b/vendor/github.com/moby/sys/user/idtools.go
new file mode 100644
index 00000000..595b7a92
--- /dev/null
+++ b/vendor/github.com/moby/sys/user/idtools.go
@@ -0,0 +1,141 @@
+package user
+
+import (
+ "fmt"
+ "os"
+)
+
+// MkdirOpt is a type for options to pass to Mkdir calls
+type MkdirOpt func(*mkdirOptions)
+
+type mkdirOptions struct {
+ onlyNew bool
+}
+
+// WithOnlyNew is an option for MkdirAllAndChown that will only change ownership and permissions
+// on newly created directories. If the directory already exists, it will not be modified
+func WithOnlyNew(o *mkdirOptions) {
+ o.onlyNew = true
+}
+
+// MkdirAllAndChown creates a directory (including any along the path) and then modifies
+// ownership to the requested uid/gid. By default, if the directory already exists, this
+// function will still change ownership and permissions. If WithOnlyNew is passed as an
+// option, then only the newly created directories will have ownership and permissions changed.
+func MkdirAllAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error {
+ var options mkdirOptions
+ for _, opt := range opts {
+ opt(&options)
+ }
+
+ return mkdirAs(path, mode, uid, gid, true, options.onlyNew)
+}
+
+// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
+// By default, if the directory already exists, this function still changes ownership and permissions.
+// If WithOnlyNew is passed as an option, then only the newly created directory will have ownership
+// and permissions changed.
+// Note that unlike os.Mkdir(), this function does not return IsExist error
+// in case path already exists.
+func MkdirAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error {
+ var options mkdirOptions
+ for _, opt := range opts {
+ opt(&options)
+ }
+ return mkdirAs(path, mode, uid, gid, false, options.onlyNew)
+}
+
+// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
+// If the maps are empty, then the root uid/gid will default to "real" 0/0
+func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
+ uid, err := toHost(0, uidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ gid, err := toHost(0, gidMap)
+ if err != nil {
+ return -1, -1, err
+ }
+ return uid, gid, nil
+}
+
+// toContainer takes an id mapping, and uses it to translate a
+// host ID to the remapped ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toContainer(hostID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return hostID, nil
+ }
+ for _, m := range idMap {
+ if (int64(hostID) >= m.ParentID) && (int64(hostID) <= (m.ParentID + m.Count - 1)) {
+ contID := int(m.ID + (int64(hostID) - m.ParentID))
+ return contID, nil
+ }
+ }
+ return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID)
+}
+
+// toHost takes an id mapping and a remapped ID, and translates the
+// ID to the mapped host ID. If no map is provided, then the translation
+// assumes a 1-to-1 mapping and returns the passed in id
+func toHost(contID int, idMap []IDMap) (int, error) {
+ if idMap == nil {
+ return contID, nil
+ }
+ for _, m := range idMap {
+ if (int64(contID) >= m.ID) && (int64(contID) <= (m.ID + m.Count - 1)) {
+ hostID := int(m.ParentID + (int64(contID) - m.ID))
+ return hostID, nil
+ }
+ }
+ return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
+}
+
+// IdentityMapping contains a mappings of UIDs and GIDs.
+// The zero value represents an empty mapping.
+type IdentityMapping struct {
+ UIDMaps []IDMap `json:"UIDMaps"`
+ GIDMaps []IDMap `json:"GIDMaps"`
+}
+
+// RootPair returns a uid and gid pair for the root user. The error is ignored
+// because a root user always exists, and the defaults are correct when the uid
+// and gid maps are empty.
+func (i IdentityMapping) RootPair() (int, int) {
+ uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps)
+ return uid, gid
+}
+
+// ToHost returns the host UID and GID for the container uid, gid.
+// Remapping is only performed if the ids aren't already the remapped root ids
+func (i IdentityMapping) ToHost(uid, gid int) (int, int, error) {
+ var err error
+ ruid, rgid := i.RootPair()
+
+ if uid != ruid {
+ ruid, err = toHost(uid, i.UIDMaps)
+ if err != nil {
+ return ruid, rgid, err
+ }
+ }
+
+ if gid != rgid {
+ rgid, err = toHost(gid, i.GIDMaps)
+ }
+ return ruid, rgid, err
+}
+
+// ToContainer returns the container UID and GID for the host uid and gid
+func (i IdentityMapping) ToContainer(uid, gid int) (int, int, error) {
+ ruid, err := toContainer(uid, i.UIDMaps)
+ if err != nil {
+ return -1, -1, err
+ }
+ rgid, err := toContainer(gid, i.GIDMaps)
+ return ruid, rgid, err
+}
+
+// Empty returns true if there are no id mappings
+func (i IdentityMapping) Empty() bool {
+ return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0
+}
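
To make the toHost/toContainer arithmetic concrete, here is a worked example under an assumed single-range mapping; the 100000/65536 numbers are illustrative, not mandated by the package.

    package main

    import (
        "fmt"

        "github.com/moby/sys/user"
    )

    func main() {
        // Container ids 0..65535 are backed by host ids 100000..165535.
        m := user.IdentityMapping{
            UIDMaps: []user.IDMap{{ID: 0, ParentID: 100000, Count: 65536}},
            GIDMaps: []user.IDMap{{ID: 0, ParentID: 100000, Count: 65536}},
        }
        uid, gid := m.RootPair()          // 100000, 100000: host owner for container root
        hu, hg, _ := m.ToHost(1000, 1000) // 101000, 101000
        cu, cg, _ := m.ToContainer(101000, 101000)
        fmt.Println(uid, gid, hu, hg, cu, cg) // 100000 100000 101000 101000 1000 1000
    }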
diff --git a/vendor/github.com/moby/sys/user/idtools_unix.go b/vendor/github.com/moby/sys/user/idtools_unix.go
new file mode 100644
index 00000000..4e39d244
--- /dev/null
+++ b/vendor/github.com/moby/sys/user/idtools_unix.go
@@ -0,0 +1,143 @@
+//go:build !windows
+
+package user
+
+import (
+ "fmt"
+ "os"
+ "path/filepath"
+ "strconv"
+ "syscall"
+)
+
+func mkdirAs(path string, mode os.FileMode, uid, gid int, mkAll, onlyNew bool) error {
+ path, err := filepath.Abs(path)
+ if err != nil {
+ return err
+ }
+
+ stat, err := os.Stat(path)
+ if err == nil {
+ if !stat.IsDir() {
+ return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
+ }
+ if onlyNew {
+ return nil
+ }
+
+ // short-circuit -- we were called with an existing directory and chown was requested
+ return setPermissions(path, mode, uid, gid, stat)
+ }
+
+ // make an array containing the original path asked for, plus (for mkAll == true)
+ // all path components leading up to the complete path that don't exist before we MkdirAll
+ // so that we can chown all of them properly at the end. If onlyNew is true, we won't
+ // chown the full directory path if it exists
+ var paths []string
+ if os.IsNotExist(err) {
+ paths = append(paths, path)
+ }
+
+ if mkAll {
+ // walk back to "/" looking for directories which do not exist
+ // and add them to the paths array for chown after creation
+ dirPath := path
+ for {
+ dirPath = filepath.Dir(dirPath)
+ if dirPath == "/" {
+ break
+ }
+ if _, err = os.Stat(dirPath); os.IsNotExist(err) {
+ paths = append(paths, dirPath)
+ }
+ }
+ if err = os.MkdirAll(path, mode); err != nil {
+ return err
+ }
+ } else if err = os.Mkdir(path, mode); err != nil {
+ return err
+ }
+ // even if it existed, we will chown the requested path + any subpaths that
+ // didn't exist when we called MkdirAll
+ for _, pathComponent := range paths {
+ if err = setPermissions(pathComponent, mode, uid, gid, nil); err != nil {
+ return err
+ }
+ }
+ return nil
+}
+
+// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested
+// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
+// dir is on an NFS share, so don't call chown unless we absolutely must.
+// Likewise for setting permissions.
+func setPermissions(p string, mode os.FileMode, uid, gid int, stat os.FileInfo) error {
+ if stat == nil {
+ var err error
+ stat, err = os.Stat(p)
+ if err != nil {
+ return err
+ }
+ }
+ if stat.Mode().Perm() != mode.Perm() {
+ if err := os.Chmod(p, mode.Perm()); err != nil {
+ return err
+ }
+ }
+ ssi := stat.Sys().(*syscall.Stat_t)
+ if ssi.Uid == uint32(uid) && ssi.Gid == uint32(gid) {
+ return nil
+ }
+ return os.Chown(p, uid, gid)
+}
+
+// LoadIdentityMapping takes a requested username and, using the data from
+// /etc/sub{uid,gid} ranges, creates the proper uid and gid remapping ranges
+// for that user/group pair.
+func LoadIdentityMapping(name string) (IdentityMapping, error) {
+ // TODO: Consider adding support for calling out to "getent"
+ usr, err := LookupUser(name)
+ if err != nil {
+ return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %w", name, err)
+ }
+
+ subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr)
+ if err != nil {
+ return IdentityMapping{}, err
+ }
+ subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr)
+ if err != nil {
+ return IdentityMapping{}, err
+ }
+
+ return IdentityMapping{
+ UIDMaps: subuidRanges,
+ GIDMaps: subgidRanges,
+ }, nil
+}
+
+func lookupSubRangesFile(path string, usr User) ([]IDMap, error) {
+ uidstr := strconv.Itoa(usr.Uid)
+ rangeList, err := ParseSubIDFileFilter(path, func(sid SubID) bool {
+ return sid.Name == usr.Name || sid.Name == uidstr
+ })
+ if err != nil {
+ return nil, err
+ }
+ if len(rangeList) == 0 {
+ return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name)
+ }
+
+ idMap := []IDMap{}
+
+ var containerID int64
+ for _, idrange := range rangeList {
+ idMap = append(idMap, IDMap{
+ ID: containerID,
+ ParentID: idrange.SubID,
+ Count: idrange.Count,
+ })
+ containerID = containerID + idrange.Count
+ }
+ return idMap, nil
+}
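
A hedged sketch of the higher-level entry point: LoadIdentityMapping reads /etc/subuid and /etc/subgid for a user and turns the registered ranges into contiguous IDMap entries starting at container id 0. The username below is an assumption; the call fails if no ranges are registered for it.

    package main

    import (
        "fmt"

        "github.com/moby/sys/user"
    )

    func main() {
        im, err := user.LoadIdentityMapping("buildkit") // assumed user with subuid/subgid entries
        if err != nil {
            fmt.Println("no subordinate id ranges:", err)
            return
        }
        uid, gid := im.RootPair()
        fmt.Printf("container root maps to host %d:%d across %d uid range(s)\n",
            uid, gid, len(im.UIDMaps))
    }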
diff --git a/vendor/github.com/moby/sys/user/idtools_windows.go b/vendor/github.com/moby/sys/user/idtools_windows.go
new file mode 100644
index 00000000..9de730ca
--- /dev/null
+++ b/vendor/github.com/moby/sys/user/idtools_windows.go
@@ -0,0 +1,13 @@
+package user
+
+import (
+ "os"
+)
+
+// This is currently a wrapper around [os.MkdirAll]; since permissions aren't
+// set through this path, the identity isn't utilized. Ownership is handled
+// elsewhere, but could be supported here in the future too.
+func mkdirAs(path string, _ os.FileMode, _, _ int, _, _ bool) error {
+ return os.MkdirAll(path, 0)
+}
diff --git a/vendor/github.com/opencontainers/image-spec/specs-go/version.go b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
index 7069ae44..c3897c7c 100644
--- a/vendor/github.com/opencontainers/image-spec/specs-go/version.go
+++ b/vendor/github.com/opencontainers/image-spec/specs-go/version.go
@@ -22,7 +22,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 1
// VersionPatch is for backwards-compatible bug fixes
- VersionPatch = 0
+ VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string.
VersionDev = ""
diff --git a/vendor/github.com/tonistiigi/dchapes-mode/mode.go b/vendor/github.com/tonistiigi/dchapes-mode/mode.go
index 01d3444f..a53aa7d0 100644
--- a/vendor/github.com/tonistiigi/dchapes-mode/mode.go
+++ b/vendor/github.com/tonistiigi/dchapes-mode/mode.go
@@ -262,8 +262,10 @@ func ParseWithUmask(s string, umask uint) (Set, error) {
case 'w':
perm |= iWUser | iWGroup | iWOther
case 'X':
- if op == '+' {
+ if op != '-' {
permX = iXUser | iXGroup | iXOther
+ } else {
+ perm |= iXUser | iXGroup | iXOther
}
case 'x':
perm |= iXUser | iXGroup | iXOther
diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy.go b/vendor/github.com/tonistiigi/fsutil/copy/copy.go
index f24e77f8..aef1a146 100644
--- a/vendor/github.com/tonistiigi/fsutil/copy/copy.go
+++ b/vendor/github.com/tonistiigi/fsutil/copy/copy.go
@@ -18,6 +18,8 @@ import (
"github.com/tonistiigi/fsutil"
)
+const defaultDirectoryMode = 0755
+
var bufferPool = &sync.Pool{
New: func() interface{} {
buffer := make([]byte, 32*1024)
@@ -80,7 +82,11 @@ func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) e
if err != nil {
return err
}
- if createdDirs, err := MkdirAll(ensureDstPath, 0755, ci.Chown, ci.Utime); err != nil {
+ perm := defaultDirectoryMode
+ if ci.Mode != nil {
+ perm = *ci.Mode
+ }
+ if createdDirs, err := MkdirAll(ensureDstPath, os.FileMode(perm), ci.Chown, ci.Utime); err != nil {
return err
} else {
defer fixCreatedParentDirs(createdDirs, ci.Utime)
@@ -159,7 +165,11 @@ func (c *copier) prepareTargetDir(srcFollowed, src, destPath string, copyDirCont
target = destPath
}
var createdDirs []string
- if dirs, err := MkdirAll(target, 0755, c.chown, c.utime); err != nil {
+ mode := defaultDirectoryMode
+ if c.mode != nil {
+ mode = *c.mode
+ }
+ if dirs, err := MkdirAll(target, os.FileMode(mode), c.chown, c.utime); err != nil {
return "", nil, err
} else {
createdDirs = dirs
@@ -335,13 +345,13 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov
if srcComponents != "" {
matchesIncludePattern := false
matchesExcludePattern := false
- matchesIncludePattern, includeMatchInfo, err = c.include(srcComponents, fi, parentIncludeMatchInfo)
+ matchesIncludePattern, includeMatchInfo, err = c.include(srcComponents, parentIncludeMatchInfo)
if err != nil {
return err
}
include = matchesIncludePattern
- matchesExcludePattern, excludeMatchInfo, err = c.exclude(srcComponents, fi, parentExcludeMatchInfo)
+ matchesExcludePattern, excludeMatchInfo, err = c.exclude(srcComponents, parentExcludeMatchInfo)
if err != nil {
return err
}
@@ -351,11 +361,11 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov
}
if include {
- if err := c.removeTargetIfNeeded(src, target, fi, targetFi); err != nil {
+ if err := c.removeTargetIfNeeded(target, fi, targetFi); err != nil {
return err
}
- if err := c.createParentDirs(src, srcComponents, target, overwriteTargetMetadata); err != nil {
+ if err := c.createParentDirs(src, overwriteTargetMetadata); err != nil {
return err
}
}
@@ -447,7 +457,7 @@ func (c *copier) notifyChange(target string, fi os.FileInfo) error {
return nil
}
-func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) {
+func (c *copier) include(path string, parentIncludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) {
if c.includePatternMatcher == nil {
return true, patternmatcher.MatchInfo{}, nil
}
@@ -459,7 +469,7 @@ func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo pat
return m, matchInfo, nil
}
-func (c *copier) exclude(path string, fi os.FileInfo, parentExcludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) {
+func (c *copier) exclude(path string, parentExcludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) {
if c.excludePatternMatcher == nil {
return false, patternmatcher.MatchInfo{}, nil
}
@@ -471,7 +481,7 @@ func (c *copier) exclude(path string, fi os.FileInfo, parentExcludeMatchInfo pat
return m, matchInfo, nil
}
-func (c *copier) removeTargetIfNeeded(src, target string, srcFi, targetFi os.FileInfo) error {
+func (c *copier) removeTargetIfNeeded(target string, srcFi, targetFi os.FileInfo) error {
if !c.alwaysReplaceExistingDestPaths {
return nil
}
@@ -488,7 +498,7 @@ func (c *copier) removeTargetIfNeeded(src, target string, srcFi, targetFi os.Fil
// Delayed creation of parent directories when a file or dir matches an include
// pattern.
-func (c *copier) createParentDirs(src, srcComponents, target string, overwriteTargetMetadata bool) error {
+func (c *copier) createParentDirs(src string, overwriteTargetMetadata bool) error {
for i, parentDir := range c.parentDirs {
if parentDir.copied {
continue
@@ -502,7 +512,7 @@ func (c *copier) createParentDirs(src, srcComponents, target string, overwriteTa
return errors.Errorf("%s is not a directory", parentDir.srcPath)
}
- created, err := copyDirectoryOnly(parentDir.srcPath, parentDir.dstPath, fi, overwriteTargetMetadata)
+ created, err := copyDirectoryOnly(parentDir.dstPath, fi, overwriteTargetMetadata)
if err != nil {
return err
}
@@ -549,7 +559,7 @@ func (c *copier) copyDirectory(
// encounter a/b/c.
if include {
var err error
- created, err = copyDirectoryOnly(src, dst, stat, overwriteTargetMetadata)
+ created, err = copyDirectoryOnly(dst, stat, overwriteTargetMetadata)
if err != nil {
return created, err
}
@@ -586,7 +596,7 @@ func (c *copier) copyDirectory(
return created, nil
}
-func copyDirectoryOnly(src, dst string, stat os.FileInfo, overwriteTargetMetadata bool) (bool, error) {
+func copyDirectoryOnly(dst string, stat os.FileInfo, overwriteTargetMetadata bool) (bool, error) {
if st, err := os.Lstat(dst); err != nil {
if !os.IsNotExist(err) {
return false, err
diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go
index 9b046c53..3df11d6a 100644
--- a/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go
+++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_linux.go
@@ -15,7 +15,7 @@ func getUIDGID(fi os.FileInfo) (uid, gid int) {
return int(st.Uid), int(st.Gid)
}
-func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error {
+func (c *copier) copyFileInfo(fi os.FileInfo, _, name string) error {
chown := c.chown
uid, gid := getUIDGID(fi)
old := &User{UID: uid, GID: gid}
diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go
index df1cdd07..5407c16e 100644
--- a/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go
+++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_unix.go
@@ -16,7 +16,7 @@ func getUIDGID(fi os.FileInfo) (uid, gid int) {
return int(st.Uid), int(st.Gid)
}
-func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error {
+func (c *copier) copyFileInfo(fi os.FileInfo, _, name string) error {
chown := c.chown
uid, gid := getUIDGID(fi)
old := &User{UID: uid, GID: gid}
diff --git a/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go
index a049565e..45b4f210 100644
--- a/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go
+++ b/vendor/github.com/tonistiigi/fsutil/copy/copy_windows.go
@@ -13,7 +13,7 @@ const (
seTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
)
-func getUIDGID(fi os.FileInfo) (uid, gid int) {
+func getUIDGID(_ os.FileInfo) (uid, gid int) {
return 0, 0
}
@@ -119,10 +119,10 @@ func copyFileContent(dst, src *os.File) error {
return err
}
-func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error {
+func copyXAttrs(_, _ string, _ XAttrErrorHandler) error {
return nil
}
-func copyDevice(dst string, fi os.FileInfo) error {
+func copyDevice(_ string, _ os.FileInfo) error {
return errors.New("device copy not supported")
}
diff --git a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go
index ad8845a7..ce081d68 100644
--- a/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go
+++ b/vendor/github.com/tonistiigi/fsutil/copy/hardlink_windows.go
@@ -2,6 +2,6 @@ package fs
import "os"
-func getLinkInfo(fi os.FileInfo) (uint64, bool) {
+func getLinkInfo(_ os.FileInfo) (uint64, bool) {
return 0, false
}
diff --git a/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go b/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go
index 2dd3f7d0..d278add8 100644
--- a/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go
+++ b/vendor/github.com/tonistiigi/fsutil/diskwriter_windows.go
@@ -20,7 +20,7 @@ func rewriteMetadata(p string, stat *types.Stat) error {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo
-func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error {
+func handleTarTypeBlockCharFifo(_ string, _ *types.Stat) error {
return errors.New("Not implemented on windows")
}
diff --git a/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl b/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl
index ea73bd20..c70ad2b6 100644
--- a/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl
+++ b/vendor/github.com/tonistiigi/fsutil/docker-bake.hcl
@@ -60,7 +60,15 @@ target "test-noroot" {
output = ["${DESTDIR}/coverage"]
}
-target "lint" {
+group "lint" {
+ targets = ["lint-golangci", "lint-gopls"]
+}
+
+group "lint-cross" {
+ targets = ["lint-golangci-cross", "lint-gopls-cross"]
+}
+
+target "lint-golangci" {
inherits = ["_common"]
dockerfile = "./hack/dockerfiles/lint.Dockerfile"
output = ["type=cacheonly"]
@@ -69,8 +77,17 @@ target "lint" {
}
}
-target "lint-cross" {
- inherits = ["lint", "_platforms"]
+target "lint-gopls" {
+ inherits = ["lint-golangci"]
+ target = "gopls-analyze"
+}
+
+target "lint-golangci-cross" {
+ inherits = ["lint-golangci", "_platforms"]
+}
+
+target "lint-gopls-cross" {
+ inherits = ["lint-gopls", "_platforms"]
}
target "validate-generated-files" {
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
index bd896bdc..8d99551f 100644
--- a/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
+++ b/vendor/golang.org/x/crypto/internal/poly1305/mac_noasm.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego
+//go:build (!amd64 && !loong64 && !ppc64le && !ppc64 && !s390x) || !gc || purego
package poly1305
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go
similarity index 94%
rename from vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
rename to vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go
index 164cd47d..315b84ac 100644
--- a/vendor/golang.org/x/crypto/internal/poly1305/sum_amd64.go
+++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_asm.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build gc && !purego
+//go:build gc && !purego && (amd64 || loong64 || ppc64 || ppc64le)
package poly1305
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s b/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s
new file mode 100644
index 00000000..bc8361da
--- /dev/null
+++ b/vendor/golang.org/x/crypto/internal/poly1305/sum_loong64.s
@@ -0,0 +1,123 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gc && !purego
+
+// func update(state *macState, msg []byte)
+TEXT ·update(SB), $0-32
+ MOVV state+0(FP), R4
+ MOVV msg_base+8(FP), R5
+ MOVV msg_len+16(FP), R6
+
+ MOVV $0x10, R7
+
+ MOVV (R4), R8 // h0
+ MOVV 8(R4), R9 // h1
+ MOVV 16(R4), R10 // h2
+ MOVV 24(R4), R11 // r0
+ MOVV 32(R4), R12 // r1
+
+ BLT R6, R7, bytes_between_0_and_15
+
+loop:
+ MOVV (R5), R14 // msg[0:8]
+ MOVV 8(R5), R16 // msg[8:16]
+ ADDV R14, R8, R8 // h0 (x1 + y1 = z1', if z1' < x1 then z1' overflow)
+ ADDV R16, R9, R27
+ SGTU R14, R8, R24 // h0.carry
+ SGTU R9, R27, R28
+ ADDV R27, R24, R9 // h1
+ SGTU R27, R9, R24
+ OR R24, R28, R24 // h1.carry
+ ADDV $0x01, R24, R24
+ ADDV R10, R24, R10 // h2
+
+ ADDV $16, R5, R5 // msg = msg[16:]
+
+multiply:
+ MULV R8, R11, R14 // h0r0.lo
+ MULHVU R8, R11, R15 // h0r0.hi
+ MULV R9, R11, R13 // h1r0.lo
+ MULHVU R9, R11, R16 // h1r0.hi
+ ADDV R13, R15, R15
+ SGTU R13, R15, R24
+ ADDV R24, R16, R16
+ MULV R10, R11, R25
+ ADDV R16, R25, R25
+ MULV R8, R12, R13 // h0r1.lo
+ MULHVU R8, R12, R16 // h0r1.hi
+ ADDV R13, R15, R15
+ SGTU R13, R15, R24
+ ADDV R24, R16, R16
+ MOVV R16, R8
+ MULV R10, R12, R26 // h2r1
+ MULV R9, R12, R13 // h1r1.lo
+ MULHVU R9, R12, R16 // h1r1.hi
+ ADDV R13, R25, R25
+ ADDV R16, R26, R27
+ SGTU R13, R25, R24
+ ADDV R27, R24, R26
+ ADDV R8, R25, R25
+ SGTU R8, R25, R24
+ ADDV R24, R26, R26
+ AND $3, R25, R10
+ AND $-4, R25, R17
+ ADDV R17, R14, R8
+ ADDV R26, R15, R27
+ SGTU R17, R8, R24
+ SGTU R26, R27, R28
+ ADDV R27, R24, R9
+ SGTU R27, R9, R24
+ OR R24, R28, R24
+ ADDV R24, R10, R10
+ SLLV $62, R26, R27
+ SRLV $2, R25, R28
+ SRLV $2, R26, R26
+ OR R27, R28, R25
+ ADDV R25, R8, R8
+ ADDV R26, R9, R27
+ SGTU R25, R8, R24
+ SGTU R26, R27, R28
+ ADDV R27, R24, R9
+ SGTU R27, R9, R24
+ OR R24, R28, R24
+ ADDV R24, R10, R10
+
+ SUBV $16, R6, R6
+ BGE R6, R7, loop
+
+bytes_between_0_and_15:
+ BEQ R6, R0, done
+ MOVV $1, R14
+ XOR R15, R15
+ ADDV R6, R5, R5
+
+flush_buffer:
+ MOVBU -1(R5), R25
+ SRLV $56, R14, R24
+ SLLV $8, R15, R28
+ SLLV $8, R14, R14
+ OR R24, R28, R15
+ XOR R25, R14, R14
+ SUBV $1, R6, R6
+ SUBV $1, R5, R5
+ BNE R6, R0, flush_buffer
+
+ ADDV R14, R8, R8
+ SGTU R14, R8, R24
+ ADDV R15, R9, R27
+ SGTU R15, R27, R28
+ ADDV R27, R24, R9
+ SGTU R27, R9, R24
+ OR R24, R28, R24
+ ADDV R10, R24, R10
+
+ MOVV $16, R6
+ JMP multiply
+
+done:
+ MOVV R8, (R4)
+ MOVV R9, 8(R4)
+ MOVV R10, 16(R4)
+ RET
diff --git a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go b/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go
deleted file mode 100644
index 1a1679aa..00000000
--- a/vendor/golang.org/x/crypto/internal/poly1305/sum_ppc64x.go
+++ /dev/null
@@ -1,47 +0,0 @@
-// Copyright 2019 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build gc && !purego && (ppc64 || ppc64le)
-
-package poly1305
-
-//go:noescape
-func update(state *macState, msg []byte)
-
-// mac is a wrapper for macGeneric that redirects calls that would have gone to
-// updateGeneric to update.
-//
-// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
-// using function pointers would carry a major performance cost.
-type mac struct{ macGeneric }
-
-func (h *mac) Write(p []byte) (int, error) {
- nn := len(p)
- if h.offset > 0 {
- n := copy(h.buffer[h.offset:], p)
- if h.offset+n < TagSize {
- h.offset += n
- return nn, nil
- }
- p = p[n:]
- h.offset = 0
- update(&h.macState, h.buffer[:])
- }
- if n := len(p) - (len(p) % TagSize); n > 0 {
- update(&h.macState, p[:n])
- p = p[n:]
- }
- if len(p) > 0 {
- h.offset += copy(h.buffer[h.offset:], p)
- }
- return nn, nil
-}
-
-func (h *mac) Sum(out *[16]byte) {
- state := h.macState
- if h.offset > 0 {
- update(&state, h.buffer[:h.offset])
- }
- finalize(out, &state.h, &state.s)
-}
diff --git a/vendor/golang.org/x/crypto/ssh/handshake.go b/vendor/golang.org/x/crypto/ssh/handshake.go
index 56cdc7c2..b6bf546b 100644
--- a/vendor/golang.org/x/crypto/ssh/handshake.go
+++ b/vendor/golang.org/x/crypto/ssh/handshake.go
@@ -5,7 +5,6 @@
package ssh
import (
- "crypto/rand"
"errors"
"fmt"
"io"
@@ -25,6 +24,11 @@ const debugHandshake = false
// quickly.
const chanSize = 16
+// maxPendingPackets sets the maximum number of packets to queue while waiting
+// for KEX to complete. This limits the total pending data to maxPendingPackets
+// * maxPacket bytes, which is ~16.8MB.
+const maxPendingPackets = 64
+
// keyingTransport is a packet based transport that supports key
// changes. It need not be thread-safe. It should pass through
// msgNewKeys in both directions.
@@ -73,13 +77,22 @@ type handshakeTransport struct {
incoming chan []byte
readError error
- mu sync.Mutex
- writeError error
- sentInitPacket []byte
- sentInitMsg *kexInitMsg
- pendingPackets [][]byte // Used when a key exchange is in progress.
+ mu sync.Mutex
+ // Condition for the above mutex. It is used to notify a completed key
+ // exchange or a write failure. Writes can wait for this condition while a
+ // key exchange is in progress.
+ writeCond *sync.Cond
+ writeError error
+ sentInitPacket []byte
+ sentInitMsg *kexInitMsg
+ // Used to queue writes when a key exchange is in progress. The length is
+ // limited by maxPendingPackets. Once full, writes will block until the key
+ // exchange is completed or an error occurs. If not empty, it is emptied
+ // all at once when the key exchange is completed in kexLoop.
+ pendingPackets [][]byte
writePacketsLeft uint32
writeBytesLeft int64
+ userAuthComplete bool // whether the user authentication phase is complete
// If the read loop wants to schedule a kex, it pings this
// channel, and the write loop will send out a kex
@@ -133,6 +146,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion,
config: config,
}
+ t.writeCond = sync.NewCond(&t.mu)
t.resetReadThresholds()
t.resetWriteThresholds()
@@ -259,6 +273,7 @@ func (t *handshakeTransport) recordWriteError(err error) {
defer t.mu.Unlock()
if t.writeError == nil && err != nil {
t.writeError = err
+ t.writeCond.Broadcast()
}
}
@@ -362,6 +377,8 @@ write:
}
}
t.pendingPackets = t.pendingPackets[:0]
+ // Unblock writePacket if waiting for KEX.
+ t.writeCond.Broadcast()
t.mu.Unlock()
}
@@ -483,7 +500,7 @@ func (t *handshakeTransport) sendKexInit() error {
CompressionClientServer: supportedCompressions,
CompressionServerClient: supportedCompressions,
}
- io.ReadFull(rand.Reader, msg.Cookie[:])
+ io.ReadFull(t.config.Rand, msg.Cookie[:])
// We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm,
// and possibly to add the ext-info extension algorithm. Since the slice may be the
@@ -552,26 +569,44 @@ func (t *handshakeTransport) sendKexInit() error {
return nil
}
+var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase")
+
func (t *handshakeTransport) writePacket(p []byte) error {
+ t.mu.Lock()
+ defer t.mu.Unlock()
+
switch p[0] {
case msgKexInit:
return errors.New("ssh: only handshakeTransport can send kexInit")
case msgNewKeys:
return errors.New("ssh: only handshakeTransport can send newKeys")
+ case msgUserAuthBanner:
+ if t.userAuthComplete {
+ return errSendBannerPhase
+ }
+ case msgUserAuthSuccess:
+ t.userAuthComplete = true
}
- t.mu.Lock()
- defer t.mu.Unlock()
if t.writeError != nil {
return t.writeError
}
if t.sentInitMsg != nil {
- // Copy the packet so the writer can reuse the buffer.
- cp := make([]byte, len(p))
- copy(cp, p)
- t.pendingPackets = append(t.pendingPackets, cp)
- return nil
+ if len(t.pendingPackets) < maxPendingPackets {
+ // Copy the packet so the writer can reuse the buffer.
+ cp := make([]byte, len(p))
+ copy(cp, p)
+ t.pendingPackets = append(t.pendingPackets, cp)
+ return nil
+ }
+ for t.sentInitMsg != nil {
+ // Block and wait for KEX to complete or an error.
+ t.writeCond.Wait()
+ if t.writeError != nil {
+ return t.writeError
+ }
+ }
}
if t.writeBytesLeft > 0 {
@@ -588,6 +623,7 @@ func (t *handshakeTransport) writePacket(p []byte) error {
if err := t.pushPacket(p); err != nil {
t.writeError = err
+ t.writeCond.Broadcast()
}
return nil
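
The handshake change above bounds memory during key exchange: at most maxPendingPackets writes are queued, and further writers block on a sync.Cond until kexLoop drains the queue or an error is recorded. Below is a generic sketch of that bounded-queue-with-cond pattern, not the ssh internals themselves; all names are illustrative.

    package main

    import "sync"

    type boundedQueue struct {
        mu      sync.Mutex
        cond    *sync.Cond
        pending [][]byte
        err     error
        max     int
    }

    func newBoundedQueue(max int) *boundedQueue {
        q := &boundedQueue{max: max}
        q.cond = sync.NewCond(&q.mu)
        return q
    }

    func (q *boundedQueue) push(p []byte) error {
        q.mu.Lock()
        defer q.mu.Unlock()
        for len(q.pending) >= q.max && q.err == nil {
            q.cond.Wait() // block until drained or failed
        }
        if q.err != nil {
            return q.err
        }
        cp := append([]byte(nil), p...) // copy so the caller can reuse p
        q.pending = append(q.pending, cp)
        return nil
    }

    func (q *boundedQueue) drain(send func([]byte) error) {
        q.mu.Lock()
        defer q.mu.Unlock()
        for _, p := range q.pending {
            if err := send(p); err != nil {
                q.err = err
                break
            }
        }
        q.pending = q.pending[:0]
        q.cond.Broadcast() // wake blocked producers
    }

    func main() {
        q := newBoundedQueue(64) // cf. maxPendingPackets
        _ = q.push([]byte("packet"))
        q.drain(func(p []byte) error { return nil })
    }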
diff --git a/vendor/golang.org/x/crypto/ssh/messages.go b/vendor/golang.org/x/crypto/ssh/messages.go
index b55f8605..118427bc 100644
--- a/vendor/golang.org/x/crypto/ssh/messages.go
+++ b/vendor/golang.org/x/crypto/ssh/messages.go
@@ -818,6 +818,8 @@ func decode(packet []byte) (interface{}, error) {
return new(userAuthSuccessMsg), nil
case msgUserAuthFailure:
msg = new(userAuthFailureMsg)
+ case msgUserAuthBanner:
+ msg = new(userAuthBannerMsg)
case msgUserAuthPubKeyOk:
msg = new(userAuthPubKeyOkMsg)
case msgGlobalRequest:
diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go
index 5b5ccd96..1839ddc6 100644
--- a/vendor/golang.org/x/crypto/ssh/server.go
+++ b/vendor/golang.org/x/crypto/ssh/server.go
@@ -59,6 +59,27 @@ type GSSAPIWithMICConfig struct {
Server GSSAPIServer
}
+// SendAuthBanner implements [ServerPreAuthConn].
+func (s *connection) SendAuthBanner(msg string) error {
+ return s.transport.writePacket(Marshal(&userAuthBannerMsg{
+ Message: msg,
+ }))
+}
+
+func (*connection) unexportedMethodForFutureProofing() {}
+
+// ServerPreAuthConn is the interface available on an incoming server
+// connection before authentication has completed.
+type ServerPreAuthConn interface {
+ unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, a la testing.TB
+
+ ConnMetadata
+
+ // SendAuthBanner sends a banner message to the client.
+ // It returns an error once the authentication phase has ended.
+ SendAuthBanner(string) error
+}
+
// ServerConfig holds server specific configuration data.
type ServerConfig struct {
// Config contains configuration shared between client and server.
@@ -118,6 +139,12 @@ type ServerConfig struct {
// attempts.
AuthLogCallback func(conn ConnMetadata, method string, err error)
+ // PreAuthConnCallback, if non-nil, is called upon receiving a new connection
+ // before any authentication has started. The provided ServerPreAuthConn
+ // can be used at any time before authentication is complete, including
+ // after this callback has returned.
+ PreAuthConnCallback func(ServerPreAuthConn)
+
// ServerVersion is the version identification string to announce in
// the public handshake.
// If empty, a reasonable default is used.
@@ -488,6 +515,10 @@ func (b *BannerError) Error() string {
}
func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
+ if config.PreAuthConnCallback != nil {
+ config.PreAuthConnCallback(s)
+ }
+
sessionID := s.transport.getSessionID()
var cache pubKeyCache
var perms *Permissions
@@ -495,7 +526,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err
authFailures := 0
noneAuthCount := 0
var authErrs []error
- var displayedBanner bool
+ var calledBannerCallback bool
partialSuccessReturned := false
// Set the initial authentication callbacks from the config. They can be
// changed if a PartialSuccessError is returned.
@@ -542,14 +573,10 @@ userAuthLoop:
s.user = userAuthReq.User
- if !displayedBanner && config.BannerCallback != nil {
- displayedBanner = true
- msg := config.BannerCallback(s)
- if msg != "" {
- bannerMsg := &userAuthBannerMsg{
- Message: msg,
- }
- if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil {
+ if !calledBannerCallback && config.BannerCallback != nil {
+ calledBannerCallback = true
+ if msg := config.BannerCallback(s); msg != "" {
+ if err := s.SendAuthBanner(msg); err != nil {
return nil, err
}
}
@@ -762,10 +789,7 @@ userAuthLoop:
var bannerErr *BannerError
if errors.As(authErr, &bannerErr) {
if bannerErr.Message != "" {
- bannerMsg := &userAuthBannerMsg{
- Message: bannerErr.Message,
- }
- if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil {
+ if err := s.SendAuthBanner(bannerErr.Message); err != nil {
return nil, err
}
}
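
The new PreAuthConnCallback and ServerPreAuthConn.SendAuthBanner let a server push banner messages before authentication completes. A minimal usage sketch follows; the host key setup is elided and the banner text is an assumption, only PreAuthConnCallback and SendAuthBanner come from the change above.

    package main

    import (
        "log"

        "golang.org/x/crypto/ssh"
    )

    func main() {
        cfg := &ssh.ServerConfig{
            NoClientAuth: true,
            PreAuthConnCallback: func(conn ssh.ServerPreAuthConn) {
                // Runs before any auth request; the conn stays usable until
                // authentication completes, after which SendAuthBanner errors.
                if err := conn.SendAuthBanner("authorized use only\n"); err != nil {
                    log.Printf("banner to %s failed: %v", conn.RemoteAddr(), err)
                }
            },
        }
        _ = cfg // cfg.AddHostKey(signer); then ssh.NewServerConn(netConn, cfg)
    }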
diff --git a/vendor/golang.org/x/crypto/ssh/tcpip.go b/vendor/golang.org/x/crypto/ssh/tcpip.go
index ef5059a1..93d844f0 100644
--- a/vendor/golang.org/x/crypto/ssh/tcpip.go
+++ b/vendor/golang.org/x/crypto/ssh/tcpip.go
@@ -459,7 +459,7 @@ func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel
return nil, err
}
go DiscardRequests(in)
- return ch, err
+ return ch, nil
}
type tcpChan struct {
diff --git a/vendor/golang.org/x/exp/constraints/constraints.go b/vendor/golang.org/x/exp/constraints/constraints.go
index 2c033dff..9d260bab 100644
--- a/vendor/golang.org/x/exp/constraints/constraints.go
+++ b/vendor/golang.org/x/exp/constraints/constraints.go
@@ -6,6 +6,8 @@
// with type parameters.
package constraints
+import "cmp"
+
// Signed is a constraint that permits any signed integer type.
// If future releases of Go add new predeclared signed integer types,
// this constraint will be modified to include them.
@@ -45,6 +47,8 @@ type Complex interface {
// that supports the operators < <= >= >.
// If future releases of Go add new ordered types,
// this constraint will be modified to include them.
-type Ordered interface {
- Integer | Float | ~string
-}
+//
+// This type is redundant since Go 1.21 introduced [cmp.Ordered].
+//
+//go:fix inline
+type Ordered = cmp.Ordered
diff --git a/vendor/golang.org/x/exp/maps/maps.go b/vendor/golang.org/x/exp/maps/maps.go
index ecc0dabb..4a9747ef 100644
--- a/vendor/golang.org/x/exp/maps/maps.go
+++ b/vendor/golang.org/x/exp/maps/maps.go
@@ -5,9 +5,16 @@
// Package maps defines various functions useful with maps of any type.
package maps
+import "maps"
+
// Keys returns the keys of the map m.
// The keys will be in an indeterminate order.
+//
+// The simplest true equivalent using the standard library is:
+//
+// slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m))
func Keys[M ~map[K]V, K comparable, V any](m M) []K {
+
r := make([]K, 0, len(m))
for k := range m {
r = append(r, k)
@@ -17,7 +24,12 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K {
// Values returns the values of the map m.
// The values will be in an indeterminate order.
+//
+// The simplest true equivalent using the standard library is:
+//
+// slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m))
func Values[M ~map[K]V, K comparable, V any](m M) []V {
+
r := make([]V, 0, len(m))
for _, v := range m {
r = append(r, v)
@@ -27,68 +39,48 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V {
// Equal reports whether two maps contain the same key/value pairs.
// Values are compared using ==.
+//
+//go:fix inline
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
- if len(m1) != len(m2) {
- return false
- }
- for k, v1 := range m1 {
- if v2, ok := m2[k]; !ok || v1 != v2 {
- return false
- }
- }
- return true
+ return maps.Equal(m1, m2)
}
// EqualFunc is like Equal, but compares values using eq.
// Keys are still compared with ==.
+//
+//go:fix inline
func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
- if len(m1) != len(m2) {
- return false
- }
- for k, v1 := range m1 {
- if v2, ok := m2[k]; !ok || !eq(v1, v2) {
- return false
- }
- }
- return true
+ return maps.EqualFunc(m1, m2, eq)
}
// Clear removes all entries from m, leaving it empty.
+//
+//go:fix inline
func Clear[M ~map[K]V, K comparable, V any](m M) {
- for k := range m {
- delete(m, k)
- }
+ clear(m)
}
// Clone returns a copy of m. This is a shallow clone:
// the new keys and values are set using ordinary assignment.
+//
+//go:fix inline
func Clone[M ~map[K]V, K comparable, V any](m M) M {
- // Preserve nil in case it matters.
- if m == nil {
- return nil
- }
- r := make(M, len(m))
- for k, v := range m {
- r[k] = v
- }
- return r
+ return maps.Clone(m)
}
// Copy copies all key/value pairs in src adding them to dst.
// When a key in src is already present in dst,
// the value in dst will be overwritten by the value associated
// with the key in src.
+//
+//go:fix inline
func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
- for k, v := range src {
- dst[k] = v
- }
+ maps.Copy(dst, src)
}
// DeleteFunc deletes any key/value pairs from m for which del returns true.
+//
+//go:fix inline
func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
- for k, v := range m {
- if del(k, v) {
- delete(m, k)
- }
- }
+ maps.DeleteFunc(m, del)
}
diff --git a/vendor/golang.org/x/exp/slices/cmp.go b/vendor/golang.org/x/exp/slices/cmp.go
deleted file mode 100644
index fbf1934a..00000000
--- a/vendor/golang.org/x/exp/slices/cmp.go
+++ /dev/null
@@ -1,44 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// min is a version of the predeclared function from the Go 1.21 release.
-func min[T constraints.Ordered](a, b T) T {
- if a < b || isNaN(a) {
- return a
- }
- return b
-}
-
-// max is a version of the predeclared function from the Go 1.21 release.
-func max[T constraints.Ordered](a, b T) T {
- if a > b || isNaN(a) {
- return a
- }
- return b
-}
-
-// cmpLess is a copy of cmp.Less from the Go 1.21 release.
-func cmpLess[T constraints.Ordered](x, y T) bool {
- return (isNaN(x) && !isNaN(y)) || x < y
-}
-
-// cmpCompare is a copy of cmp.Compare from the Go 1.21 release.
-func cmpCompare[T constraints.Ordered](x, y T) int {
- xNaN := isNaN(x)
- yNaN := isNaN(y)
- if xNaN && yNaN {
- return 0
- }
- if xNaN || x < y {
- return -1
- }
- if yNaN || x > y {
- return +1
- }
- return 0
-}
diff --git a/vendor/golang.org/x/exp/slices/slices.go b/vendor/golang.org/x/exp/slices/slices.go
index 46ceac34..da0df370 100644
--- a/vendor/golang.org/x/exp/slices/slices.go
+++ b/vendor/golang.org/x/exp/slices/slices.go
@@ -6,9 +6,8 @@
package slices
import (
- "unsafe"
-
- "golang.org/x/exp/constraints"
+ "cmp"
+ "slices"
)
// Equal reports whether two slices are equal: the same length and all
@@ -16,16 +15,10 @@ import (
// Otherwise, the elements are compared in increasing index order, and the
// comparison stops at the first unequal pair.
// Floating point NaNs are not considered equal.
+//
+//go:fix inline
func Equal[S ~[]E, E comparable](s1, s2 S) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i := range s1 {
- if s1[i] != s2[i] {
- return false
- }
- }
- return true
+ return slices.Equal(s1, s2)
}
// EqualFunc reports whether two slices are equal using an equality
@@ -33,17 +26,10 @@ func Equal[S ~[]E, E comparable](s1, s2 S) bool {
// EqualFunc returns false. Otherwise, the elements are compared in
// increasing index order, and the comparison stops at the first index
// for which eq returns false.
+//
+//go:fix inline
func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) bool) bool {
- if len(s1) != len(s2) {
- return false
- }
- for i, v1 := range s1 {
- v2 := s2[i]
- if !eq(v1, v2) {
- return false
- }
- }
- return true
+ return slices.EqualFunc(s1, s2, eq)
}
// Compare compares the elements of s1 and s2, using [cmp.Compare] on each pair
@@ -53,20 +39,10 @@ func EqualFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, eq func(E1, E2) boo
// If both slices are equal until one of them ends, the shorter slice is
// considered less than the longer one.
// The result is 0 if s1 == s2, -1 if s1 < s2, and +1 if s1 > s2.
-func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmpCompare(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
+//
+//go:fix inline
+func Compare[S ~[]E, E cmp.Ordered](s1, s2 S) int {
+ return slices.Compare(s1, s2)
}
// CompareFunc is like [Compare] but uses a custom comparison function on each
@@ -74,53 +50,41 @@ func Compare[S ~[]E, E constraints.Ordered](s1, s2 S) int {
// The result is the first non-zero result of cmp; if cmp always
// returns 0 the result is 0 if len(s1) == len(s2), -1 if len(s1) < len(s2),
// and +1 if len(s1) > len(s2).
+//
+//go:fix inline
func CompareFunc[S1 ~[]E1, S2 ~[]E2, E1, E2 any](s1 S1, s2 S2, cmp func(E1, E2) int) int {
- for i, v1 := range s1 {
- if i >= len(s2) {
- return +1
- }
- v2 := s2[i]
- if c := cmp(v1, v2); c != 0 {
- return c
- }
- }
- if len(s1) < len(s2) {
- return -1
- }
- return 0
+ return slices.CompareFunc(s1, s2, cmp)
}
// Index returns the index of the first occurrence of v in s,
// or -1 if not present.
+//
+//go:fix inline
func Index[S ~[]E, E comparable](s S, v E) int {
- for i := range s {
- if v == s[i] {
- return i
- }
- }
- return -1
+ return slices.Index(s, v)
}
// IndexFunc returns the first index i satisfying f(s[i]),
// or -1 if none do.
+//
+//go:fix inline
func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int {
- for i := range s {
- if f(s[i]) {
- return i
- }
- }
- return -1
+ return slices.IndexFunc(s, f)
}
// Contains reports whether v is present in s.
+//
+//go:fix inline
func Contains[S ~[]E, E comparable](s S, v E) bool {
- return Index(s, v) >= 0
+ return slices.Contains(s, v)
}
// ContainsFunc reports whether at least one
// element e of s satisfies f(e).
+//
+//go:fix inline
func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
- return IndexFunc(s, f) >= 0
+ return slices.ContainsFunc(s, f)
}
// Insert inserts the values v... into s at index i,
@@ -130,93 +94,10 @@ func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool {
// and r[i+len(v)] == value originally at r[i].
// Insert panics if i is out of range.
// This function is O(len(s) + len(v)).
+//
+//go:fix inline
func Insert[S ~[]E, E any](s S, i int, v ...E) S {
- m := len(v)
- if m == 0 {
- return s
- }
- n := len(s)
- if i == n {
- return append(s, v...)
- }
- if n+m > cap(s) {
- // Use append rather than make so that we bump the size of
- // the slice up to the next storage class.
- // This is what Grow does but we don't call Grow because
- // that might copy the values twice.
- s2 := append(s[:i], make(S, n+m-i)...)
- copy(s2[i:], v)
- copy(s2[i+m:], s[i:])
- return s2
- }
- s = s[:n+m]
-
- // before:
- // s: aaaaaaaabbbbccccccccdddd
- // ^ ^ ^ ^
- // i i+m n n+m
- // after:
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- //
- // a are the values that don't move in s.
- // v are the values copied in from v.
- // b and c are the values from s that are shifted up in index.
- // d are the values that get overwritten, never to be seen again.
-
- if !overlaps(v, s[i+m:]) {
- // Easy case - v does not overlap either the c or d regions.
- // (It might be in some of a or b, or elsewhere entirely.)
- // The data we copy up doesn't write to v at all, so just do it.
-
- copy(s[i+m:], s[i:])
-
- // Now we have
- // s: aaaaaaaabbbbbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // Note the b values are duplicated.
-
- copy(s[i:], v)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
- }
-
- // The hard case - v overlaps c or d. We can't just shift up
- // the data because we'd move or clobber the values we're trying
- // to insert.
- // So instead, write v on top of d, then rotate.
- copy(s[n:], v)
-
- // Now we have
- // s: aaaaaaaabbbbccccccccvvvv
- // ^ ^ ^ ^
- // i i+m n n+m
-
- rotateRight(s[i:], m)
-
- // Now we have
- // s: aaaaaaaavvvvbbbbcccccccc
- // ^ ^ ^ ^
- // i i+m n n+m
- // That's the result we want.
- return s
-}
-
-// clearSlice sets all elements up to the length of s to the zero value of E.
-// We may use the builtin clear func instead, and remove clearSlice, when upgrading
-// to Go 1.21+.
-func clearSlice[S ~[]E, E any](s S) {
- var zero E
- for i := range s {
- s[i] = zero
- }
+ return slices.Insert(s, i, v...)
}
// Delete removes the elements s[i:j] from s, returning the modified slice.
@@ -224,136 +105,36 @@ func clearSlice[S ~[]E, E any](s S) {
// Delete is O(len(s)-i), so if many items must be deleted, it is better to
// make a single call deleting them all together than to delete one at a time.
// Delete zeroes the elements s[len(s)-(j-i):len(s)].
+//
+//go:fix inline
func Delete[S ~[]E, E any](s S, i, j int) S {
- _ = s[i:j:len(s)] // bounds check
-
- if i == j {
- return s
- }
-
- oldlen := len(s)
- s = append(s[:i], s[j:]...)
- clearSlice(s[len(s):oldlen]) // zero/nil out the obsolete elements, for GC
- return s
+ return slices.Delete(s, i, j)
}
// DeleteFunc removes any elements from s for which del returns true,
// returning the modified slice.
// DeleteFunc zeroes the elements between the new length and the original length.
+//
+//go:fix inline
func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S {
- i := IndexFunc(s, del)
- if i == -1 {
- return s
- }
- // Don't start copying elements until we find one to delete.
- for j := i + 1; j < len(s); j++ {
- if v := s[j]; !del(v) {
- s[i] = v
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
+ return slices.DeleteFunc(s, del)
}
// Replace replaces the elements s[i:j] by the given v, and returns the
// modified slice. Replace panics if s[i:j] is not a valid slice of s.
// When len(v) < (j-i), Replace zeroes the elements between the new length and the original length.
+//
+//go:fix inline
func Replace[S ~[]E, E any](s S, i, j int, v ...E) S {
- _ = s[i:j] // verify that i:j is a valid subslice
-
- if i == j {
- return Insert(s, i, v...)
- }
- if j == len(s) {
- return append(s[:i], v...)
- }
-
- tot := len(s[:i]) + len(v) + len(s[j:])
- if tot > cap(s) {
- // Too big to fit, allocate and copy over.
- s2 := append(s[:i], make(S, tot-i)...) // See Insert
- copy(s2[i:], v)
- copy(s2[i+len(v):], s[j:])
- return s2
- }
-
- r := s[:tot]
-
- if i+len(v) <= j {
- // Easy, as v fits in the deleted portion.
- copy(r[i:], v)
- if i+len(v) != j {
- copy(r[i+len(v):], s[j:])
- }
- clearSlice(s[tot:]) // zero/nil out the obsolete elements, for GC
- return r
- }
-
- // We are expanding (v is bigger than j-i).
- // The situation is something like this:
- // (example has i=4,j=8,len(s)=16,len(v)=6)
- // s: aaaaxxxxbbbbbbbbyy
- // ^ ^ ^ ^
- // i j len(s) tot
- // a: prefix of s
- // x: deleted range
- // b: more of s
- // y: area to expand into
-
- if !overlaps(r[i+len(v):], v) {
- // Easy, as v is not clobbered by the first copy.
- copy(r[i+len(v):], s[j:])
- copy(r[i:], v)
- return r
- }
-
- // This is a situation where we don't have a single place to which
- // we can copy v. Parts of it need to go to two different places.
- // We want to copy the prefix of v into y and the suffix into x, then
- // rotate |y| spots to the right.
- //
- // v[2:] v[:2]
- // | |
- // s: aaaavvvvbbbbbbbbvv
- // ^ ^ ^ ^
- // i j len(s) tot
- //
- // If either of those two destinations don't alias v, then we're good.
- y := len(v) - (j - i) // length of y portion
-
- if !overlaps(r[i:j], v) {
- copy(r[i:j], v[y:])
- copy(r[len(s):], v[:y])
- rotateRight(r[i:], y)
- return r
- }
- if !overlaps(r[len(s):], v) {
- copy(r[len(s):], v[:y])
- copy(r[i:j], v[y:])
- rotateRight(r[i:], y)
- return r
- }
-
- // Now we know that v overlaps both x and y.
- // That means that the entirety of b is *inside* v.
- // So we don't need to preserve b at all; instead we
- // can copy v first, then copy the b part of v out of
- // v to the right destination.
- k := startIdx(v, s[j:])
- copy(r[i:], v)
- copy(r[i+len(v):], r[i+k:])
- return r
+ return slices.Replace(s, i, j, v...)
}
// Clone returns a copy of the slice.
// The elements are copied using assignment, so this is a shallow clone.
+//
+//go:fix inline
func Clone[S ~[]E, E any](s S) S {
- // Preserve nil in case it matters.
- if s == nil {
- return nil
- }
- return append(S([]E{}), s...)
+ return slices.Clone(s)
}
// Compact replaces consecutive runs of equal elements with a single copy.
@@ -361,155 +142,41 @@ func Clone[S ~[]E, E any](s S) S {
// Compact modifies the contents of the slice s and returns the modified slice,
// which may have a smaller length.
// Compact zeroes the elements between the new length and the original length.
+//
+//go:fix inline
func Compact[S ~[]E, E comparable](s S) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if s[k] != s[k-1] {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
+ return slices.Compact(s)
}
// CompactFunc is like [Compact] but uses an equality function to compare elements.
// For runs of elements that compare equal, CompactFunc keeps the first one.
// CompactFunc zeroes the elements between the new length and the original length.
+//
+//go:fix inline
func CompactFunc[S ~[]E, E any](s S, eq func(E, E) bool) S {
- if len(s) < 2 {
- return s
- }
- i := 1
- for k := 1; k < len(s); k++ {
- if !eq(s[k], s[k-1]) {
- if i != k {
- s[i] = s[k]
- }
- i++
- }
- }
- clearSlice(s[i:]) // zero/nil out the obsolete elements, for GC
- return s[:i]
+ return slices.CompactFunc(s, eq)
}
// Grow increases the slice's capacity, if necessary, to guarantee space for
// another n elements. After Grow(n), at least n elements can be appended
// to the slice without another allocation. If n is negative or too large to
// allocate the memory, Grow panics.
+//
+//go:fix inline
func Grow[S ~[]E, E any](s S, n int) S {
- if n < 0 {
- panic("cannot be negative")
- }
- if n -= cap(s) - len(s); n > 0 {
- // TODO(https://go.dev/issue/53888): Make using []E instead of S
- // to workaround a compiler bug where the runtime.growslice optimization
- // does not take effect. Revert when the compiler is fixed.
- s = append([]E(s)[:cap(s)], make([]E, n)...)[:len(s)]
- }
- return s
+ return slices.Grow(s, n)
}
// Clip removes unused capacity from the slice, returning s[:len(s):len(s)].
+//
+//go:fix inline
func Clip[S ~[]E, E any](s S) S {
- return s[:len(s):len(s)]
-}
-
-// Rotation algorithm explanation:
-//
-// rotate left by 2
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join first parts
-// 89234567 01
-// recursively rotate first left part by 2
-// 23456789 01
-// join at the end
-// 2345678901
-//
-// rotate left by 8
-// start with
-// 0123456789
-// split up like this
-// 01 234567 89
-// swap first 2 and last 2
-// 89 234567 01
-// join last parts
-// 89 23456701
-// recursively rotate second part left by 6
-// 89 01234567
-// join at the end
-// 8901234567
-
-// TODO: There are other rotate algorithms.
-// This algorithm has the desirable property that it moves each element exactly twice.
-// The triple-reverse algorithm is simpler and more cache friendly, but takes more writes.
-// The follow-cycles algorithm can be 1-write but it is not very cache friendly.
-
-// rotateLeft rotates b left by n spaces.
-// s_final[i] = s_orig[i+r], wrapping around.
-func rotateLeft[E any](s []E, r int) {
- for r != 0 && r != len(s) {
- if r*2 <= len(s) {
- swap(s[:r], s[len(s)-r:])
- s = s[:len(s)-r]
- } else {
- swap(s[:len(s)-r], s[r:])
- s, r = s[len(s)-r:], r*2-len(s)
- }
- }
-}
-func rotateRight[E any](s []E, r int) {
- rotateLeft(s, len(s)-r)
-}
-
-// swap swaps the contents of x and y. x and y must be equal length and disjoint.
-func swap[E any](x, y []E) {
- for i := 0; i < len(x); i++ {
- x[i], y[i] = y[i], x[i]
- }
-}
-
-// overlaps reports whether the memory ranges a[0:len(a)] and b[0:len(b)] overlap.
-func overlaps[E any](a, b []E) bool {
- if len(a) == 0 || len(b) == 0 {
- return false
- }
- elemSize := unsafe.Sizeof(a[0])
- if elemSize == 0 {
- return false
- }
- // TODO: use a runtime/unsafe facility once one becomes available. See issue 12445.
- // Also see crypto/internal/alias/alias.go:AnyOverlap
- return uintptr(unsafe.Pointer(&a[0])) <= uintptr(unsafe.Pointer(&b[len(b)-1]))+(elemSize-1) &&
- uintptr(unsafe.Pointer(&b[0])) <= uintptr(unsafe.Pointer(&a[len(a)-1]))+(elemSize-1)
-}
-
-// startIdx returns the index in haystack where the needle starts.
-// prerequisite: the needle must be aliased entirely inside the haystack.
-func startIdx[E any](haystack, needle []E) int {
- p := &needle[0]
- for i := range haystack {
- if p == &haystack[i] {
- return i
- }
- }
- // TODO: what if the overlap is by a non-integral number of Es?
- panic("needle not found")
+ return slices.Clip(s)
}
// Reverse reverses the elements of the slice in place.
+//
+//go:fix inline
func Reverse[S ~[]E, E any](s S) {
- for i, j := 0, len(s)-1; i < j; i, j = i+1, j-1 {
- s[i], s[j] = s[j], s[i]
- }
+ slices.Reverse(s)
}
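The slices.go rewrite above turns every exported helper into a one-line forwarder to the Go 1.21+ standard-library slices package; the //go:fix inline directives mark the wrappers so tools that understand the directive can inline the calls and migrate callers to the stdlib. A minimal sketch of the behavior callers keep relying on, using only the stdlib-backed functions defined above (illustrative only, not part of the vendored file):

package main

import (
	"fmt"

	"golang.org/x/exp/slices" // now thin wrappers over the stdlib "slices" package
)

func main() {
	s := []int{3, 1, 2}

	// Each call forwards directly to the corresponding stdlib function.
	fmt.Println(slices.Contains(s, 2)) // true
	fmt.Println(slices.Index(s, 1))    // 1

	s = slices.Insert(s, 1, 9) // [3 9 1 2]
	s = slices.Delete(s, 0, 1) // [9 1 2]

	fmt.Println(slices.Equal(s, []int{9, 1, 2})) // true
}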
diff --git a/vendor/golang.org/x/exp/slices/sort.go b/vendor/golang.org/x/exp/slices/sort.go
index f58bbc7b..bd91a8d4 100644
--- a/vendor/golang.org/x/exp/slices/sort.go
+++ b/vendor/golang.org/x/exp/slices/sort.go
@@ -2,21 +2,19 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:generate go run $GOROOT/src/sort/gen_sort_variants.go -exp
-
package slices
import (
- "math/bits"
-
- "golang.org/x/exp/constraints"
+ "cmp"
+ "slices"
)
// Sort sorts a slice of any ordered type in ascending order.
// When sorting floating-point numbers, NaNs are ordered before other values.
-func Sort[S ~[]E, E constraints.Ordered](x S) {
- n := len(x)
- pdqsortOrdered(x, 0, n, bits.Len(uint(n)))
+//
+//go:fix inline
+func Sort[S ~[]E, E cmp.Ordered](x S) {
+ slices.Sort(x)
}
// SortFunc sorts the slice x in ascending order as determined by the cmp
@@ -28,119 +26,79 @@ func Sort[S ~[]E, E constraints.Ordered](x S) {
// SortFunc requires that cmp is a strict weak ordering.
// See https://en.wikipedia.org/wiki/Weak_ordering#Strict_weak_orderings.
// To indicate 'uncomparable', return 0 from the function.
+//
+//go:fix inline
func SortFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- n := len(x)
- pdqsortCmpFunc(x, 0, n, bits.Len(uint(n)), cmp)
+ slices.SortFunc(x, cmp)
}
// SortStableFunc sorts the slice x while keeping the original order of equal
// elements, using cmp to compare elements in the same way as [SortFunc].
+//
+//go:fix inline
func SortStableFunc[S ~[]E, E any](x S, cmp func(a, b E) int) {
- stableCmpFunc(x, len(x), cmp)
+ slices.SortStableFunc(x, cmp)
}
// IsSorted reports whether x is sorted in ascending order.
-func IsSorted[S ~[]E, E constraints.Ordered](x S) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmpLess(x[i], x[i-1]) {
- return false
- }
- }
- return true
+//
+//go:fix inline
+func IsSorted[S ~[]E, E cmp.Ordered](x S) bool {
+ return slices.IsSorted(x)
}
// IsSortedFunc reports whether x is sorted in ascending order, with cmp as the
// comparison function as defined by [SortFunc].
+//
+//go:fix inline
func IsSortedFunc[S ~[]E, E any](x S, cmp func(a, b E) int) bool {
- for i := len(x) - 1; i > 0; i-- {
- if cmp(x[i], x[i-1]) < 0 {
- return false
- }
- }
- return true
+ return slices.IsSortedFunc(x, cmp)
}
// Min returns the minimal value in x. It panics if x is empty.
// For floating-point numbers, Min propagates NaNs (any NaN value in x
// forces the output to be NaN).
-func Min[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Min: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = min(m, x[i])
- }
- return m
+//
+//go:fix inline
+func Min[S ~[]E, E cmp.Ordered](x S) E {
+ return slices.Min(x)
}
// MinFunc returns the minimal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one minimal element
// according to the cmp function, MinFunc returns the first one.
+//
+//go:fix inline
func MinFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MinFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) < 0 {
- m = x[i]
- }
- }
- return m
+ return slices.MinFunc(x, cmp)
}
// Max returns the maximal value in x. It panics if x is empty.
// For floating-point E, Max propagates NaNs (any NaN value in x
// forces the output to be NaN).
-func Max[S ~[]E, E constraints.Ordered](x S) E {
- if len(x) < 1 {
- panic("slices.Max: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- m = max(m, x[i])
- }
- return m
+//
+//go:fix inline
+func Max[S ~[]E, E cmp.Ordered](x S) E {
+ return slices.Max(x)
}
// MaxFunc returns the maximal value in x, using cmp to compare elements.
// It panics if x is empty. If there is more than one maximal element
// according to the cmp function, MaxFunc returns the first one.
+//
+//go:fix inline
func MaxFunc[S ~[]E, E any](x S, cmp func(a, b E) int) E {
- if len(x) < 1 {
- panic("slices.MaxFunc: empty list")
- }
- m := x[0]
- for i := 1; i < len(x); i++ {
- if cmp(x[i], m) > 0 {
- m = x[i]
- }
- }
- return m
+ return slices.MaxFunc(x, cmp)
}
// BinarySearch searches for target in a sorted slice and returns the position
// where target is found, or the position where target would appear in the
// sort order; it also returns a bool saying whether the target is really found
// in the slice. The slice must be sorted in increasing order.
-func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
- // Inlining is faster than calling BinarySearchFunc with a lambda.
- n := len(x)
- // Define x[-1] < target and x[n] >= target.
- // Invariant: x[i-1] < target, x[j] >= target.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmpLess(x[h], target) {
- i = h + 1 // preserves x[i-1] < target
- } else {
- j = h // preserves x[j] >= target
- }
- }
- // i == j, x[i-1] < target, and x[j] (= x[i]) >= target => answer is i.
- return i, i < n && (x[i] == target || (isNaN(x[i]) && isNaN(target)))
+//
+//go:fix inline
+func BinarySearch[S ~[]E, E cmp.Ordered](x S, target E) (int, bool) {
+ return slices.BinarySearch(x, target)
}
// BinarySearchFunc works like [BinarySearch], but uses a custom comparison
@@ -150,48 +108,8 @@ func BinarySearch[S ~[]E, E constraints.Ordered](x S, target E) (int, bool) {
// or a positive number if the slice element follows the target.
// cmp must implement the same ordering as the slice, such that if
// cmp(a, t) < 0 and cmp(b, t) >= 0, then a must precede b in the slice.
+//
+//go:fix inline
func BinarySearchFunc[S ~[]E, E, T any](x S, target T, cmp func(E, T) int) (int, bool) {
- n := len(x)
- // Define cmp(x[-1], target) < 0 and cmp(x[n], target) >= 0 .
- // Invariant: cmp(x[i - 1], target) < 0, cmp(x[j], target) >= 0.
- i, j := 0, n
- for i < j {
- h := int(uint(i+j) >> 1) // avoid overflow when computing h
- // i ≤ h < j
- if cmp(x[h], target) < 0 {
- i = h + 1 // preserves cmp(x[i - 1], target) < 0
- } else {
- j = h // preserves cmp(x[j], target) >= 0
- }
- }
- // i == j, cmp(x[i-1], target) < 0, and cmp(x[j], target) (= cmp(x[i], target)) >= 0 => answer is i.
- return i, i < n && cmp(x[i], target) == 0
-}
-
-type sortedHint int // hint for pdqsort when choosing the pivot
-
-const (
- unknownHint sortedHint = iota
- increasingHint
- decreasingHint
-)
-
-// xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf
-type xorshift uint64
-
-func (r *xorshift) Next() uint64 {
- *r ^= *r << 13
- *r ^= *r >> 17
- *r ^= *r << 5
- return uint64(*r)
-}
-
-func nextPowerOfTwo(length int) uint {
- return 1 << bits.Len(uint(length))
-}
-
-// isNaN reports whether x is a NaN without requiring the math package.
-// This will always return false if T is not floating-point.
-func isNaN[T constraints.Ordered](x T) bool {
- return x != x
+ return slices.BinarySearchFunc(x, target, cmp)
}
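The sort.go wrappers follow the same pattern: constraints.Ordered becomes cmp.Ordered and each function delegates to the stdlib, so the generated pdqsort/symmerge code removed below no longer has any callers. A short sketch of the stdlib calls these wrappers now resolve to, including the three-way comparator style used by SortFunc and BinarySearchFunc (illustrative only, not part of the vendored file):

package main

import (
	"cmp"
	"fmt"
	"slices" // the stdlib package the vendored wrappers delegate to
)

type user struct {
	name string
	age  int
}

func main() {
	xs := []int{5, 2, 4, 1}
	slices.Sort(xs) // xs is now [1 2 4 5]

	i, ok := slices.BinarySearch(xs, 4) // 2, true
	fmt.Println(i, ok)

	// SortFunc takes a three-way comparison; cmp.Compare provides one for ordered types.
	us := []user{{"bob", 30}, {"amy", 20}}
	slices.SortFunc(us, func(a, b user) int { return cmp.Compare(a.name, b.name) })
	fmt.Println(us) // [{amy 20} {bob 30}]
}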
diff --git a/vendor/golang.org/x/exp/slices/zsortanyfunc.go b/vendor/golang.org/x/exp/slices/zsortanyfunc.go
deleted file mode 100644
index 06f2c7a2..00000000
--- a/vendor/golang.org/x/exp/slices/zsortanyfunc.go
+++ /dev/null
@@ -1,479 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-// insertionSortCmpFunc sorts data[a:b] using insertion sort.
-func insertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && (cmp(data[j], data[j-1]) < 0); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownCmpFunc implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownCmpFunc[E any](data []E, lo, hi, first int, cmp func(a, b E) int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && (cmp(data[first+child], data[first+child+1]) < 0) {
- child++
- }
- if !(cmp(data[first+root], data[first+child]) < 0) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownCmpFunc(data, i, hi, first, cmp)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownCmpFunc(data, lo, i, first, cmp)
- }
-}
-
-// pdqsortCmpFunc sorts data[a:b].
-// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortCmpFunc[E any](data []E, a, b, limit int, cmp func(a, b E) int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortCmpFunc(data, a, b, cmp)
- return
- }
-
- // If the last partitioning was imbalanced, we need to breaking patterns.
- if !wasBalanced {
- breakPatternsCmpFunc(data, a, b, cmp)
- limit--
- }
-
- pivot, hint := choosePivotCmpFunc(data, a, b, cmp)
- if hint == decreasingHint {
- reverseRangeCmpFunc(data, a, b, cmp)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortCmpFunc(data, a, b, cmp) {
- return
- }
- }
-
- // Probably the slice contains many duplicate elements, partition the slice into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !(cmp(data[a-1], data[pivot]) < 0) {
- mid := partitionEqualCmpFunc(data, a, b, pivot, cmp)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionCmpFunc(data, a, b, pivot, cmp)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortCmpFunc(data, a, mid, limit, cmp)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortCmpFunc(data, mid+1, b, limit, cmp)
- b = mid
- }
- }
-}
-
-// partitionCmpFunc does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && (cmp(data[i], data[a]) < 0) {
- i++
- }
- for i <= j && !(cmp(data[j], data[a]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualCmpFunc partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualCmpFunc[E any](data []E, a, b, pivot int, cmp func(a, b E) int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !(cmp(data[a], data[i]) < 0) {
- i++
- }
- for i <= j && (cmp(data[a], data[j]) < 0) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortCmpFunc partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !(cmp(data[i], data[i-1]) < 0) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !(cmp(data[j], data[j-1]) < 0) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsCmpFunc scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotCmpFunc chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentCmpFunc(data, i, &swaps, cmp)
- j = medianAdjacentCmpFunc(data, j, &swaps, cmp)
- k = medianAdjacentCmpFunc(data, k, &swaps, cmp)
- }
- // Find the median among i, j, k and stores it into j.
- j = medianCmpFunc(data, i, j, k, &swaps, cmp)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
-
-// order2CmpFunc returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
-func order2CmpFunc[E any](data []E, a, b int, swaps *int, cmp func(a, b E) int) (int, int) {
- if cmp(data[b], data[a]) < 0 {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianCmpFunc returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianCmpFunc[E any](data []E, a, b, c int, swaps *int, cmp func(a, b E) int) int {
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- b, c = order2CmpFunc(data, b, c, swaps, cmp)
- a, b = order2CmpFunc(data, a, b, swaps, cmp)
- return b
-}
-
-// medianAdjacentCmpFunc finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
-func medianAdjacentCmpFunc[E any](data []E, a int, swaps *int, cmp func(a, b E) int) int {
- return medianCmpFunc(data, a-1, a, a+1, swaps, cmp)
-}
-
-func reverseRangeCmpFunc[E any](data []E, a, b int, cmp func(a, b E) int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeCmpFunc[E any](data []E, a, b, n int, cmp func(a, b E) int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableCmpFunc[E any](data []E, n int, cmp func(a, b E) int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortCmpFunc(data, a, b, cmp)
- a = b
- b += blockSize
- }
- insertionSortCmpFunc(data, a, n, cmp)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeCmpFunc(data, a, a+blockSize, b, cmp)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeCmpFunc(data, a, m, n, cmp)
- }
- blockSize *= 2
- }
-}
-
-// symMergeCmpFunc merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-n. Wolog M < N.
-// The recursion depth is bound by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmp(data[h], data[a]) < 0 {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !(cmp(data[m], data[h]) < 0) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !(cmp(data[p-c], data[c]) < 0) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateCmpFunc(data, start, m, end, cmp)
- }
- if a < start && start < mid {
- symMergeCmpFunc(data, a, start, mid, cmp)
- }
- if mid < end && end < b {
- symMergeCmpFunc(data, mid, end, b, cmp)
- }
-}
-
-// rotateCmpFunc rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateCmpFunc[E any](data []E, a, m, b int, cmp func(a, b E) int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeCmpFunc(data, m-i, m, j, cmp)
- i -= j
- } else {
- swapRangeCmpFunc(data, m-i, m+j-i, i, cmp)
- j -= i
- }
- }
- // i == j
- swapRangeCmpFunc(data, m-i, m, i, cmp)
-}
diff --git a/vendor/golang.org/x/exp/slices/zsortordered.go b/vendor/golang.org/x/exp/slices/zsortordered.go
deleted file mode 100644
index 99b47c39..00000000
--- a/vendor/golang.org/x/exp/slices/zsortordered.go
+++ /dev/null
@@ -1,481 +0,0 @@
-// Code generated by gen_sort_variants.go; DO NOT EDIT.
-
-// Copyright 2022 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package slices
-
-import "golang.org/x/exp/constraints"
-
-// insertionSortOrdered sorts data[a:b] using insertion sort.
-func insertionSortOrdered[E constraints.Ordered](data []E, a, b int) {
- for i := a + 1; i < b; i++ {
- for j := i; j > a && cmpLess(data[j], data[j-1]); j-- {
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
-}
-
-// siftDownOrdered implements the heap property on data[lo:hi].
-// first is an offset into the array where the root of the heap lies.
-func siftDownOrdered[E constraints.Ordered](data []E, lo, hi, first int) {
- root := lo
- for {
- child := 2*root + 1
- if child >= hi {
- break
- }
- if child+1 < hi && cmpLess(data[first+child], data[first+child+1]) {
- child++
- }
- if !cmpLess(data[first+root], data[first+child]) {
- return
- }
- data[first+root], data[first+child] = data[first+child], data[first+root]
- root = child
- }
-}
-
-func heapSortOrdered[E constraints.Ordered](data []E, a, b int) {
- first := a
- lo := 0
- hi := b - a
-
- // Build heap with greatest element at top.
- for i := (hi - 1) / 2; i >= 0; i-- {
- siftDownOrdered(data, i, hi, first)
- }
-
- // Pop elements, largest first, into end of data.
- for i := hi - 1; i >= 0; i-- {
- data[first], data[first+i] = data[first+i], data[first]
- siftDownOrdered(data, lo, i, first)
- }
-}
-
-// pdqsortOrdered sorts data[a:b].
-// The algorithm based on pattern-defeating quicksort(pdqsort), but without the optimizations from BlockQuicksort.
-// pdqsort paper: https://arxiv.org/pdf/2106.05123.pdf
-// C++ implementation: https://github.com/orlp/pdqsort
-// Rust implementation: https://docs.rs/pdqsort/latest/pdqsort/
-// limit is the number of allowed bad (very unbalanced) pivots before falling back to heapsort.
-func pdqsortOrdered[E constraints.Ordered](data []E, a, b, limit int) {
- const maxInsertion = 12
-
- var (
- wasBalanced = true // whether the last partitioning was reasonably balanced
- wasPartitioned = true // whether the slice was already partitioned
- )
-
- for {
- length := b - a
-
- if length <= maxInsertion {
- insertionSortOrdered(data, a, b)
- return
- }
-
- // Fall back to heapsort if too many bad choices were made.
- if limit == 0 {
- heapSortOrdered(data, a, b)
- return
- }
-
- // If the last partitioning was imbalanced, we need to breaking patterns.
- if !wasBalanced {
- breakPatternsOrdered(data, a, b)
- limit--
- }
-
- pivot, hint := choosePivotOrdered(data, a, b)
- if hint == decreasingHint {
- reverseRangeOrdered(data, a, b)
- // The chosen pivot was pivot-a elements after the start of the array.
- // After reversing it is pivot-a elements before the end of the array.
- // The idea came from Rust's implementation.
- pivot = (b - 1) - (pivot - a)
- hint = increasingHint
- }
-
- // The slice is likely already sorted.
- if wasBalanced && wasPartitioned && hint == increasingHint {
- if partialInsertionSortOrdered(data, a, b) {
- return
- }
- }
-
- // Probably the slice contains many duplicate elements, partition the slice into
- // elements equal to and elements greater than the pivot.
- if a > 0 && !cmpLess(data[a-1], data[pivot]) {
- mid := partitionEqualOrdered(data, a, b, pivot)
- a = mid
- continue
- }
-
- mid, alreadyPartitioned := partitionOrdered(data, a, b, pivot)
- wasPartitioned = alreadyPartitioned
-
- leftLen, rightLen := mid-a, b-mid
- balanceThreshold := length / 8
- if leftLen < rightLen {
- wasBalanced = leftLen >= balanceThreshold
- pdqsortOrdered(data, a, mid, limit)
- a = mid + 1
- } else {
- wasBalanced = rightLen >= balanceThreshold
- pdqsortOrdered(data, mid+1, b, limit)
- b = mid
- }
- }
-}
-
-// partitionOrdered does one quicksort partition.
-// Let p = data[pivot]
-// Moves elements in data[a:b] around, so that data[i]<p and data[j]>=p for i<newpivot and j>newpivot.
-// On return, data[newpivot] = p
-func partitionOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int, alreadyPartitioned bool) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- data[j], data[a] = data[a], data[j]
- return j, true
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
-
- for {
- for i <= j && cmpLess(data[i], data[a]) {
- i++
- }
- for i <= j && !cmpLess(data[j], data[a]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- data[j], data[a] = data[a], data[j]
- return j, false
-}
-
-// partitionEqualOrdered partitions data[a:b] into elements equal to data[pivot] followed by elements greater than data[pivot].
-// It assumed that data[a:b] does not contain elements smaller than the data[pivot].
-func partitionEqualOrdered[E constraints.Ordered](data []E, a, b, pivot int) (newpivot int) {
- data[a], data[pivot] = data[pivot], data[a]
- i, j := a+1, b-1 // i and j are inclusive of the elements remaining to be partitioned
-
- for {
- for i <= j && !cmpLess(data[a], data[i]) {
- i++
- }
- for i <= j && cmpLess(data[a], data[j]) {
- j--
- }
- if i > j {
- break
- }
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
- return i
-}
-
-// partialInsertionSortOrdered partially sorts a slice, returns true if the slice is sorted at the end.
-func partialInsertionSortOrdered[E constraints.Ordered](data []E, a, b int) bool {
- const (
- maxSteps = 5 // maximum number of adjacent out-of-order pairs that will get shifted
- shortestShifting = 50 // don't shift any elements on short arrays
- )
- i := a + 1
- for j := 0; j < maxSteps; j++ {
- for i < b && !cmpLess(data[i], data[i-1]) {
- i++
- }
-
- if i == b {
- return true
- }
-
- if b-a < shortestShifting {
- return false
- }
-
- data[i], data[i-1] = data[i-1], data[i]
-
- // Shift the smaller one to the left.
- if i-a >= 2 {
- for j := i - 1; j >= 1; j-- {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- // Shift the greater one to the right.
- if b-i >= 2 {
- for j := i + 1; j < b; j++ {
- if !cmpLess(data[j], data[j-1]) {
- break
- }
- data[j], data[j-1] = data[j-1], data[j]
- }
- }
- }
- return false
-}
-
-// breakPatternsOrdered scatters some elements around in an attempt to break some patterns
-// that might cause imbalanced partitions in quicksort.
-func breakPatternsOrdered[E constraints.Ordered](data []E, a, b int) {
- length := b - a
- if length >= 8 {
- random := xorshift(length)
- modulus := nextPowerOfTwo(length)
-
- for idx := a + (length/4)*2 - 1; idx <= a+(length/4)*2+1; idx++ {
- other := int(uint(random.Next()) & (modulus - 1))
- if other >= length {
- other -= length
- }
- data[idx], data[a+other] = data[a+other], data[idx]
- }
- }
-}
-
-// choosePivotOrdered chooses a pivot in data[a:b].
-//
-// [0,8): chooses a static pivot.
-// [8,shortestNinther): uses the simple median-of-three method.
-// [shortestNinther,∞): uses the Tukey ninther method.
-func choosePivotOrdered[E constraints.Ordered](data []E, a, b int) (pivot int, hint sortedHint) {
- const (
- shortestNinther = 50
- maxSwaps = 4 * 3
- )
-
- l := b - a
-
- var (
- swaps int
- i = a + l/4*1
- j = a + l/4*2
- k = a + l/4*3
- )
-
- if l >= 8 {
- if l >= shortestNinther {
- // Tukey ninther method, the idea came from Rust's implementation.
- i = medianAdjacentOrdered(data, i, &swaps)
- j = medianAdjacentOrdered(data, j, &swaps)
- k = medianAdjacentOrdered(data, k, &swaps)
- }
- // Find the median among i, j, k and stores it into j.
- j = medianOrdered(data, i, j, k, &swaps)
- }
-
- switch swaps {
- case 0:
- return j, increasingHint
- case maxSwaps:
- return j, decreasingHint
- default:
- return j, unknownHint
- }
-}
-
-// order2Ordered returns x,y where data[x] <= data[y], where x,y=a,b or x,y=b,a.
-func order2Ordered[E constraints.Ordered](data []E, a, b int, swaps *int) (int, int) {
- if cmpLess(data[b], data[a]) {
- *swaps++
- return b, a
- }
- return a, b
-}
-
-// medianOrdered returns x where data[x] is the median of data[a],data[b],data[c], where x is a, b, or c.
-func medianOrdered[E constraints.Ordered](data []E, a, b, c int, swaps *int) int {
- a, b = order2Ordered(data, a, b, swaps)
- b, c = order2Ordered(data, b, c, swaps)
- a, b = order2Ordered(data, a, b, swaps)
- return b
-}
-
-// medianAdjacentOrdered finds the median of data[a - 1], data[a], data[a + 1] and stores the index into a.
-func medianAdjacentOrdered[E constraints.Ordered](data []E, a int, swaps *int) int {
- return medianOrdered(data, a-1, a, a+1, swaps)
-}
-
-func reverseRangeOrdered[E constraints.Ordered](data []E, a, b int) {
- i := a
- j := b - 1
- for i < j {
- data[i], data[j] = data[j], data[i]
- i++
- j--
- }
-}
-
-func swapRangeOrdered[E constraints.Ordered](data []E, a, b, n int) {
- for i := 0; i < n; i++ {
- data[a+i], data[b+i] = data[b+i], data[a+i]
- }
-}
-
-func stableOrdered[E constraints.Ordered](data []E, n int) {
- blockSize := 20 // must be > 0
- a, b := 0, blockSize
- for b <= n {
- insertionSortOrdered(data, a, b)
- a = b
- b += blockSize
- }
- insertionSortOrdered(data, a, n)
-
- for blockSize < n {
- a, b = 0, 2*blockSize
- for b <= n {
- symMergeOrdered(data, a, a+blockSize, b)
- a = b
- b += 2 * blockSize
- }
- if m := a + blockSize; m < n {
- symMergeOrdered(data, a, m, n)
- }
- blockSize *= 2
- }
-}
-
-// symMergeOrdered merges the two sorted subsequences data[a:m] and data[m:b] using
-// the SymMerge algorithm from Pok-Son Kim and Arne Kutzner, "Stable Minimum
-// Storage Merging by Symmetric Comparisons", in Susanne Albers and Tomasz
-// Radzik, editors, Algorithms - ESA 2004, volume 3221 of Lecture Notes in
-// Computer Science, pages 714-723. Springer, 2004.
-//
-// Let M = m-a and N = b-n. Wolog M < N.
-// The recursion depth is bound by ceil(log(N+M)).
-// The algorithm needs O(M*log(N/M + 1)) calls to data.Less.
-// The algorithm needs O((M+N)*log(M)) calls to data.Swap.
-//
-// The paper gives O((M+N)*log(M)) as the number of assignments assuming a
-// rotation algorithm which uses O(M+N+gcd(M+N)) assignments. The argumentation
-// in the paper carries through for Swap operations, especially as the block
-// swapping rotate uses only O(M+N) Swaps.
-//
-// symMerge assumes non-degenerate arguments: a < m && m < b.
-// Having the caller check this condition eliminates many leaf recursion calls,
-// which improves performance.
-func symMergeOrdered[E constraints.Ordered](data []E, a, m, b int) {
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[a] into data[m:b]
- // if data[a:m] only contains one element.
- if m-a == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] >= data[a] for m <= i < b.
- // Exit the search loop with i == b in case no such index exists.
- i := m
- j := b
- for i < j {
- h := int(uint(i+j) >> 1)
- if cmpLess(data[h], data[a]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[a] reaches the position before i.
- for k := a; k < i-1; k++ {
- data[k], data[k+1] = data[k+1], data[k]
- }
- return
- }
-
- // Avoid unnecessary recursions of symMerge
- // by direct insertion of data[m] into data[a:m]
- // if data[m:b] only contains one element.
- if b-m == 1 {
- // Use binary search to find the lowest index i
- // such that data[i] > data[m] for a <= i < m.
- // Exit the search loop with i == m in case no such index exists.
- i := a
- j := m
- for i < j {
- h := int(uint(i+j) >> 1)
- if !cmpLess(data[m], data[h]) {
- i = h + 1
- } else {
- j = h
- }
- }
- // Swap values until data[m] reaches the position i.
- for k := m; k > i; k-- {
- data[k], data[k-1] = data[k-1], data[k]
- }
- return
- }
-
- mid := int(uint(a+b) >> 1)
- n := mid + m
- var start, r int
- if m > mid {
- start = n - b
- r = mid
- } else {
- start = a
- r = m
- }
- p := n - 1
-
- for start < r {
- c := int(uint(start+r) >> 1)
- if !cmpLess(data[p-c], data[c]) {
- start = c + 1
- } else {
- r = c
- }
- }
-
- end := n - start
- if start < m && m < end {
- rotateOrdered(data, start, m, end)
- }
- if a < start && start < mid {
- symMergeOrdered(data, a, start, mid)
- }
- if mid < end && end < b {
- symMergeOrdered(data, mid, end, b)
- }
-}
-
-// rotateOrdered rotates two consecutive blocks u = data[a:m] and v = data[m:b] in data:
-// Data of the form 'x u v y' is changed to 'x v u y'.
-// rotate performs at most b-a many calls to data.Swap,
-// and it assumes non-degenerate arguments: a < m && m < b.
-func rotateOrdered[E constraints.Ordered](data []E, a, m, b int) {
- i := m - a
- j := b - m
-
- for i != j {
- if i > j {
- swapRangeOrdered(data, m-i, m, j)
- i -= j
- } else {
- swapRangeOrdered(data, m-i, m+j-i, i)
- j -= i
- }
- }
- // i == j
- swapRangeOrdered(data, m-i, m, i)
-}
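With sort.go reduced to stdlib forwarders, the two generated variant files above (zsortanyfunc.go for comparator-based sorting, zsortordered.go for ordered element types) are deleted outright. A small sketch of the stable-versus-unstable distinction those files used to implement, now supplied by the standard library; the kv type and values are illustrative only:

package main

import (
	"cmp"
	"fmt"
	"slices"
)

type kv struct {
	key string
	val int
}

func main() {
	in := []kv{{"a", 2}, {"b", 1}, {"a", 1}}
	byKey := func(x, y kv) int { return cmp.Compare(x.key, y.key) }

	unstable := slices.Clone(in)
	slices.SortFunc(unstable, byKey) // relative order of equal keys is unspecified

	stable := slices.Clone(in)
	slices.SortStableFunc(stable, byKey) // equal keys keep input order: {a 2} stays before {a 1}

	fmt.Println(unstable)
	fmt.Println(stable)
}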
diff --git a/vendor/golang.org/x/net/html/atom/table.go b/vendor/golang.org/x/net/html/atom/table.go
index 2a938864..b460e6f7 100644
--- a/vendor/golang.org/x/net/html/atom/table.go
+++ b/vendor/golang.org/x/net/html/atom/table.go
@@ -11,23 +11,23 @@ const (
AcceptCharset Atom = 0x1a0e
Accesskey Atom = 0x2c09
Acronym Atom = 0xaa07
- Action Atom = 0x27206
- Address Atom = 0x6f307
+ Action Atom = 0x26506
+ Address Atom = 0x6f107
Align Atom = 0xb105
- Allowfullscreen Atom = 0x2080f
+ Allowfullscreen Atom = 0x3280f
Allowpaymentrequest Atom = 0xc113
Allowusermedia Atom = 0xdd0e
Alt Atom = 0xf303
Annotation Atom = 0x1c90a
AnnotationXml Atom = 0x1c90e
- Applet Atom = 0x31906
- Area Atom = 0x35604
- Article Atom = 0x3fc07
+ Applet Atom = 0x30806
+ Area Atom = 0x35004
+ Article Atom = 0x3f607
As Atom = 0x3c02
Aside Atom = 0x10705
Async Atom = 0xff05
Audio Atom = 0x11505
- Autocomplete Atom = 0x2780c
+ Autocomplete Atom = 0x26b0c
Autofocus Atom = 0x12109
Autoplay Atom = 0x13c08
B Atom = 0x101
@@ -43,34 +43,34 @@ const (
Br Atom = 0x202
Button Atom = 0x19106
Canvas Atom = 0x10306
- Caption Atom = 0x23107
- Center Atom = 0x22006
- Challenge Atom = 0x29b09
+ Caption Atom = 0x22407
+ Center Atom = 0x21306
+ Challenge Atom = 0x28e09
Charset Atom = 0x2107
- Checked Atom = 0x47907
+ Checked Atom = 0x5b507
Cite Atom = 0x19c04
- Class Atom = 0x56405
- Code Atom = 0x5c504
+ Class Atom = 0x55805
+ Code Atom = 0x5ee04
Col Atom = 0x1ab03
Colgroup Atom = 0x1ab08
Color Atom = 0x1bf05
Cols Atom = 0x1c404
Colspan Atom = 0x1c407
Command Atom = 0x1d707
- Content Atom = 0x58b07
- Contenteditable Atom = 0x58b0f
- Contextmenu Atom = 0x3800b
+ Content Atom = 0x57b07
+ Contenteditable Atom = 0x57b0f
+ Contextmenu Atom = 0x37a0b
Controls Atom = 0x1de08
- Coords Atom = 0x1ea06
- Crossorigin Atom = 0x1fb0b
- Data Atom = 0x4a504
- Datalist Atom = 0x4a508
- Datetime Atom = 0x2b808
- Dd Atom = 0x2d702
+ Coords Atom = 0x1f006
+ Crossorigin Atom = 0x1fa0b
+ Data Atom = 0x49904
+ Datalist Atom = 0x49908
+ Datetime Atom = 0x2ab08
+ Dd Atom = 0x2bf02
Default Atom = 0x10a07
- Defer Atom = 0x5c705
- Del Atom = 0x45203
- Desc Atom = 0x56104
+ Defer Atom = 0x5f005
+ Del Atom = 0x44c03
+ Desc Atom = 0x55504
Details Atom = 0x7207
Dfn Atom = 0x8703
Dialog Atom = 0xbb06
@@ -78,106 +78,106 @@ const (
Dirname Atom = 0x9307
Disabled Atom = 0x16408
Div Atom = 0x16b03
- Dl Atom = 0x5e602
- Download Atom = 0x46308
+ Dl Atom = 0x5d602
+ Download Atom = 0x45d08
Draggable Atom = 0x17a09
- Dropzone Atom = 0x40508
- Dt Atom = 0x64b02
+ Dropzone Atom = 0x3ff08
+ Dt Atom = 0x64002
Em Atom = 0x6e02
Embed Atom = 0x6e05
- Enctype Atom = 0x28d07
- Face Atom = 0x21e04
- Fieldset Atom = 0x22608
- Figcaption Atom = 0x22e0a
- Figure Atom = 0x24806
+ Enctype Atom = 0x28007
+ Face Atom = 0x21104
+ Fieldset Atom = 0x21908
+ Figcaption Atom = 0x2210a
+ Figure Atom = 0x23b06
Font Atom = 0x3f04
Footer Atom = 0xf606
- For Atom = 0x25403
- ForeignObject Atom = 0x2540d
- Foreignobject Atom = 0x2610d
- Form Atom = 0x26e04
- Formaction Atom = 0x26e0a
- Formenctype Atom = 0x2890b
- Formmethod Atom = 0x2a40a
- Formnovalidate Atom = 0x2ae0e
- Formtarget Atom = 0x2c00a
+ For Atom = 0x24703
+ ForeignObject Atom = 0x2470d
+ Foreignobject Atom = 0x2540d
+ Form Atom = 0x26104
+ Formaction Atom = 0x2610a
+ Formenctype Atom = 0x27c0b
+ Formmethod Atom = 0x2970a
+ Formnovalidate Atom = 0x2a10e
+ Formtarget Atom = 0x2b30a
Frame Atom = 0x8b05
Frameset Atom = 0x8b08
H1 Atom = 0x15c02
- H2 Atom = 0x2de02
- H3 Atom = 0x30d02
- H4 Atom = 0x34502
- H5 Atom = 0x34f02
- H6 Atom = 0x64d02
- Head Atom = 0x33104
- Header Atom = 0x33106
- Headers Atom = 0x33107
+ H2 Atom = 0x56102
+ H3 Atom = 0x2cd02
+ H4 Atom = 0x2fc02
+ H5 Atom = 0x33f02
+ H6 Atom = 0x34902
+ Head Atom = 0x32004
+ Header Atom = 0x32006
+ Headers Atom = 0x32007
Height Atom = 0x5206
- Hgroup Atom = 0x2ca06
- Hidden Atom = 0x2d506
- High Atom = 0x2db04
+ Hgroup Atom = 0x64206
+ Hidden Atom = 0x2bd06
+ High Atom = 0x2ca04
Hr Atom = 0x15702
- Href Atom = 0x2e004
- Hreflang Atom = 0x2e008
+ Href Atom = 0x2cf04
+ Hreflang Atom = 0x2cf08
Html Atom = 0x5604
- HttpEquiv Atom = 0x2e80a
+ HttpEquiv Atom = 0x2d70a
I Atom = 0x601
- Icon Atom = 0x58a04
+ Icon Atom = 0x57a04
Id Atom = 0x10902
- Iframe Atom = 0x2fc06
- Image Atom = 0x30205
- Img Atom = 0x30703
- Input Atom = 0x44b05
- Inputmode Atom = 0x44b09
- Ins Atom = 0x20403
- Integrity Atom = 0x23f09
+ Iframe Atom = 0x2eb06
+ Image Atom = 0x2f105
+ Img Atom = 0x2f603
+ Input Atom = 0x44505
+ Inputmode Atom = 0x44509
+ Ins Atom = 0x20303
+ Integrity Atom = 0x23209
Is Atom = 0x16502
- Isindex Atom = 0x30f07
- Ismap Atom = 0x31605
- Itemid Atom = 0x38b06
+ Isindex Atom = 0x2fe07
+ Ismap Atom = 0x30505
+ Itemid Atom = 0x38506
Itemprop Atom = 0x19d08
- Itemref Atom = 0x3cd07
- Itemscope Atom = 0x67109
- Itemtype Atom = 0x31f08
+ Itemref Atom = 0x3c707
+ Itemscope Atom = 0x66f09
+ Itemtype Atom = 0x30e08
Kbd Atom = 0xb903
Keygen Atom = 0x3206
Keytype Atom = 0xd607
Kind Atom = 0x17704
Label Atom = 0x5905
- Lang Atom = 0x2e404
+ Lang Atom = 0x2d304
Legend Atom = 0x18106
Li Atom = 0xb202
Link Atom = 0x17404
- List Atom = 0x4a904
- Listing Atom = 0x4a907
+ List Atom = 0x49d04
+ Listing Atom = 0x49d07
Loop Atom = 0x5d04
Low Atom = 0xc303
Main Atom = 0x1004
Malignmark Atom = 0xb00a
- Manifest Atom = 0x6d708
- Map Atom = 0x31803
+ Manifest Atom = 0x6d508
+ Map Atom = 0x30703
Mark Atom = 0xb604
- Marquee Atom = 0x32707
- Math Atom = 0x32e04
- Max Atom = 0x33d03
- Maxlength Atom = 0x33d09
+ Marquee Atom = 0x31607
+ Math Atom = 0x31d04
+ Max Atom = 0x33703
+ Maxlength Atom = 0x33709
Media Atom = 0xe605
Mediagroup Atom = 0xe60a
- Menu Atom = 0x38704
- Menuitem Atom = 0x38708
- Meta Atom = 0x4b804
+ Menu Atom = 0x38104
+ Menuitem Atom = 0x38108
+ Meta Atom = 0x4ac04
Meter Atom = 0x9805
- Method Atom = 0x2a806
- Mglyph Atom = 0x30806
- Mi Atom = 0x34702
- Min Atom = 0x34703
- Minlength Atom = 0x34709
- Mn Atom = 0x2b102
+ Method Atom = 0x29b06
+ Mglyph Atom = 0x2f706
+ Mi Atom = 0x34102
+ Min Atom = 0x34103
+ Minlength Atom = 0x34109
+ Mn Atom = 0x2a402
Mo Atom = 0xa402
- Ms Atom = 0x67402
- Mtext Atom = 0x35105
- Multiple Atom = 0x35f08
- Muted Atom = 0x36705
+ Ms Atom = 0x67202
+ Mtext Atom = 0x34b05
+ Multiple Atom = 0x35908
+ Muted Atom = 0x36105
Name Atom = 0x9604
Nav Atom = 0x1303
Nobr Atom = 0x3704
@@ -185,101 +185,101 @@ const (
Noframes Atom = 0x8908
Nomodule Atom = 0xa208
Nonce Atom = 0x1a605
- Noscript Atom = 0x21608
- Novalidate Atom = 0x2b20a
- Object Atom = 0x26806
+ Noscript Atom = 0x2c208
+ Novalidate Atom = 0x2a50a
+ Object Atom = 0x25b06
Ol Atom = 0x13702
Onabort Atom = 0x19507
- Onafterprint Atom = 0x2360c
- Onautocomplete Atom = 0x2760e
- Onautocompleteerror Atom = 0x27613
- Onauxclick Atom = 0x61f0a
- Onbeforeprint Atom = 0x69e0d
- Onbeforeunload Atom = 0x6e70e
- Onblur Atom = 0x56d06
+ Onafterprint Atom = 0x2290c
+ Onautocomplete Atom = 0x2690e
+ Onautocompleteerror Atom = 0x26913
+ Onauxclick Atom = 0x6140a
+ Onbeforeprint Atom = 0x69c0d
+ Onbeforeunload Atom = 0x6e50e
+ Onblur Atom = 0x1ea06
Oncancel Atom = 0x11908
Oncanplay Atom = 0x14d09
Oncanplaythrough Atom = 0x14d10
- Onchange Atom = 0x41b08
- Onclick Atom = 0x2f507
- Onclose Atom = 0x36c07
- Oncontextmenu Atom = 0x37e0d
- Oncopy Atom = 0x39106
- Oncuechange Atom = 0x3970b
- Oncut Atom = 0x3a205
- Ondblclick Atom = 0x3a70a
- Ondrag Atom = 0x3b106
- Ondragend Atom = 0x3b109
- Ondragenter Atom = 0x3ba0b
- Ondragexit Atom = 0x3c50a
- Ondragleave Atom = 0x3df0b
- Ondragover Atom = 0x3ea0a
- Ondragstart Atom = 0x3f40b
- Ondrop Atom = 0x40306
- Ondurationchange Atom = 0x41310
- Onemptied Atom = 0x40a09
- Onended Atom = 0x42307
- Onerror Atom = 0x42a07
- Onfocus Atom = 0x43107
- Onhashchange Atom = 0x43d0c
- Oninput Atom = 0x44907
- Oninvalid Atom = 0x45509
- Onkeydown Atom = 0x45e09
- Onkeypress Atom = 0x46b0a
- Onkeyup Atom = 0x48007
- Onlanguagechange Atom = 0x48d10
- Onload Atom = 0x49d06
- Onloadeddata Atom = 0x49d0c
- Onloadedmetadata Atom = 0x4b010
- Onloadend Atom = 0x4c609
- Onloadstart Atom = 0x4cf0b
- Onmessage Atom = 0x4da09
- Onmessageerror Atom = 0x4da0e
- Onmousedown Atom = 0x4e80b
- Onmouseenter Atom = 0x4f30c
- Onmouseleave Atom = 0x4ff0c
- Onmousemove Atom = 0x50b0b
- Onmouseout Atom = 0x5160a
- Onmouseover Atom = 0x5230b
- Onmouseup Atom = 0x52e09
- Onmousewheel Atom = 0x53c0c
- Onoffline Atom = 0x54809
- Ononline Atom = 0x55108
- Onpagehide Atom = 0x5590a
- Onpageshow Atom = 0x5730a
- Onpaste Atom = 0x57f07
- Onpause Atom = 0x59a07
- Onplay Atom = 0x5a406
- Onplaying Atom = 0x5a409
- Onpopstate Atom = 0x5ad0a
- Onprogress Atom = 0x5b70a
- Onratechange Atom = 0x5cc0c
- Onrejectionhandled Atom = 0x5d812
- Onreset Atom = 0x5ea07
- Onresize Atom = 0x5f108
- Onscroll Atom = 0x60008
- Onsecuritypolicyviolation Atom = 0x60819
- Onseeked Atom = 0x62908
- Onseeking Atom = 0x63109
- Onselect Atom = 0x63a08
- Onshow Atom = 0x64406
- Onsort Atom = 0x64f06
- Onstalled Atom = 0x65909
- Onstorage Atom = 0x66209
- Onsubmit Atom = 0x66b08
- Onsuspend Atom = 0x67b09
+ Onchange Atom = 0x41508
+ Onclick Atom = 0x2e407
+ Onclose Atom = 0x36607
+ Oncontextmenu Atom = 0x3780d
+ Oncopy Atom = 0x38b06
+ Oncuechange Atom = 0x3910b
+ Oncut Atom = 0x39c05
+ Ondblclick Atom = 0x3a10a
+ Ondrag Atom = 0x3ab06
+ Ondragend Atom = 0x3ab09
+ Ondragenter Atom = 0x3b40b
+ Ondragexit Atom = 0x3bf0a
+ Ondragleave Atom = 0x3d90b
+ Ondragover Atom = 0x3e40a
+ Ondragstart Atom = 0x3ee0b
+ Ondrop Atom = 0x3fd06
+ Ondurationchange Atom = 0x40d10
+ Onemptied Atom = 0x40409
+ Onended Atom = 0x41d07
+ Onerror Atom = 0x42407
+ Onfocus Atom = 0x42b07
+ Onhashchange Atom = 0x4370c
+ Oninput Atom = 0x44307
+ Oninvalid Atom = 0x44f09
+ Onkeydown Atom = 0x45809
+ Onkeypress Atom = 0x4650a
+ Onkeyup Atom = 0x47407
+ Onlanguagechange Atom = 0x48110
+ Onload Atom = 0x49106
+ Onloadeddata Atom = 0x4910c
+ Onloadedmetadata Atom = 0x4a410
+ Onloadend Atom = 0x4ba09
+ Onloadstart Atom = 0x4c30b
+ Onmessage Atom = 0x4ce09
+ Onmessageerror Atom = 0x4ce0e
+ Onmousedown Atom = 0x4dc0b
+ Onmouseenter Atom = 0x4e70c
+ Onmouseleave Atom = 0x4f30c
+ Onmousemove Atom = 0x4ff0b
+ Onmouseout Atom = 0x50a0a
+ Onmouseover Atom = 0x5170b
+ Onmouseup Atom = 0x52209
+ Onmousewheel Atom = 0x5300c
+ Onoffline Atom = 0x53c09
+ Ononline Atom = 0x54508
+ Onpagehide Atom = 0x54d0a
+ Onpageshow Atom = 0x5630a
+ Onpaste Atom = 0x56f07
+ Onpause Atom = 0x58a07
+ Onplay Atom = 0x59406
+ Onplaying Atom = 0x59409
+ Onpopstate Atom = 0x59d0a
+ Onprogress Atom = 0x5a70a
+ Onratechange Atom = 0x5bc0c
+ Onrejectionhandled Atom = 0x5c812
+ Onreset Atom = 0x5da07
+ Onresize Atom = 0x5e108
+ Onscroll Atom = 0x5f508
+ Onsecuritypolicyviolation Atom = 0x5fd19
+ Onseeked Atom = 0x61e08
+ Onseeking Atom = 0x62609
+ Onselect Atom = 0x62f08
+ Onshow Atom = 0x63906
+ Onsort Atom = 0x64d06
+ Onstalled Atom = 0x65709
+ Onstorage Atom = 0x66009
+ Onsubmit Atom = 0x66908
+ Onsuspend Atom = 0x67909
Ontimeupdate Atom = 0x400c
- Ontoggle Atom = 0x68408
- Onunhandledrejection Atom = 0x68c14
- Onunload Atom = 0x6ab08
- Onvolumechange Atom = 0x6b30e
- Onwaiting Atom = 0x6c109
- Onwheel Atom = 0x6ca07
+ Ontoggle Atom = 0x68208
+ Onunhandledrejection Atom = 0x68a14
+ Onunload Atom = 0x6a908
+ Onvolumechange Atom = 0x6b10e
+ Onwaiting Atom = 0x6bf09
+ Onwheel Atom = 0x6c807
Open Atom = 0x1a304
Optgroup Atom = 0x5f08
- Optimum Atom = 0x6d107
- Option Atom = 0x6e306
- Output Atom = 0x51d06
+ Optimum Atom = 0x6cf07
+ Option Atom = 0x6e106
+ Output Atom = 0x51106
P Atom = 0xc01
Param Atom = 0xc05
Pattern Atom = 0x6607
@@ -288,466 +288,468 @@ const (
Placeholder Atom = 0x1310b
Plaintext Atom = 0x1b209
Playsinline Atom = 0x1400b
- Poster Atom = 0x2cf06
- Pre Atom = 0x47003
- Preload Atom = 0x48607
- Progress Atom = 0x5b908
- Prompt Atom = 0x53606
- Public Atom = 0x58606
+ Poster Atom = 0x64706
+ Pre Atom = 0x46a03
+ Preload Atom = 0x47a07
+ Progress Atom = 0x5a908
+ Prompt Atom = 0x52a06
+ Public Atom = 0x57606
Q Atom = 0xcf01
Radiogroup Atom = 0x30a
Rb Atom = 0x3a02
- Readonly Atom = 0x35708
- Referrerpolicy Atom = 0x3d10e
- Rel Atom = 0x48703
- Required Atom = 0x24c08
+ Readonly Atom = 0x35108
+ Referrerpolicy Atom = 0x3cb0e
+ Rel Atom = 0x47b03
+ Required Atom = 0x23f08
Reversed Atom = 0x8008
Rows Atom = 0x9c04
Rowspan Atom = 0x9c07
- Rp Atom = 0x23c02
+ Rp Atom = 0x22f02
Rt Atom = 0x19a02
Rtc Atom = 0x19a03
Ruby Atom = 0xfb04
S Atom = 0x2501
Samp Atom = 0x7804
Sandbox Atom = 0x12907
- Scope Atom = 0x67505
- Scoped Atom = 0x67506
- Script Atom = 0x21806
- Seamless Atom = 0x37108
- Section Atom = 0x56807
- Select Atom = 0x63c06
- Selected Atom = 0x63c08
- Shape Atom = 0x1e505
- Size Atom = 0x5f504
- Sizes Atom = 0x5f505
- Slot Atom = 0x1ef04
- Small Atom = 0x20605
- Sortable Atom = 0x65108
- Sorted Atom = 0x33706
- Source Atom = 0x37806
- Spacer Atom = 0x43706
+ Scope Atom = 0x67305
+ Scoped Atom = 0x67306
+ Script Atom = 0x2c406
+ Seamless Atom = 0x36b08
+ Search Atom = 0x55c06
+ Section Atom = 0x1e507
+ Select Atom = 0x63106
+ Selected Atom = 0x63108
+ Shape Atom = 0x1f505
+ Size Atom = 0x5e504
+ Sizes Atom = 0x5e505
+ Slot Atom = 0x20504
+ Small Atom = 0x32605
+ Sortable Atom = 0x64f08
+ Sorted Atom = 0x37206
+ Source Atom = 0x43106
+ Spacer Atom = 0x46e06
Span Atom = 0x9f04
- Spellcheck Atom = 0x4740a
- Src Atom = 0x5c003
- Srcdoc Atom = 0x5c006
- Srclang Atom = 0x5f907
- Srcset Atom = 0x6f906
- Start Atom = 0x3fa05
- Step Atom = 0x58304
+ Spellcheck Atom = 0x5b00a
+ Src Atom = 0x5e903
+ Srcdoc Atom = 0x5e906
+ Srclang Atom = 0x6f707
+ Srcset Atom = 0x6fe06
+ Start Atom = 0x3f405
+ Step Atom = 0x57304
Strike Atom = 0xd206
- Strong Atom = 0x6dd06
- Style Atom = 0x6ff05
- Sub Atom = 0x66d03
- Summary Atom = 0x70407
- Sup Atom = 0x70b03
- Svg Atom = 0x70e03
- System Atom = 0x71106
- Tabindex Atom = 0x4be08
- Table Atom = 0x59505
- Target Atom = 0x2c406
+ Strong Atom = 0x6db06
+ Style Atom = 0x70405
+ Sub Atom = 0x66b03
+ Summary Atom = 0x70907
+ Sup Atom = 0x71003
+ Svg Atom = 0x71303
+ System Atom = 0x71606
+ Tabindex Atom = 0x4b208
+ Table Atom = 0x58505
+ Target Atom = 0x2b706
Tbody Atom = 0x2705
Td Atom = 0x9202
- Template Atom = 0x71408
- Textarea Atom = 0x35208
+ Template Atom = 0x71908
+ Textarea Atom = 0x34c08
Tfoot Atom = 0xf505
Th Atom = 0x15602
- Thead Atom = 0x33005
+ Thead Atom = 0x31f05
Time Atom = 0x4204
Title Atom = 0x11005
Tr Atom = 0xcc02
Track Atom = 0x1ba05
- Translate Atom = 0x1f209
+ Translate Atom = 0x20809
Tt Atom = 0x6802
Type Atom = 0xd904
- Typemustmatch Atom = 0x2900d
+ Typemustmatch Atom = 0x2830d
U Atom = 0xb01
Ul Atom = 0xa702
Updateviacache Atom = 0x460e
- Usemap Atom = 0x59e06
+ Usemap Atom = 0x58e06
Value Atom = 0x1505
Var Atom = 0x16d03
- Video Atom = 0x2f105
- Wbr Atom = 0x57c03
- Width Atom = 0x64905
- Workertype Atom = 0x71c0a
- Wrap Atom = 0x72604
+ Video Atom = 0x2e005
+ Wbr Atom = 0x56c03
+ Width Atom = 0x63e05
+ Workertype Atom = 0x7210a
+ Wrap Atom = 0x72b04
Xmp Atom = 0x12f03
)
-const hash0 = 0x81cdf10e
+const hash0 = 0x84f70e16
const maxAtomLen = 25
var table = [1 << 9]Atom{
- 0x1: 0xe60a, // mediagroup
- 0x2: 0x2e404, // lang
- 0x4: 0x2c09, // accesskey
- 0x5: 0x8b08, // frameset
- 0x7: 0x63a08, // onselect
- 0x8: 0x71106, // system
- 0xa: 0x64905, // width
- 0xc: 0x2890b, // formenctype
- 0xd: 0x13702, // ol
- 0xe: 0x3970b, // oncuechange
- 0x10: 0x14b03, // bdo
- 0x11: 0x11505, // audio
- 0x12: 0x17a09, // draggable
- 0x14: 0x2f105, // video
- 0x15: 0x2b102, // mn
- 0x16: 0x38704, // menu
- 0x17: 0x2cf06, // poster
- 0x19: 0xf606, // footer
- 0x1a: 0x2a806, // method
- 0x1b: 0x2b808, // datetime
- 0x1c: 0x19507, // onabort
- 0x1d: 0x460e, // updateviacache
- 0x1e: 0xff05, // async
- 0x1f: 0x49d06, // onload
- 0x21: 0x11908, // oncancel
- 0x22: 0x62908, // onseeked
- 0x23: 0x30205, // image
- 0x24: 0x5d812, // onrejectionhandled
- 0x26: 0x17404, // link
- 0x27: 0x51d06, // output
- 0x28: 0x33104, // head
- 0x29: 0x4ff0c, // onmouseleave
- 0x2a: 0x57f07, // onpaste
- 0x2b: 0x5a409, // onplaying
- 0x2c: 0x1c407, // colspan
- 0x2f: 0x1bf05, // color
- 0x30: 0x5f504, // size
- 0x31: 0x2e80a, // http-equiv
- 0x33: 0x601, // i
- 0x34: 0x5590a, // onpagehide
- 0x35: 0x68c14, // onunhandledrejection
- 0x37: 0x42a07, // onerror
- 0x3a: 0x3b08, // basefont
- 0x3f: 0x1303, // nav
- 0x40: 0x17704, // kind
- 0x41: 0x35708, // readonly
- 0x42: 0x30806, // mglyph
- 0x44: 0xb202, // li
- 0x46: 0x2d506, // hidden
- 0x47: 0x70e03, // svg
- 0x48: 0x58304, // step
- 0x49: 0x23f09, // integrity
- 0x4a: 0x58606, // public
- 0x4c: 0x1ab03, // col
- 0x4d: 0x1870a, // blockquote
- 0x4e: 0x34f02, // h5
- 0x50: 0x5b908, // progress
- 0x51: 0x5f505, // sizes
- 0x52: 0x34502, // h4
- 0x56: 0x33005, // thead
- 0x57: 0xd607, // keytype
- 0x58: 0x5b70a, // onprogress
- 0x59: 0x44b09, // inputmode
- 0x5a: 0x3b109, // ondragend
- 0x5d: 0x3a205, // oncut
- 0x5e: 0x43706, // spacer
- 0x5f: 0x1ab08, // colgroup
- 0x62: 0x16502, // is
- 0x65: 0x3c02, // as
- 0x66: 0x54809, // onoffline
- 0x67: 0x33706, // sorted
- 0x69: 0x48d10, // onlanguagechange
- 0x6c: 0x43d0c, // onhashchange
- 0x6d: 0x9604, // name
- 0x6e: 0xf505, // tfoot
- 0x6f: 0x56104, // desc
- 0x70: 0x33d03, // max
- 0x72: 0x1ea06, // coords
- 0x73: 0x30d02, // h3
- 0x74: 0x6e70e, // onbeforeunload
- 0x75: 0x9c04, // rows
- 0x76: 0x63c06, // select
- 0x77: 0x9805, // meter
- 0x78: 0x38b06, // itemid
- 0x79: 0x53c0c, // onmousewheel
- 0x7a: 0x5c006, // srcdoc
- 0x7d: 0x1ba05, // track
- 0x7f: 0x31f08, // itemtype
- 0x82: 0xa402, // mo
- 0x83: 0x41b08, // onchange
- 0x84: 0x33107, // headers
- 0x85: 0x5cc0c, // onratechange
- 0x86: 0x60819, // onsecuritypolicyviolation
- 0x88: 0x4a508, // datalist
- 0x89: 0x4e80b, // onmousedown
- 0x8a: 0x1ef04, // slot
- 0x8b: 0x4b010, // onloadedmetadata
- 0x8c: 0x1a06, // accept
- 0x8d: 0x26806, // object
- 0x91: 0x6b30e, // onvolumechange
- 0x92: 0x2107, // charset
- 0x93: 0x27613, // onautocompleteerror
- 0x94: 0xc113, // allowpaymentrequest
- 0x95: 0x2804, // body
- 0x96: 0x10a07, // default
- 0x97: 0x63c08, // selected
- 0x98: 0x21e04, // face
- 0x99: 0x1e505, // shape
- 0x9b: 0x68408, // ontoggle
- 0x9e: 0x64b02, // dt
- 0x9f: 0xb604, // mark
- 0xa1: 0xb01, // u
- 0xa4: 0x6ab08, // onunload
- 0xa5: 0x5d04, // loop
- 0xa6: 0x16408, // disabled
- 0xaa: 0x42307, // onended
- 0xab: 0xb00a, // malignmark
- 0xad: 0x67b09, // onsuspend
- 0xae: 0x35105, // mtext
- 0xaf: 0x64f06, // onsort
- 0xb0: 0x19d08, // itemprop
- 0xb3: 0x67109, // itemscope
- 0xb4: 0x17305, // blink
- 0xb6: 0x3b106, // ondrag
- 0xb7: 0xa702, // ul
- 0xb8: 0x26e04, // form
- 0xb9: 0x12907, // sandbox
- 0xba: 0x8b05, // frame
- 0xbb: 0x1505, // value
- 0xbc: 0x66209, // onstorage
- 0xbf: 0xaa07, // acronym
- 0xc0: 0x19a02, // rt
- 0xc2: 0x202, // br
- 0xc3: 0x22608, // fieldset
- 0xc4: 0x2900d, // typemustmatch
- 0xc5: 0xa208, // nomodule
- 0xc6: 0x6c07, // noembed
- 0xc7: 0x69e0d, // onbeforeprint
- 0xc8: 0x19106, // button
- 0xc9: 0x2f507, // onclick
- 0xca: 0x70407, // summary
- 0xcd: 0xfb04, // ruby
- 0xce: 0x56405, // class
- 0xcf: 0x3f40b, // ondragstart
- 0xd0: 0x23107, // caption
- 0xd4: 0xdd0e, // allowusermedia
- 0xd5: 0x4cf0b, // onloadstart
- 0xd9: 0x16b03, // div
- 0xda: 0x4a904, // list
- 0xdb: 0x32e04, // math
- 0xdc: 0x44b05, // input
- 0xdf: 0x3ea0a, // ondragover
- 0xe0: 0x2de02, // h2
- 0xe2: 0x1b209, // plaintext
- 0xe4: 0x4f30c, // onmouseenter
- 0xe7: 0x47907, // checked
- 0xe8: 0x47003, // pre
- 0xea: 0x35f08, // multiple
- 0xeb: 0xba03, // bdi
- 0xec: 0x33d09, // maxlength
- 0xed: 0xcf01, // q
- 0xee: 0x61f0a, // onauxclick
- 0xf0: 0x57c03, // wbr
- 0xf2: 0x3b04, // base
- 0xf3: 0x6e306, // option
- 0xf5: 0x41310, // ondurationchange
- 0xf7: 0x8908, // noframes
- 0xf9: 0x40508, // dropzone
- 0xfb: 0x67505, // scope
- 0xfc: 0x8008, // reversed
- 0xfd: 0x3ba0b, // ondragenter
- 0xfe: 0x3fa05, // start
- 0xff: 0x12f03, // xmp
- 0x100: 0x5f907, // srclang
- 0x101: 0x30703, // img
- 0x104: 0x101, // b
- 0x105: 0x25403, // for
- 0x106: 0x10705, // aside
- 0x107: 0x44907, // oninput
- 0x108: 0x35604, // area
- 0x109: 0x2a40a, // formmethod
- 0x10a: 0x72604, // wrap
- 0x10c: 0x23c02, // rp
- 0x10d: 0x46b0a, // onkeypress
- 0x10e: 0x6802, // tt
- 0x110: 0x34702, // mi
- 0x111: 0x36705, // muted
- 0x112: 0xf303, // alt
- 0x113: 0x5c504, // code
- 0x114: 0x6e02, // em
- 0x115: 0x3c50a, // ondragexit
- 0x117: 0x9f04, // span
- 0x119: 0x6d708, // manifest
- 0x11a: 0x38708, // menuitem
- 0x11b: 0x58b07, // content
- 0x11d: 0x6c109, // onwaiting
- 0x11f: 0x4c609, // onloadend
- 0x121: 0x37e0d, // oncontextmenu
- 0x123: 0x56d06, // onblur
- 0x124: 0x3fc07, // article
- 0x125: 0x9303, // dir
- 0x126: 0xef04, // ping
- 0x127: 0x24c08, // required
- 0x128: 0x45509, // oninvalid
- 0x129: 0xb105, // align
- 0x12b: 0x58a04, // icon
- 0x12c: 0x64d02, // h6
- 0x12d: 0x1c404, // cols
- 0x12e: 0x22e0a, // figcaption
- 0x12f: 0x45e09, // onkeydown
- 0x130: 0x66b08, // onsubmit
- 0x131: 0x14d09, // oncanplay
- 0x132: 0x70b03, // sup
- 0x133: 0xc01, // p
- 0x135: 0x40a09, // onemptied
- 0x136: 0x39106, // oncopy
- 0x137: 0x19c04, // cite
- 0x138: 0x3a70a, // ondblclick
- 0x13a: 0x50b0b, // onmousemove
- 0x13c: 0x66d03, // sub
- 0x13d: 0x48703, // rel
- 0x13e: 0x5f08, // optgroup
- 0x142: 0x9c07, // rowspan
- 0x143: 0x37806, // source
- 0x144: 0x21608, // noscript
- 0x145: 0x1a304, // open
- 0x146: 0x20403, // ins
- 0x147: 0x2540d, // foreignObject
- 0x148: 0x5ad0a, // onpopstate
- 0x14a: 0x28d07, // enctype
- 0x14b: 0x2760e, // onautocomplete
- 0x14c: 0x35208, // textarea
- 0x14e: 0x2780c, // autocomplete
- 0x14f: 0x15702, // hr
- 0x150: 0x1de08, // controls
- 0x151: 0x10902, // id
- 0x153: 0x2360c, // onafterprint
- 0x155: 0x2610d, // foreignobject
- 0x156: 0x32707, // marquee
- 0x157: 0x59a07, // onpause
- 0x158: 0x5e602, // dl
- 0x159: 0x5206, // height
- 0x15a: 0x34703, // min
- 0x15b: 0x9307, // dirname
- 0x15c: 0x1f209, // translate
- 0x15d: 0x5604, // html
- 0x15e: 0x34709, // minlength
- 0x15f: 0x48607, // preload
- 0x160: 0x71408, // template
- 0x161: 0x3df0b, // ondragleave
- 0x162: 0x3a02, // rb
- 0x164: 0x5c003, // src
- 0x165: 0x6dd06, // strong
- 0x167: 0x7804, // samp
- 0x168: 0x6f307, // address
- 0x169: 0x55108, // ononline
- 0x16b: 0x1310b, // placeholder
- 0x16c: 0x2c406, // target
- 0x16d: 0x20605, // small
- 0x16e: 0x6ca07, // onwheel
- 0x16f: 0x1c90a, // annotation
- 0x170: 0x4740a, // spellcheck
- 0x171: 0x7207, // details
- 0x172: 0x10306, // canvas
- 0x173: 0x12109, // autofocus
- 0x174: 0xc05, // param
- 0x176: 0x46308, // download
- 0x177: 0x45203, // del
- 0x178: 0x36c07, // onclose
- 0x179: 0xb903, // kbd
- 0x17a: 0x31906, // applet
- 0x17b: 0x2e004, // href
- 0x17c: 0x5f108, // onresize
- 0x17e: 0x49d0c, // onloadeddata
- 0x180: 0xcc02, // tr
- 0x181: 0x2c00a, // formtarget
- 0x182: 0x11005, // title
- 0x183: 0x6ff05, // style
- 0x184: 0xd206, // strike
- 0x185: 0x59e06, // usemap
- 0x186: 0x2fc06, // iframe
- 0x187: 0x1004, // main
- 0x189: 0x7b07, // picture
- 0x18c: 0x31605, // ismap
- 0x18e: 0x4a504, // data
- 0x18f: 0x5905, // label
- 0x191: 0x3d10e, // referrerpolicy
- 0x192: 0x15602, // th
- 0x194: 0x53606, // prompt
- 0x195: 0x56807, // section
- 0x197: 0x6d107, // optimum
- 0x198: 0x2db04, // high
- 0x199: 0x15c02, // h1
- 0x19a: 0x65909, // onstalled
- 0x19b: 0x16d03, // var
- 0x19c: 0x4204, // time
- 0x19e: 0x67402, // ms
- 0x19f: 0x33106, // header
- 0x1a0: 0x4da09, // onmessage
- 0x1a1: 0x1a605, // nonce
- 0x1a2: 0x26e0a, // formaction
- 0x1a3: 0x22006, // center
- 0x1a4: 0x3704, // nobr
- 0x1a5: 0x59505, // table
- 0x1a6: 0x4a907, // listing
- 0x1a7: 0x18106, // legend
- 0x1a9: 0x29b09, // challenge
- 0x1aa: 0x24806, // figure
- 0x1ab: 0xe605, // media
- 0x1ae: 0xd904, // type
- 0x1af: 0x3f04, // font
- 0x1b0: 0x4da0e, // onmessageerror
- 0x1b1: 0x37108, // seamless
- 0x1b2: 0x8703, // dfn
- 0x1b3: 0x5c705, // defer
- 0x1b4: 0xc303, // low
- 0x1b5: 0x19a03, // rtc
- 0x1b6: 0x5230b, // onmouseover
- 0x1b7: 0x2b20a, // novalidate
- 0x1b8: 0x71c0a, // workertype
- 0x1ba: 0x3cd07, // itemref
- 0x1bd: 0x1, // a
- 0x1be: 0x31803, // map
- 0x1bf: 0x400c, // ontimeupdate
- 0x1c0: 0x15e07, // bgsound
- 0x1c1: 0x3206, // keygen
- 0x1c2: 0x2705, // tbody
- 0x1c5: 0x64406, // onshow
- 0x1c7: 0x2501, // s
- 0x1c8: 0x6607, // pattern
- 0x1cc: 0x14d10, // oncanplaythrough
- 0x1ce: 0x2d702, // dd
- 0x1cf: 0x6f906, // srcset
- 0x1d0: 0x17003, // big
- 0x1d2: 0x65108, // sortable
- 0x1d3: 0x48007, // onkeyup
- 0x1d5: 0x5a406, // onplay
- 0x1d7: 0x4b804, // meta
- 0x1d8: 0x40306, // ondrop
- 0x1da: 0x60008, // onscroll
- 0x1db: 0x1fb0b, // crossorigin
- 0x1dc: 0x5730a, // onpageshow
- 0x1dd: 0x4, // abbr
- 0x1de: 0x9202, // td
- 0x1df: 0x58b0f, // contenteditable
- 0x1e0: 0x27206, // action
- 0x1e1: 0x1400b, // playsinline
- 0x1e2: 0x43107, // onfocus
- 0x1e3: 0x2e008, // hreflang
- 0x1e5: 0x5160a, // onmouseout
- 0x1e6: 0x5ea07, // onreset
- 0x1e7: 0x13c08, // autoplay
- 0x1e8: 0x63109, // onseeking
- 0x1ea: 0x67506, // scoped
- 0x1ec: 0x30a, // radiogroup
- 0x1ee: 0x3800b, // contextmenu
- 0x1ef: 0x52e09, // onmouseup
- 0x1f1: 0x2ca06, // hgroup
- 0x1f2: 0x2080f, // allowfullscreen
- 0x1f3: 0x4be08, // tabindex
- 0x1f6: 0x30f07, // isindex
- 0x1f7: 0x1a0e, // accept-charset
- 0x1f8: 0x2ae0e, // formnovalidate
- 0x1fb: 0x1c90e, // annotation-xml
- 0x1fc: 0x6e05, // embed
- 0x1fd: 0x21806, // script
- 0x1fe: 0xbb06, // dialog
- 0x1ff: 0x1d707, // command
+ 0x1: 0x3ff08, // dropzone
+ 0x2: 0x3b08, // basefont
+ 0x3: 0x23209, // integrity
+ 0x4: 0x43106, // source
+ 0x5: 0x2c09, // accesskey
+ 0x6: 0x1a06, // accept
+ 0x7: 0x6c807, // onwheel
+ 0xb: 0x47407, // onkeyup
+ 0xc: 0x32007, // headers
+ 0xd: 0x67306, // scoped
+ 0xe: 0x67909, // onsuspend
+ 0xf: 0x8908, // noframes
+ 0x10: 0x1fa0b, // crossorigin
+ 0x11: 0x2e407, // onclick
+ 0x12: 0x3f405, // start
+ 0x13: 0x37a0b, // contextmenu
+ 0x14: 0x5e903, // src
+ 0x15: 0x1c404, // cols
+ 0x16: 0xbb06, // dialog
+ 0x17: 0x47a07, // preload
+ 0x18: 0x3c707, // itemref
+ 0x1b: 0x2f105, // image
+ 0x1d: 0x4ba09, // onloadend
+ 0x1e: 0x45d08, // download
+ 0x1f: 0x46a03, // pre
+ 0x23: 0x2970a, // formmethod
+ 0x24: 0x71303, // svg
+ 0x25: 0xcf01, // q
+ 0x26: 0x64002, // dt
+ 0x27: 0x1de08, // controls
+ 0x2a: 0x2804, // body
+ 0x2b: 0xd206, // strike
+ 0x2c: 0x3910b, // oncuechange
+ 0x2d: 0x4c30b, // onloadstart
+ 0x2e: 0x2fe07, // isindex
+ 0x2f: 0xb202, // li
+ 0x30: 0x1400b, // playsinline
+ 0x31: 0x34102, // mi
+ 0x32: 0x30806, // applet
+ 0x33: 0x4ce09, // onmessage
+ 0x35: 0x13702, // ol
+ 0x36: 0x1a304, // open
+ 0x39: 0x14d09, // oncanplay
+ 0x3a: 0x6bf09, // onwaiting
+ 0x3b: 0x11908, // oncancel
+ 0x3c: 0x6a908, // onunload
+ 0x3e: 0x53c09, // onoffline
+ 0x3f: 0x1a0e, // accept-charset
+ 0x40: 0x32004, // head
+ 0x42: 0x3ab09, // ondragend
+ 0x43: 0x1310b, // placeholder
+ 0x44: 0x2b30a, // formtarget
+ 0x45: 0x2540d, // foreignobject
+ 0x47: 0x400c, // ontimeupdate
+ 0x48: 0xdd0e, // allowusermedia
+ 0x4a: 0x69c0d, // onbeforeprint
+ 0x4b: 0x5604, // html
+ 0x4c: 0x9f04, // span
+ 0x4d: 0x64206, // hgroup
+ 0x4e: 0x16408, // disabled
+ 0x4f: 0x4204, // time
+ 0x51: 0x42b07, // onfocus
+ 0x53: 0xb00a, // malignmark
+ 0x55: 0x4650a, // onkeypress
+ 0x56: 0x55805, // class
+ 0x57: 0x1ab08, // colgroup
+ 0x58: 0x33709, // maxlength
+ 0x59: 0x5a908, // progress
+ 0x5b: 0x70405, // style
+ 0x5c: 0x2a10e, // formnovalidate
+ 0x5e: 0x38b06, // oncopy
+ 0x60: 0x26104, // form
+ 0x61: 0xf606, // footer
+ 0x64: 0x30a, // radiogroup
+ 0x66: 0xfb04, // ruby
+ 0x67: 0x4ff0b, // onmousemove
+ 0x68: 0x19d08, // itemprop
+ 0x69: 0x2d70a, // http-equiv
+ 0x6a: 0x15602, // th
+ 0x6c: 0x6e02, // em
+ 0x6d: 0x38108, // menuitem
+ 0x6e: 0x63106, // select
+ 0x6f: 0x48110, // onlanguagechange
+ 0x70: 0x31f05, // thead
+ 0x71: 0x15c02, // h1
+ 0x72: 0x5e906, // srcdoc
+ 0x75: 0x9604, // name
+ 0x76: 0x19106, // button
+ 0x77: 0x55504, // desc
+ 0x78: 0x17704, // kind
+ 0x79: 0x1bf05, // color
+ 0x7c: 0x58e06, // usemap
+ 0x7d: 0x30e08, // itemtype
+ 0x7f: 0x6d508, // manifest
+ 0x81: 0x5300c, // onmousewheel
+ 0x82: 0x4dc0b, // onmousedown
+ 0x84: 0xc05, // param
+ 0x85: 0x2e005, // video
+ 0x86: 0x4910c, // onloadeddata
+ 0x87: 0x6f107, // address
+ 0x8c: 0xef04, // ping
+ 0x8d: 0x24703, // for
+ 0x8f: 0x62f08, // onselect
+ 0x90: 0x30703, // map
+ 0x92: 0xc01, // p
+ 0x93: 0x8008, // reversed
+ 0x94: 0x54d0a, // onpagehide
+ 0x95: 0x3206, // keygen
+ 0x96: 0x34109, // minlength
+ 0x97: 0x3e40a, // ondragover
+ 0x98: 0x42407, // onerror
+ 0x9a: 0x2107, // charset
+ 0x9b: 0x29b06, // method
+ 0x9c: 0x101, // b
+ 0x9d: 0x68208, // ontoggle
+ 0x9e: 0x2bd06, // hidden
+ 0xa0: 0x3f607, // article
+ 0xa2: 0x63906, // onshow
+ 0xa3: 0x64d06, // onsort
+ 0xa5: 0x57b0f, // contenteditable
+ 0xa6: 0x66908, // onsubmit
+ 0xa8: 0x44f09, // oninvalid
+ 0xaa: 0x202, // br
+ 0xab: 0x10902, // id
+ 0xac: 0x5d04, // loop
+ 0xad: 0x5630a, // onpageshow
+ 0xb0: 0x2cf04, // href
+ 0xb2: 0x2210a, // figcaption
+ 0xb3: 0x2690e, // onautocomplete
+ 0xb4: 0x49106, // onload
+ 0xb6: 0x9c04, // rows
+ 0xb7: 0x1a605, // nonce
+ 0xb8: 0x68a14, // onunhandledrejection
+ 0xbb: 0x21306, // center
+ 0xbc: 0x59406, // onplay
+ 0xbd: 0x33f02, // h5
+ 0xbe: 0x49d07, // listing
+ 0xbf: 0x57606, // public
+ 0xc2: 0x23b06, // figure
+ 0xc3: 0x57a04, // icon
+ 0xc4: 0x1ab03, // col
+ 0xc5: 0x47b03, // rel
+ 0xc6: 0xe605, // media
+ 0xc7: 0x12109, // autofocus
+ 0xc8: 0x19a02, // rt
+ 0xca: 0x2d304, // lang
+ 0xcc: 0x49908, // datalist
+ 0xce: 0x2eb06, // iframe
+ 0xcf: 0x36105, // muted
+ 0xd0: 0x6140a, // onauxclick
+ 0xd2: 0x3c02, // as
+ 0xd6: 0x3fd06, // ondrop
+ 0xd7: 0x1c90a, // annotation
+ 0xd8: 0x21908, // fieldset
+ 0xdb: 0x2cf08, // hreflang
+ 0xdc: 0x4e70c, // onmouseenter
+ 0xdd: 0x2a402, // mn
+ 0xde: 0xe60a, // mediagroup
+ 0xdf: 0x9805, // meter
+ 0xe0: 0x56c03, // wbr
+ 0xe2: 0x63e05, // width
+ 0xe3: 0x2290c, // onafterprint
+ 0xe4: 0x30505, // ismap
+ 0xe5: 0x1505, // value
+ 0xe7: 0x1303, // nav
+ 0xe8: 0x54508, // ononline
+ 0xe9: 0xb604, // mark
+ 0xea: 0xc303, // low
+ 0xeb: 0x3ee0b, // ondragstart
+ 0xef: 0x12f03, // xmp
+ 0xf0: 0x22407, // caption
+ 0xf1: 0xd904, // type
+ 0xf2: 0x70907, // summary
+ 0xf3: 0x6802, // tt
+ 0xf4: 0x20809, // translate
+ 0xf5: 0x1870a, // blockquote
+ 0xf8: 0x15702, // hr
+ 0xfa: 0x2705, // tbody
+ 0xfc: 0x7b07, // picture
+ 0xfd: 0x5206, // height
+ 0xfe: 0x19c04, // cite
+ 0xff: 0x2501, // s
+ 0x101: 0xff05, // async
+ 0x102: 0x56f07, // onpaste
+ 0x103: 0x19507, // onabort
+ 0x104: 0x2b706, // target
+ 0x105: 0x14b03, // bdo
+ 0x106: 0x1f006, // coords
+ 0x107: 0x5e108, // onresize
+ 0x108: 0x71908, // template
+ 0x10a: 0x3a02, // rb
+ 0x10b: 0x2a50a, // novalidate
+ 0x10c: 0x460e, // updateviacache
+ 0x10d: 0x71003, // sup
+ 0x10e: 0x6c07, // noembed
+ 0x10f: 0x16b03, // div
+ 0x110: 0x6f707, // srclang
+ 0x111: 0x17a09, // draggable
+ 0x112: 0x67305, // scope
+ 0x113: 0x5905, // label
+ 0x114: 0x22f02, // rp
+ 0x115: 0x23f08, // required
+ 0x116: 0x3780d, // oncontextmenu
+ 0x117: 0x5e504, // size
+ 0x118: 0x5b00a, // spellcheck
+ 0x119: 0x3f04, // font
+ 0x11a: 0x9c07, // rowspan
+ 0x11b: 0x10a07, // default
+ 0x11d: 0x44307, // oninput
+ 0x11e: 0x38506, // itemid
+ 0x11f: 0x5ee04, // code
+ 0x120: 0xaa07, // acronym
+ 0x121: 0x3b04, // base
+ 0x125: 0x2470d, // foreignObject
+ 0x126: 0x2ca04, // high
+ 0x127: 0x3cb0e, // referrerpolicy
+ 0x128: 0x33703, // max
+ 0x129: 0x59d0a, // onpopstate
+ 0x12a: 0x2fc02, // h4
+ 0x12b: 0x4ac04, // meta
+ 0x12c: 0x17305, // blink
+ 0x12e: 0x5f508, // onscroll
+ 0x12f: 0x59409, // onplaying
+ 0x130: 0xc113, // allowpaymentrequest
+ 0x131: 0x19a03, // rtc
+ 0x132: 0x72b04, // wrap
+ 0x134: 0x8b08, // frameset
+ 0x135: 0x32605, // small
+ 0x137: 0x32006, // header
+ 0x138: 0x40409, // onemptied
+ 0x139: 0x34902, // h6
+ 0x13a: 0x35908, // multiple
+ 0x13c: 0x52a06, // prompt
+ 0x13f: 0x28e09, // challenge
+ 0x141: 0x4370c, // onhashchange
+ 0x142: 0x57b07, // content
+ 0x143: 0x1c90e, // annotation-xml
+ 0x144: 0x36607, // onclose
+ 0x145: 0x14d10, // oncanplaythrough
+ 0x148: 0x5170b, // onmouseover
+ 0x149: 0x64f08, // sortable
+ 0x14a: 0xa402, // mo
+ 0x14b: 0x2cd02, // h3
+ 0x14c: 0x2c406, // script
+ 0x14d: 0x41d07, // onended
+ 0x14f: 0x64706, // poster
+ 0x150: 0x7210a, // workertype
+ 0x153: 0x1f505, // shape
+ 0x154: 0x4, // abbr
+ 0x155: 0x1, // a
+ 0x156: 0x2bf02, // dd
+ 0x157: 0x71606, // system
+ 0x158: 0x4ce0e, // onmessageerror
+ 0x159: 0x36b08, // seamless
+ 0x15a: 0x2610a, // formaction
+ 0x15b: 0x6e106, // option
+ 0x15c: 0x31d04, // math
+ 0x15d: 0x62609, // onseeking
+ 0x15e: 0x39c05, // oncut
+ 0x15f: 0x44c03, // del
+ 0x160: 0x11005, // title
+ 0x161: 0x11505, // audio
+ 0x162: 0x63108, // selected
+ 0x165: 0x3b40b, // ondragenter
+ 0x166: 0x46e06, // spacer
+ 0x167: 0x4a410, // onloadedmetadata
+ 0x168: 0x44505, // input
+ 0x16a: 0x58505, // table
+ 0x16b: 0x41508, // onchange
+ 0x16e: 0x5f005, // defer
+ 0x171: 0x50a0a, // onmouseout
+ 0x172: 0x20504, // slot
+ 0x175: 0x3704, // nobr
+ 0x177: 0x1d707, // command
+ 0x17a: 0x7207, // details
+ 0x17b: 0x38104, // menu
+ 0x17c: 0xb903, // kbd
+ 0x17d: 0x57304, // step
+ 0x17e: 0x20303, // ins
+ 0x17f: 0x13c08, // autoplay
+ 0x182: 0x34103, // min
+ 0x183: 0x17404, // link
+ 0x185: 0x40d10, // ondurationchange
+ 0x186: 0x9202, // td
+ 0x187: 0x8b05, // frame
+ 0x18a: 0x2ab08, // datetime
+ 0x18b: 0x44509, // inputmode
+ 0x18c: 0x35108, // readonly
+ 0x18d: 0x21104, // face
+ 0x18f: 0x5e505, // sizes
+ 0x191: 0x4b208, // tabindex
+ 0x192: 0x6db06, // strong
+ 0x193: 0xba03, // bdi
+ 0x194: 0x6fe06, // srcset
+ 0x196: 0x67202, // ms
+ 0x197: 0x5b507, // checked
+ 0x198: 0xb105, // align
+ 0x199: 0x1e507, // section
+ 0x19b: 0x6e05, // embed
+ 0x19d: 0x15e07, // bgsound
+ 0x1a2: 0x49d04, // list
+ 0x1a3: 0x61e08, // onseeked
+ 0x1a4: 0x66009, // onstorage
+ 0x1a5: 0x2f603, // img
+ 0x1a6: 0xf505, // tfoot
+ 0x1a9: 0x26913, // onautocompleteerror
+ 0x1aa: 0x5fd19, // onsecuritypolicyviolation
+ 0x1ad: 0x9303, // dir
+ 0x1ae: 0x9307, // dirname
+ 0x1b0: 0x5a70a, // onprogress
+ 0x1b2: 0x65709, // onstalled
+ 0x1b5: 0x66f09, // itemscope
+ 0x1b6: 0x49904, // data
+ 0x1b7: 0x3d90b, // ondragleave
+ 0x1b8: 0x56102, // h2
+ 0x1b9: 0x2f706, // mglyph
+ 0x1ba: 0x16502, // is
+ 0x1bb: 0x6e50e, // onbeforeunload
+ 0x1bc: 0x2830d, // typemustmatch
+ 0x1bd: 0x3ab06, // ondrag
+ 0x1be: 0x5da07, // onreset
+ 0x1c0: 0x51106, // output
+ 0x1c1: 0x12907, // sandbox
+ 0x1c2: 0x1b209, // plaintext
+ 0x1c4: 0x34c08, // textarea
+ 0x1c7: 0xd607, // keytype
+ 0x1c8: 0x34b05, // mtext
+ 0x1c9: 0x6b10e, // onvolumechange
+ 0x1ca: 0x1ea06, // onblur
+ 0x1cb: 0x58a07, // onpause
+ 0x1cd: 0x5bc0c, // onratechange
+ 0x1ce: 0x10705, // aside
+ 0x1cf: 0x6cf07, // optimum
+ 0x1d1: 0x45809, // onkeydown
+ 0x1d2: 0x1c407, // colspan
+ 0x1d3: 0x1004, // main
+ 0x1d4: 0x66b03, // sub
+ 0x1d5: 0x25b06, // object
+ 0x1d6: 0x55c06, // search
+ 0x1d7: 0x37206, // sorted
+ 0x1d8: 0x17003, // big
+ 0x1d9: 0xb01, // u
+ 0x1db: 0x26b0c, // autocomplete
+ 0x1dc: 0xcc02, // tr
+ 0x1dd: 0xf303, // alt
+ 0x1df: 0x7804, // samp
+ 0x1e0: 0x5c812, // onrejectionhandled
+ 0x1e1: 0x4f30c, // onmouseleave
+ 0x1e2: 0x28007, // enctype
+ 0x1e3: 0xa208, // nomodule
+ 0x1e5: 0x3280f, // allowfullscreen
+ 0x1e6: 0x5f08, // optgroup
+ 0x1e8: 0x27c0b, // formenctype
+ 0x1e9: 0x18106, // legend
+ 0x1ea: 0x10306, // canvas
+ 0x1eb: 0x6607, // pattern
+ 0x1ec: 0x2c208, // noscript
+ 0x1ed: 0x601, // i
+ 0x1ee: 0x5d602, // dl
+ 0x1ef: 0xa702, // ul
+ 0x1f2: 0x52209, // onmouseup
+ 0x1f4: 0x1ba05, // track
+ 0x1f7: 0x3a10a, // ondblclick
+ 0x1f8: 0x3bf0a, // ondragexit
+ 0x1fa: 0x8703, // dfn
+ 0x1fc: 0x26506, // action
+ 0x1fd: 0x35004, // area
+ 0x1fe: 0x31607, // marquee
+ 0x1ff: 0x16d03, // var
}
const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb" +
@@ -758,26 +760,26 @@ const atomText = "abbradiogrouparamainavalueaccept-charsetbodyaccesskeygenobrb"
"dboxmplaceholderautoplaysinlinebdoncanplaythrough1bgsoundisa" +
"bledivarbigblinkindraggablegendblockquotebuttonabortcitempro" +
"penoncecolgrouplaintextrackcolorcolspannotation-xmlcommandco" +
- "ntrolshapecoordslotranslatecrossoriginsmallowfullscreenoscri" +
- "ptfacenterfieldsetfigcaptionafterprintegrityfigurequiredfore" +
- "ignObjectforeignobjectformactionautocompleteerrorformenctype" +
- "mustmatchallengeformmethodformnovalidatetimeformtargethgroup" +
- "osterhiddenhigh2hreflanghttp-equivideonclickiframeimageimgly" +
- "ph3isindexismappletitemtypemarqueematheadersortedmaxlength4m" +
- "inlength5mtextareadonlymultiplemutedoncloseamlessourceoncont" +
- "extmenuitemidoncopyoncuechangeoncutondblclickondragendondrag" +
- "enterondragexitemreferrerpolicyondragleaveondragoverondragst" +
- "articleondropzonemptiedondurationchangeonendedonerroronfocus" +
- "paceronhashchangeoninputmodeloninvalidonkeydownloadonkeypres" +
- "spellcheckedonkeyupreloadonlanguagechangeonloadeddatalisting" +
- "onloadedmetadatabindexonloadendonloadstartonmessageerroronmo" +
- "usedownonmouseenteronmouseleaveonmousemoveonmouseoutputonmou" +
- "seoveronmouseupromptonmousewheelonofflineononlineonpagehides" +
- "classectionbluronpageshowbronpastepublicontenteditableonpaus" +
- "emaponplayingonpopstateonprogressrcdocodeferonratechangeonre" +
- "jectionhandledonresetonresizesrclangonscrollonsecuritypolicy" +
- "violationauxclickonseekedonseekingonselectedonshowidth6onsor" +
- "tableonstalledonstorageonsubmitemscopedonsuspendontoggleonun" +
- "handledrejectionbeforeprintonunloadonvolumechangeonwaitingon" +
- "wheeloptimumanifestrongoptionbeforeunloaddressrcsetstylesumm" +
- "arysupsvgsystemplateworkertypewrap"
+ "ntrolsectionblurcoordshapecrossoriginslotranslatefacenterfie" +
+ "ldsetfigcaptionafterprintegrityfigurequiredforeignObjectfore" +
+ "ignobjectformactionautocompleteerrorformenctypemustmatchalle" +
+ "ngeformmethodformnovalidatetimeformtargethiddenoscripthigh3h" +
+ "reflanghttp-equivideonclickiframeimageimglyph4isindexismappl" +
+ "etitemtypemarqueematheadersmallowfullscreenmaxlength5minleng" +
+ "th6mtextareadonlymultiplemutedoncloseamlessortedoncontextmen" +
+ "uitemidoncopyoncuechangeoncutondblclickondragendondragentero" +
+ "ndragexitemreferrerpolicyondragleaveondragoverondragstarticl" +
+ "eondropzonemptiedondurationchangeonendedonerroronfocusourceo" +
+ "nhashchangeoninputmodeloninvalidonkeydownloadonkeypresspacer" +
+ "onkeyupreloadonlanguagechangeonloadeddatalistingonloadedmeta" +
+ "databindexonloadendonloadstartonmessageerroronmousedownonmou" +
+ "seenteronmouseleaveonmousemoveonmouseoutputonmouseoveronmous" +
+ "eupromptonmousewheelonofflineononlineonpagehidesclassearch2o" +
+ "npageshowbronpastepublicontenteditableonpausemaponplayingonp" +
+ "opstateonprogresspellcheckedonratechangeonrejectionhandledon" +
+ "resetonresizesrcdocodeferonscrollonsecuritypolicyviolationau" +
+ "xclickonseekedonseekingonselectedonshowidthgrouposteronsorta" +
+ "bleonstalledonstorageonsubmitemscopedonsuspendontoggleonunha" +
+ "ndledrejectionbeforeprintonunloadonvolumechangeonwaitingonwh" +
+ "eeloptimumanifestrongoptionbeforeunloaddressrclangsrcsetstyl" +
+ "esummarysupsvgsystemplateworkertypewrap"
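
The table above is generator output: adding the new `search` element changes the hash seed, every packed offset, and the `atomText` string. A minimal sketch of how the regenerated table is consumed, using the public golang.org/x/net/html/atom API (the example program itself is illustrative, not part of the diff):

```go
package main

import (
	"fmt"

	"golang.org/x/net/html/atom"
)

func main() {
	// Lookup interns a tag or attribute name into a compact integer Atom.
	// "search" resolves to a non-zero Atom only with the regenerated table above.
	a := atom.Lookup([]byte("search"))
	fmt.Println(a != 0, a.String()) // with the new table: true "search"

	// Unknown names return the zero Atom.
	fmt.Println(atom.Lookup([]byte("not-a-tag")) == 0) // true
}
```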
diff --git a/vendor/golang.org/x/net/html/parse.go b/vendor/golang.org/x/net/html/parse.go
index 643c674e..518ee4c9 100644
--- a/vendor/golang.org/x/net/html/parse.go
+++ b/vendor/golang.org/x/net/html/parse.go
@@ -924,7 +924,7 @@ func inBodyIM(p *parser) bool {
p.addElement()
p.im = inFramesetIM
return true
- case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Section, a.Summary, a.Ul:
+ case a.Address, a.Article, a.Aside, a.Blockquote, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Main, a.Menu, a.Nav, a.Ol, a.P, a.Search, a.Section, a.Summary, a.Ul:
p.popUntil(buttonScope, a.P)
p.addElement()
case a.H1, a.H2, a.H3, a.H4, a.H5, a.H6:
@@ -1136,7 +1136,7 @@ func inBodyIM(p *parser) bool {
return false
}
return true
- case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Section, a.Summary, a.Ul:
+ case a.Address, a.Article, a.Aside, a.Blockquote, a.Button, a.Center, a.Details, a.Dialog, a.Dir, a.Div, a.Dl, a.Fieldset, a.Figcaption, a.Figure, a.Footer, a.Header, a.Hgroup, a.Listing, a.Main, a.Menu, a.Nav, a.Ol, a.Pre, a.Search, a.Section, a.Summary, a.Ul:
p.popUntil(defaultScope, p.tok.DataAtom)
case a.Form:
if p.oe.contains(a.Template) {
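
Both hunks add `a.Search` to the element lists in `inBodyIM`, so `<search>` participates in start- and end-tag handling like `<section>` or `<div>`. A quick sketch of the resulting tree shape, assuming the patched vendored `golang.org/x/net/html`:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// An open <p> is closed when a <search> start tag arrives, and </search>
	// pops back to the matching element, mirroring <section>/<div> handling.
	doc, err := html.Parse(strings.NewReader("<body><p>query<search><input></search>"))
	if err != nil {
		panic(err)
	}
	var walk func(n *html.Node, depth int)
	walk = func(n *html.Node, depth int) {
		if n.Type == html.ElementNode {
			fmt.Printf("%*s<%s>\n", depth*2, "", n.Data)
		}
		for c := n.FirstChild; c != nil; c = c.NextSibling {
			walk(c, depth+1)
		}
	}
	walk(doc, 0)
	// Expected shape: <p> and <search> end up as siblings under <body>,
	// because the <search> start tag pops the open <p> first.
}
```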
diff --git a/vendor/golang.org/x/net/html/token.go b/vendor/golang.org/x/net/html/token.go
index 3c57880d..6598c1f7 100644
--- a/vendor/golang.org/x/net/html/token.go
+++ b/vendor/golang.org/x/net/html/token.go
@@ -839,8 +839,22 @@ func (z *Tokenizer) readStartTag() TokenType {
if raw {
z.rawTag = strings.ToLower(string(z.buf[z.data.start:z.data.end]))
}
- // Look for a self-closing token like "<br/>".
- if z.err == nil && z.buf[z.raw.end-2] == '/' {
+ // Look for a self-closing token (e.g. <br/>).
+ //
+ // Originally, we did this by just checking that the last character of the
+ // tag (ignoring the closing bracket) was a solidus (/) character, but this
+ // is not always accurate.
+ //
+ // We need to be careful that we don't misinterpret a non-self-closing tag
+ // as self-closing, as can happen if the tag contains unquoted attribute
+ // values (i.e. ).
+ //
+ // To avoid this, we check that the last non-bracket character of the tag
+ // (z.raw.end-2) isn't the same character as the last non-quote character of
+ // the last attribute of the tag (z.pendingAttr[1].end-1), if the tag has
+ // attributes.
+ nAttrs := len(z.attr)
+ if z.err == nil && z.buf[z.raw.end-2] == '/' && (nAttrs == 0 || z.raw.end-2 != z.attr[nAttrs-1][1].end-1) {
return SelfClosingTagToken
}
return StartTagToken
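
The extra attribute-boundary check keeps a trailing solidus inside an unquoted attribute value from being read as a self-closing marker. A minimal sketch of the observable difference, using the public tokenizer API:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/net/html"
)

func main() {
	// The unquoted attribute value ends in "/", so the raw tag ends in "/>".
	// The old check reported a SelfClosingTagToken here; with the
	// attribute-boundary check it is reported as a plain StartTagToken.
	z := html.NewTokenizer(strings.NewReader(`<a href=http://example.com/>link</a>`))
	for {
		tt := z.Next()
		if tt == html.ErrorToken {
			return
		}
		fmt.Println(tt, z.Token().Data)
	}
}
```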
diff --git a/vendor/golang.org/x/net/http2/config.go b/vendor/golang.org/x/net/http2/config.go
index de58dfb8..ca645d9a 100644
--- a/vendor/golang.org/x/net/http2/config.go
+++ b/vendor/golang.org/x/net/http2/config.go
@@ -60,7 +60,7 @@ func configFromServer(h1 *http.Server, h2 *Server) http2Config {
return conf
}
-// configFromServer merges configuration settings from h2 and h2.t1.HTTP2
+// configFromTransport merges configuration settings from h2 and h2.t1.HTTP2
// (the net/http Transport).
func configFromTransport(h2 *Transport) http2Config {
conf := http2Config{
diff --git a/vendor/golang.org/x/net/http2/config_go124.go b/vendor/golang.org/x/net/http2/config_go124.go
index e3784123..5b516c55 100644
--- a/vendor/golang.org/x/net/http2/config_go124.go
+++ b/vendor/golang.org/x/net/http2/config_go124.go
@@ -13,7 +13,7 @@ func fillNetHTTPServerConfig(conf *http2Config, srv *http.Server) {
fillNetHTTPConfig(conf, srv.HTTP2)
}
-// fillNetHTTPServerConfig sets fields in conf from tr.HTTP2.
+// fillNetHTTPTransportConfig sets fields in conf from tr.HTTP2.
func fillNetHTTPTransportConfig(conf *http2Config, tr *http.Transport) {
fillNetHTTPConfig(conf, tr.HTTP2)
}
diff --git a/vendor/golang.org/x/net/http2/frame.go b/vendor/golang.org/x/net/http2/frame.go
index 81faec7e..97bd8b06 100644
--- a/vendor/golang.org/x/net/http2/frame.go
+++ b/vendor/golang.org/x/net/http2/frame.go
@@ -225,6 +225,11 @@ var fhBytes = sync.Pool{
},
}
+func invalidHTTP1LookingFrameHeader() FrameHeader {
+ fh, _ := readFrameHeader(make([]byte, frameHeaderLen), strings.NewReader("HTTP/1.1 "))
+ return fh
+}
+
// ReadFrameHeader reads 9 bytes from r and returns a FrameHeader.
// Most users should use Framer.ReadFrame instead.
func ReadFrameHeader(r io.Reader) (FrameHeader, error) {
@@ -503,10 +508,16 @@ func (fr *Framer) ReadFrame() (Frame, error) {
return nil, err
}
if fh.Length > fr.maxReadSize {
+ if fh == invalidHTTP1LookingFrameHeader() {
+ return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", ErrFrameTooLarge)
+ }
return nil, ErrFrameTooLarge
}
payload := fr.getReadBuf(fh.Length)
if _, err := io.ReadFull(fr.r, payload); err != nil {
+ if fh == invalidHTTP1LookingFrameHeader() {
+ return nil, fmt.Errorf("http2: failed reading the frame payload: %w, note that the frame header looked like an HTTP/1.1 header", err)
+ }
return nil, err
}
f, err := typeFrameParser(fh.Type)(fr.frameCache, fh, fr.countError, payload)
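
The sentinel works because the first nine bytes of an HTTP/1.1 response, decoded as an HTTP/2 frame header, imply a payload far larger than any sane frame, so `ReadFrame` reaches one of the two guarded branches. A back-of-the-envelope sketch of that decoding (plain arithmetic, no x/net APIs):

```go
package main

import "fmt"

func main() {
	// The first nine bytes an HTTP/1.1 server sends, interpreted as an HTTP/2
	// frame header: 24-bit length, 8-bit type, 8-bit flags, 31-bit stream ID.
	b := []byte("HTTP/1.1 ")
	length := uint32(b[0])<<16 | uint32(b[1])<<8 | uint32(b[2])
	typ := b[3]
	flags := b[4]
	stream := (uint32(b[5])<<24 | uint32(b[6])<<16 | uint32(b[7])<<8 | uint32(b[8])) & 0x7fffffff

	// length is ~4.7 MB, far above any typical SETTINGS_MAX_FRAME_SIZE, so
	// ReadFrame hits the fh.Length > fr.maxReadSize branch and can report the
	// friendlier "looked like an HTTP/1.1 header" error.
	fmt.Printf("length=%d type=0x%x flags=0x%x stream=%d\n", length, typ, flags, stream)
}
```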
diff --git a/vendor/golang.org/x/net/http2/http2.go b/vendor/golang.org/x/net/http2/http2.go
index c7601c90..6c18ea23 100644
--- a/vendor/golang.org/x/net/http2/http2.go
+++ b/vendor/golang.org/x/net/http2/http2.go
@@ -34,11 +34,19 @@ import (
)
var (
- VerboseLogs bool
- logFrameWrites bool
- logFrameReads bool
- inTests bool
- disableExtendedConnectProtocol bool
+ VerboseLogs bool
+ logFrameWrites bool
+ logFrameReads bool
+ inTests bool
+
+ // Enabling extended CONNECT by default causes browsers to attempt to use
+ // WebSockets-over-HTTP/2. This results in problems when the server's websocket
+ // package doesn't support extended CONNECT.
+ //
+ // Disable extended CONNECT by default for now.
+ //
+ // Issue #71128.
+ disableExtendedConnectProtocol = true
)
func init() {
@@ -51,8 +59,8 @@ func init() {
logFrameWrites = true
logFrameReads = true
}
- if strings.Contains(e, "http2xconnect=0") {
- disableExtendedConnectProtocol = true
+ if strings.Contains(e, "http2xconnect=1") {
+ disableExtendedConnectProtocol = false
}
}
@@ -407,23 +415,6 @@ func (s *sorter) SortStrings(ss []string) {
s.v = save
}
-// validPseudoPath reports whether v is a valid :path pseudo-header
-// value. It must be either:
-//
-// - a non-empty string starting with '/'
-// - the string '*', for OPTIONS requests.
-//
-// For now this is only used a quick check for deciding when to clean
-// up Opaque URLs before sending requests from the Transport.
-// See golang.org/issue/16847
-//
-// We used to enforce that the path also didn't start with "//", but
-// Google's GFE accepts such paths and Chrome sends them, so ignore
-// that part of the spec. See golang.org/issue/19103.
-func validPseudoPath(v string) bool {
- return (len(v) > 0 && v[0] == '/') || v == "*"
-}
-
// incomparable is a zero-width, non-comparable type. Adding it to a struct
// makes that struct also non-comparable, and generally doesn't add
// any size (as long as it's first).
diff --git a/vendor/golang.org/x/net/http2/server.go b/vendor/golang.org/x/net/http2/server.go
index b55547ae..51fca38f 100644
--- a/vendor/golang.org/x/net/http2/server.go
+++ b/vendor/golang.org/x/net/http2/server.go
@@ -50,6 +50,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
+ "golang.org/x/net/internal/httpcommon"
)
const (
@@ -812,8 +813,7 @@ const maxCachedCanonicalHeadersKeysSize = 2048
func (sc *serverConn) canonicalHeader(v string) string {
sc.serveG.check()
- buildCommonHeaderMapsOnce()
- cv, ok := commonCanonHeader[v]
+ cv, ok := httpcommon.CachedCanonicalHeader(v)
if ok {
return cv
}
@@ -1068,7 +1068,10 @@ func (sc *serverConn) serve(conf http2Config) {
func (sc *serverConn) handlePingTimer(lastFrameReadTime time.Time) {
if sc.pingSent {
- sc.vlogf("timeout waiting for PING response")
+ sc.logf("timeout waiting for PING response")
+ if f := sc.countErrorFunc; f != nil {
+ f("conn_close_lost_ping")
+ }
sc.conn.Close()
return
}
@@ -2233,25 +2236,25 @@ func (sc *serverConn) newStream(id, pusherID uint32, state streamState) *stream
func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*responseWriter, *http.Request, error) {
sc.serveG.check()
- rp := requestParam{
- method: f.PseudoValue("method"),
- scheme: f.PseudoValue("scheme"),
- authority: f.PseudoValue("authority"),
- path: f.PseudoValue("path"),
- protocol: f.PseudoValue("protocol"),
+ rp := httpcommon.ServerRequestParam{
+ Method: f.PseudoValue("method"),
+ Scheme: f.PseudoValue("scheme"),
+ Authority: f.PseudoValue("authority"),
+ Path: f.PseudoValue("path"),
+ Protocol: f.PseudoValue("protocol"),
}
// extended connect is disabled, so we should not see :protocol
- if disableExtendedConnectProtocol && rp.protocol != "" {
+ if disableExtendedConnectProtocol && rp.Protocol != "" {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
- isConnect := rp.method == "CONNECT"
+ isConnect := rp.Method == "CONNECT"
if isConnect {
- if rp.protocol == "" && (rp.path != "" || rp.scheme != "" || rp.authority == "") {
+ if rp.Protocol == "" && (rp.Path != "" || rp.Scheme != "" || rp.Authority == "") {
return nil, nil, sc.countError("bad_connect", streamError(f.StreamID, ErrCodeProtocol))
}
- } else if rp.method == "" || rp.path == "" || (rp.scheme != "https" && rp.scheme != "http") {
+ } else if rp.Method == "" || rp.Path == "" || (rp.Scheme != "https" && rp.Scheme != "http") {
// See 8.1.2.6 Malformed Requests and Responses:
//
// Malformed requests or responses that are detected
@@ -2265,15 +2268,16 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return nil, nil, sc.countError("bad_path_method", streamError(f.StreamID, ErrCodeProtocol))
}
- rp.header = make(http.Header)
+ header := make(http.Header)
+ rp.Header = header
for _, hf := range f.RegularFields() {
- rp.header.Add(sc.canonicalHeader(hf.Name), hf.Value)
+ header.Add(sc.canonicalHeader(hf.Name), hf.Value)
}
- if rp.authority == "" {
- rp.authority = rp.header.Get("Host")
+ if rp.Authority == "" {
+ rp.Authority = header.Get("Host")
}
- if rp.protocol != "" {
- rp.header.Set(":protocol", rp.protocol)
+ if rp.Protocol != "" {
+ header.Set(":protocol", rp.Protocol)
}
rw, req, err := sc.newWriterAndRequestNoBody(st, rp)
@@ -2282,7 +2286,7 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
}
bodyOpen := !f.StreamEnded()
if bodyOpen {
- if vv, ok := rp.header["Content-Length"]; ok {
+ if vv, ok := rp.Header["Content-Length"]; ok {
if cl, err := strconv.ParseUint(vv[0], 10, 63); err == nil {
req.ContentLength = int64(cl)
} else {
@@ -2298,84 +2302,38 @@ func (sc *serverConn) newWriterAndRequest(st *stream, f *MetaHeadersFrame) (*res
return rw, req, nil
}
-type requestParam struct {
- method string
- scheme, authority, path string
- protocol string
- header http.Header
-}
-
-func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp requestParam) (*responseWriter, *http.Request, error) {
+func (sc *serverConn) newWriterAndRequestNoBody(st *stream, rp httpcommon.ServerRequestParam) (*responseWriter, *http.Request, error) {
sc.serveG.check()
var tlsState *tls.ConnectionState // nil if not scheme https
- if rp.scheme == "https" {
+ if rp.Scheme == "https" {
tlsState = sc.tlsState
}
- needsContinue := httpguts.HeaderValuesContainsToken(rp.header["Expect"], "100-continue")
- if needsContinue {
- rp.header.Del("Expect")
- }
- // Merge Cookie headers into one "; "-delimited value.
- if cookies := rp.header["Cookie"]; len(cookies) > 1 {
- rp.header.Set("Cookie", strings.Join(cookies, "; "))
- }
-
- // Setup Trailers
- var trailer http.Header
- for _, v := range rp.header["Trailer"] {
- for _, key := range strings.Split(v, ",") {
- key = http.CanonicalHeaderKey(textproto.TrimString(key))
- switch key {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- // Bogus. (copy of http1 rules)
- // Ignore.
- default:
- if trailer == nil {
- trailer = make(http.Header)
- }
- trailer[key] = nil
- }
- }
- }
- delete(rp.header, "Trailer")
-
- var url_ *url.URL
- var requestURI string
- if rp.method == "CONNECT" && rp.protocol == "" {
- url_ = &url.URL{Host: rp.authority}
- requestURI = rp.authority // mimic HTTP/1 server behavior
- } else {
- var err error
- url_, err = url.ParseRequestURI(rp.path)
- if err != nil {
- return nil, nil, sc.countError("bad_path", streamError(st.id, ErrCodeProtocol))
- }
- requestURI = rp.path
+ res := httpcommon.NewServerRequest(rp)
+ if res.InvalidReason != "" {
+ return nil, nil, sc.countError(res.InvalidReason, streamError(st.id, ErrCodeProtocol))
}
body := &requestBody{
conn: sc,
stream: st,
- needsContinue: needsContinue,
+ needsContinue: res.NeedsContinue,
}
- req := &http.Request{
- Method: rp.method,
- URL: url_,
+ req := (&http.Request{
+ Method: rp.Method,
+ URL: res.URL,
RemoteAddr: sc.remoteAddrStr,
- Header: rp.header,
- RequestURI: requestURI,
+ Header: rp.Header,
+ RequestURI: res.RequestURI,
Proto: "HTTP/2.0",
ProtoMajor: 2,
ProtoMinor: 0,
TLS: tlsState,
- Host: rp.authority,
+ Host: rp.Authority,
Body: body,
- Trailer: trailer,
- }
- req = req.WithContext(st.ctx)
-
+ Trailer: res.Trailer,
+ }).WithContext(st.ctx)
rw := sc.newResponseWriter(st, req)
return rw, req, nil
}
@@ -3270,12 +3228,12 @@ func (sc *serverConn) startPush(msg *startPushRequest) {
// we start in "half closed (remote)" for simplicity.
// See further comments at the definition of stateHalfClosedRemote.
promised := sc.newStream(promisedID, msg.parent.id, stateHalfClosedRemote)
- rw, req, err := sc.newWriterAndRequestNoBody(promised, requestParam{
- method: msg.method,
- scheme: msg.url.Scheme,
- authority: msg.url.Host,
- path: msg.url.RequestURI(),
- header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
+ rw, req, err := sc.newWriterAndRequestNoBody(promised, httpcommon.ServerRequestParam{
+ Method: msg.method,
+ Scheme: msg.url.Scheme,
+ Authority: msg.url.Host,
+ Path: msg.url.RequestURI(),
+ Header: cloneHeader(msg.header), // clone since handler runs concurrently with writing the PUSH_PROMISE
})
if err != nil {
// Should not happen, since we've already validated msg.url.
diff --git a/vendor/golang.org/x/net/http2/transport.go b/vendor/golang.org/x/net/http2/transport.go
index 090d0e1b..f26356b9 100644
--- a/vendor/golang.org/x/net/http2/transport.go
+++ b/vendor/golang.org/x/net/http2/transport.go
@@ -25,7 +25,6 @@ import (
"net/http"
"net/http/httptrace"
"net/textproto"
- "sort"
"strconv"
"strings"
"sync"
@@ -35,6 +34,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
"golang.org/x/net/idna"
+ "golang.org/x/net/internal/httpcommon"
)
const (
@@ -375,6 +375,7 @@ type ClientConn struct {
doNotReuse bool // whether conn is marked to not be reused for any future requests
closing bool
closed bool
+ closedOnIdle bool // true if conn was closed for idleness
seenSettings bool // true if we've seen a settings frame, false otherwise
seenSettingsChan chan struct{} // closed when seenSettings is true or frame reading fails
wantSettingsAck bool // we sent a SETTINGS frame and haven't heard back
@@ -1089,10 +1090,12 @@ func (cc *ClientConn) idleStateLocked() (st clientConnIdleState) {
// If this connection has never been used for a request and is closed,
// then let it take a request (which will fail).
+ // If the conn was closed for idleness, we're racing the idle timer;
+ // don't try to use the conn. (Issue #70515.)
//
// This avoids a situation where an error early in a connection's lifetime
// goes unreported.
- if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed {
+ if cc.nextStreamID == 1 && cc.streamsReserved == 0 && cc.closed && !cc.closedOnIdle {
st.canTakeNewRequest = true
}
@@ -1155,6 +1158,7 @@ func (cc *ClientConn) closeIfIdle() {
return
}
cc.closed = true
+ cc.closedOnIdle = true
nextID := cc.nextStreamID
// TODO: do clients send GOAWAY too? maybe? Just Close:
cc.mu.Unlock()
@@ -1271,23 +1275,6 @@ func (cc *ClientConn) closeForLostPing() {
// exported. At least they'll be DeepEqual for h1-vs-h2 comparisons tests.
var errRequestCanceled = errors.New("net/http: request canceled")
-func commaSeparatedTrailers(req *http.Request) (string, error) {
- keys := make([]string, 0, len(req.Trailer))
- for k := range req.Trailer {
- k = canonicalHeader(k)
- switch k {
- case "Transfer-Encoding", "Trailer", "Content-Length":
- return "", fmt.Errorf("invalid Trailer key %q", k)
- }
- keys = append(keys, k)
- }
- if len(keys) > 0 {
- sort.Strings(keys)
- return strings.Join(keys, ","), nil
- }
- return "", nil
-}
-
func (cc *ClientConn) responseHeaderTimeout() time.Duration {
if cc.t.t1 != nil {
return cc.t.t1.ResponseHeaderTimeout
@@ -1299,22 +1286,6 @@ func (cc *ClientConn) responseHeaderTimeout() time.Duration {
return 0
}
-// checkConnHeaders checks whether req has any invalid connection-level headers.
-// per RFC 7540 section 8.1.2.2: Connection-Specific Header Fields.
-// Certain headers are special-cased as okay but not transmitted later.
-func checkConnHeaders(req *http.Request) error {
- if v := req.Header.Get("Upgrade"); v != "" {
- return fmt.Errorf("http2: invalid Upgrade request header: %q", req.Header["Upgrade"])
- }
- if vv := req.Header["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
- return fmt.Errorf("http2: invalid Transfer-Encoding request header: %q", vv)
- }
- if vv := req.Header["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
- return fmt.Errorf("http2: invalid Connection request header: %q", vv)
- }
- return nil
-}
-
// actualContentLength returns a sanitized version of
// req.ContentLength, where 0 actually means zero (not unknown) and -1
// means unknown.
@@ -1360,25 +1331,7 @@ func (cc *ClientConn) roundTrip(req *http.Request, streamf func(*clientStream))
donec: make(chan struct{}),
}
- // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
- if !cc.t.disableCompression() &&
- req.Header.Get("Accept-Encoding") == "" &&
- req.Header.Get("Range") == "" &&
- !cs.isHead {
- // Request gzip only, not deflate. Deflate is ambiguous and
- // not as universally supported anyway.
- // See: https://zlib.net/zlib_faq.html#faq39
- //
- // Note that we don't request this for HEAD requests,
- // due to a bug in nginx:
- // http://trac.nginx.org/nginx/ticket/358
- // https://golang.org/issue/5522
- //
- // We don't request gzip if the request is for a range, since
- // auto-decoding a portion of a gzipped document will just fail
- // anyway. See https://golang.org/issue/8923
- cs.requestedGzip = true
- }
+ cs.requestedGzip = httpcommon.IsRequestGzip(req.Method, req.Header, cc.t.disableCompression())
go cs.doRequest(req, streamf)
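
The gzip opt-in logic now lives in the internal `httpcommon` package, so it cannot be imported directly; the sketch below mirrors the removed inline logic under that assumption (the `requestGzip` helper is hypothetical, for illustration only):

```go
package main

import (
	"fmt"
	"net/http"
)

// requestGzip mirrors, as a sketch, the decision that moved into the internal
// httpcommon package: transparently request gzip only when the caller has not
// set Accept-Encoding or Range and the request is not a HEAD.
func requestGzip(method string, h http.Header, disableCompression bool) bool {
	return !disableCompression &&
		h.Get("Accept-Encoding") == "" &&
		h.Get("Range") == "" &&
		method != "HEAD"
}

func main() {
	h := http.Header{}
	fmt.Println(requestGzip("GET", h, false))  // true: transport may add Accept-Encoding: gzip
	fmt.Println(requestGzip("HEAD", h, false)) // false: avoids the known nginx HEAD+gzip issue
}
```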
@@ -1492,10 +1445,6 @@ func (cs *clientStream) writeRequest(req *http.Request, streamf func(*clientStre
cc := cs.cc
ctx := cs.ctx
- if err := checkConnHeaders(req); err != nil {
- return err
- }
-
// wait for setting frames to be received, a server can change this value later,
// but we just wait for the first settings frame
var isExtendedConnect bool
@@ -1659,26 +1608,39 @@ func (cs *clientStream) encodeAndWriteHeaders(req *http.Request) error {
// we send: HEADERS{1}, CONTINUATION{0,} + DATA{0,} (DATA is
// sent by writeRequestBody below, along with any Trailers,
// again in form HEADERS{1}, CONTINUATION{0,})
- trailers, err := commaSeparatedTrailers(req)
+ cc.hbuf.Reset()
+ res, err := encodeRequestHeaders(req, cs.requestedGzip, cc.peerMaxHeaderListSize, func(name, value string) {
+ cc.writeHeader(name, value)
+ })
if err != nil {
- return err
- }
- hasTrailers := trailers != ""
- contentLen := actualContentLength(req)
- hasBody := contentLen != 0
- hdrs, err := cc.encodeHeaders(req, cs.requestedGzip, trailers, contentLen)
- if err != nil {
- return err
+ return fmt.Errorf("http2: %w", err)
}
+ hdrs := cc.hbuf.Bytes()
// Write the request.
- endStream := !hasBody && !hasTrailers
+ endStream := !res.HasBody && !res.HasTrailers
cs.sentHeaders = true
err = cc.writeHeaders(cs.ID, endStream, int(cc.maxFrameSize), hdrs)
traceWroteHeaders(cs.trace)
return err
}
+func encodeRequestHeaders(req *http.Request, addGzipHeader bool, peerMaxHeaderListSize uint64, headerf func(name, value string)) (httpcommon.EncodeHeadersResult, error) {
+ return httpcommon.EncodeHeaders(req.Context(), httpcommon.EncodeHeadersParam{
+ Request: httpcommon.Request{
+ Header: req.Header,
+ Trailer: req.Trailer,
+ URL: req.URL,
+ Host: req.Host,
+ Method: req.Method,
+ ActualContentLength: actualContentLength(req),
+ },
+ AddGzipHeader: addGzipHeader,
+ PeerMaxHeaderListSize: peerMaxHeaderListSize,
+ DefaultUserAgent: defaultUserAgent,
+ }, headerf)
+}
+
// cleanupWriteRequest performs post-request tasks.
//
// If err (the result of writeRequest) is non-nil and the stream is not closed,
@@ -2066,218 +2028,6 @@ func (cs *clientStream) awaitFlowControl(maxBytes int) (taken int32, err error)
}
}
-func validateHeaders(hdrs http.Header) string {
- for k, vv := range hdrs {
- if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
- return fmt.Sprintf("name %q", k)
- }
- for _, v := range vv {
- if !httpguts.ValidHeaderFieldValue(v) {
- // Don't include the value in the error,
- // because it may be sensitive.
- return fmt.Sprintf("value for header %q", k)
- }
- }
- }
- return ""
-}
-
-var errNilRequestURL = errors.New("http2: Request.URI is nil")
-
-func isNormalConnect(req *http.Request) bool {
- return req.Method == "CONNECT" && req.Header.Get(":protocol") == ""
-}
-
-// requires cc.wmu be held.
-func (cc *ClientConn) encodeHeaders(req *http.Request, addGzipHeader bool, trailers string, contentLength int64) ([]byte, error) {
- cc.hbuf.Reset()
- if req.URL == nil {
- return nil, errNilRequestURL
- }
-
- host := req.Host
- if host == "" {
- host = req.URL.Host
- }
- host, err := httpguts.PunycodeHostPort(host)
- if err != nil {
- return nil, err
- }
- if !httpguts.ValidHostHeader(host) {
- return nil, errors.New("http2: invalid Host header")
- }
-
- var path string
- if !isNormalConnect(req) {
- path = req.URL.RequestURI()
- if !validPseudoPath(path) {
- orig := path
- path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
- if !validPseudoPath(path) {
- if req.URL.Opaque != "" {
- return nil, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
- } else {
- return nil, fmt.Errorf("invalid request :path %q", orig)
- }
- }
- }
- }
-
- // Check for any invalid headers+trailers and return an error before we
- // potentially pollute our hpack state. (We want to be able to
- // continue to reuse the hpack encoder for future requests)
- if err := validateHeaders(req.Header); err != "" {
- return nil, fmt.Errorf("invalid HTTP header %s", err)
- }
- if err := validateHeaders(req.Trailer); err != "" {
- return nil, fmt.Errorf("invalid HTTP trailer %s", err)
- }
-
- enumerateHeaders := func(f func(name, value string)) {
- // 8.1.2.3 Request Pseudo-Header Fields
- // The :path pseudo-header field includes the path and query parts of the
- // target URI (the path-absolute production and optionally a '?' character
- // followed by the query production, see Sections 3.3 and 3.4 of
- // [RFC3986]).
- f(":authority", host)
- m := req.Method
- if m == "" {
- m = http.MethodGet
- }
- f(":method", m)
- if !isNormalConnect(req) {
- f(":path", path)
- f(":scheme", req.URL.Scheme)
- }
- if trailers != "" {
- f("trailer", trailers)
- }
-
- var didUA bool
- for k, vv := range req.Header {
- if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
- // Host is :authority, already sent.
- // Content-Length is automatic, set below.
- continue
- } else if asciiEqualFold(k, "connection") ||
- asciiEqualFold(k, "proxy-connection") ||
- asciiEqualFold(k, "transfer-encoding") ||
- asciiEqualFold(k, "upgrade") ||
- asciiEqualFold(k, "keep-alive") {
- // Per 8.1.2.2 Connection-Specific Header
- // Fields, don't send connection-specific
- // fields. We have already checked if any
- // are error-worthy so just ignore the rest.
- continue
- } else if asciiEqualFold(k, "user-agent") {
- // Match Go's http1 behavior: at most one
- // User-Agent. If set to nil or empty string,
- // then omit it. Otherwise if not mentioned,
- // include the default (below).
- didUA = true
- if len(vv) < 1 {
- continue
- }
- vv = vv[:1]
- if vv[0] == "" {
- continue
- }
- } else if asciiEqualFold(k, "cookie") {
- // Per 8.1.2.5 To allow for better compression efficiency, the
- // Cookie header field MAY be split into separate header fields,
- // each with one or more cookie-pairs.
- for _, v := range vv {
- for {
- p := strings.IndexByte(v, ';')
- if p < 0 {
- break
- }
- f("cookie", v[:p])
- p++
- // strip space after semicolon if any.
- for p+1 <= len(v) && v[p] == ' ' {
- p++
- }
- v = v[p:]
- }
- if len(v) > 0 {
- f("cookie", v)
- }
- }
- continue
- }
-
- for _, v := range vv {
- f(k, v)
- }
- }
- if shouldSendReqContentLength(req.Method, contentLength) {
- f("content-length", strconv.FormatInt(contentLength, 10))
- }
- if addGzipHeader {
- f("accept-encoding", "gzip")
- }
- if !didUA {
- f("user-agent", defaultUserAgent)
- }
- }
-
- // Do a first pass over the headers counting bytes to ensure
- // we don't exceed cc.peerMaxHeaderListSize. This is done as a
- // separate pass before encoding the headers to prevent
- // modifying the hpack state.
- hlSize := uint64(0)
- enumerateHeaders(func(name, value string) {
- hf := hpack.HeaderField{Name: name, Value: value}
- hlSize += uint64(hf.Size())
- })
-
- if hlSize > cc.peerMaxHeaderListSize {
- return nil, errRequestHeaderListSize
- }
-
- trace := httptrace.ContextClientTrace(req.Context())
- traceHeaders := traceHasWroteHeaderField(trace)
-
- // Header list size is ok. Write the headers.
- enumerateHeaders(func(name, value string) {
- name, ascii := lowerHeader(name)
- if !ascii {
- // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
- // field names have to be ASCII characters (just as in HTTP/1.x).
- return
- }
- cc.writeHeader(name, value)
- if traceHeaders {
- traceWroteHeaderField(trace, name, value)
- }
- })
-
- return cc.hbuf.Bytes(), nil
-}
-
-// shouldSendReqContentLength reports whether the http2.Transport should send
-// a "content-length" request header. This logic is basically a copy of the net/http
-// transferWriter.shouldSendContentLength.
-// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
-// -1 means unknown.
-func shouldSendReqContentLength(method string, contentLength int64) bool {
- if contentLength > 0 {
- return true
- }
- if contentLength < 0 {
- return false
- }
- // For zero bodies, whether we send a content-length depends on the method.
- // It also kinda doesn't matter for http2 either way, with END_STREAM.
- switch method {
- case "POST", "PUT", "PATCH":
- return true
- default:
- return false
- }
-}
-
// requires cc.wmu be held.
func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
cc.hbuf.Reset()
@@ -2294,7 +2044,7 @@ func (cc *ClientConn) encodeTrailers(trailer http.Header) ([]byte, error) {
}
for k, vv := range trailer {
- lowKey, ascii := lowerHeader(k)
+ lowKey, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
@@ -2434,9 +2184,12 @@ func (rl *clientConnReadLoop) cleanup() {
// This avoids a situation where new connections are constantly created,
// added to the pool, fail, and are removed from the pool, without any error
// being surfaced to the user.
- const unusedWaitTime = 5 * time.Second
+ unusedWaitTime := 5 * time.Second
+ if cc.idleTimeout > 0 && unusedWaitTime > cc.idleTimeout {
+ unusedWaitTime = cc.idleTimeout
+ }
idleTime := cc.t.now().Sub(cc.lastActive)
- if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime {
+ if atomic.LoadUint32(&cc.atomicReused) == 0 && idleTime < unusedWaitTime && !cc.closedOnIdle {
cc.idleTimer = cc.t.afterFunc(unusedWaitTime-idleTime, func() {
cc.t.connPool().MarkDead(cc)
})
@@ -2457,6 +2210,13 @@ func (rl *clientConnReadLoop) cleanup() {
}
cc.cond.Broadcast()
cc.mu.Unlock()
+
+ if !cc.seenSettings {
+ // If we have a pending request that wants extended CONNECT,
+ // let it continue and fail with the connection error.
+ cc.extendedConnectAllowed = true
+ close(cc.seenSettingsChan)
+ }
}
// countReadFrameError calls Transport.CountError with a string
@@ -2549,9 +2309,6 @@ func (rl *clientConnReadLoop) run() error {
if VerboseLogs {
cc.vlogf("http2: Transport conn %p received error from processing frame %v: %v", cc, summarizeFrame(f), err)
}
- if !cc.seenSettings {
- close(cc.seenSettingsChan)
- }
return err
}
}
@@ -2646,7 +2403,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
Status: status + " " + http.StatusText(statusCode),
}
for _, hf := range regularFields {
- key := canonicalHeader(hf.Name)
+ key := httpcommon.CanonicalHeader(hf.Name)
if key == "Trailer" {
t := res.Trailer
if t == nil {
@@ -2654,7 +2411,7 @@ func (rl *clientConnReadLoop) handleResponse(cs *clientStream, f *MetaHeadersFra
res.Trailer = t
}
foreachHeaderElement(hf.Value, func(v string) {
- t[canonicalHeader(v)] = nil
+ t[httpcommon.CanonicalHeader(v)] = nil
})
} else {
vv := header[key]
@@ -2778,7 +2535,7 @@ func (rl *clientConnReadLoop) processTrailers(cs *clientStream, f *MetaHeadersFr
trailer := make(http.Header)
for _, hf := range f.RegularFields() {
- key := canonicalHeader(hf.Name)
+ key := httpcommon.CanonicalHeader(hf.Name)
trailer[key] = append(trailer[key], hf.Value)
}
cs.trailer = trailer
@@ -3324,7 +3081,7 @@ func (cc *ClientConn) writeStreamReset(streamID uint32, code ErrCode, ping bool,
var (
errResponseHeaderListSize = errors.New("http2: response header list larger than advertised limit")
- errRequestHeaderListSize = errors.New("http2: request header list larger than peer's advertised limit")
+ errRequestHeaderListSize = httpcommon.ErrRequestHeaderListSize
)
func (cc *ClientConn) logf(format string, args ...interface{}) {
@@ -3508,16 +3265,6 @@ func traceFirstResponseByte(trace *httptrace.ClientTrace) {
}
}
-func traceHasWroteHeaderField(trace *httptrace.ClientTrace) bool {
- return trace != nil && trace.WroteHeaderField != nil
-}
-
-func traceWroteHeaderField(trace *httptrace.ClientTrace, k, v string) {
- if trace != nil && trace.WroteHeaderField != nil {
- trace.WroteHeaderField(k, []string{v})
- }
-}
-
func traceGot1xxResponseFunc(trace *httptrace.ClientTrace) func(int, textproto.MIMEHeader) error {
if trace != nil {
return trace.Got1xxResponse
diff --git a/vendor/golang.org/x/net/http2/write.go b/vendor/golang.org/x/net/http2/write.go
index 6ff6bee7..fdb35b94 100644
--- a/vendor/golang.org/x/net/http2/write.go
+++ b/vendor/golang.org/x/net/http2/write.go
@@ -13,6 +13,7 @@ import (
"golang.org/x/net/http/httpguts"
"golang.org/x/net/http2/hpack"
+ "golang.org/x/net/internal/httpcommon"
)
// writeFramer is implemented by any type that is used to write frames.
@@ -351,7 +352,7 @@ func encodeHeaders(enc *hpack.Encoder, h http.Header, keys []string) {
}
for _, k := range keys {
vv := h[k]
- k, ascii := lowerHeader(k)
+ k, ascii := httpcommon.LowerHeader(k)
if !ascii {
// Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
// field names have to be ASCII characters (just as in HTTP/1.x).
diff --git a/vendor/golang.org/x/net/internal/httpcommon/ascii.go b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
new file mode 100644
index 00000000..ed14da5a
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/ascii.go
@@ -0,0 +1,53 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import "strings"
+
+// The HTTP protocols are defined in terms of ASCII, not Unicode. This file
+// contains helper functions which may use Unicode-aware functions which would
+// otherwise be unsafe and could introduce vulnerabilities if used improperly.
+
+// asciiEqualFold is strings.EqualFold, ASCII only. It reports whether s and t
+// are equal, ASCII-case-insensitively.
+func asciiEqualFold(s, t string) bool {
+ if len(s) != len(t) {
+ return false
+ }
+ for i := 0; i < len(s); i++ {
+ if lower(s[i]) != lower(t[i]) {
+ return false
+ }
+ }
+ return true
+}
+
+// lower returns the ASCII lowercase version of b.
+func lower(b byte) byte {
+ if 'A' <= b && b <= 'Z' {
+ return b + ('a' - 'A')
+ }
+ return b
+}
+
+// isASCIIPrint returns whether s is ASCII and printable according to
+// https://tools.ietf.org/html/rfc20#section-4.2.
+func isASCIIPrint(s string) bool {
+ for i := 0; i < len(s); i++ {
+ if s[i] < ' ' || s[i] > '~' {
+ return false
+ }
+ }
+ return true
+}
+
+// asciiToLower returns the lowercase version of s if s is ASCII and printable,
+// and whether or not it was.
+func asciiToLower(s string) (lower string, ok bool) {
+ if !isASCIIPrint(s) {
+ return "", false
+ }
+ return strings.ToLower(s), true
+}
diff --git a/vendor/golang.org/x/net/http2/headermap.go b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
similarity index 74%
rename from vendor/golang.org/x/net/http2/headermap.go
rename to vendor/golang.org/x/net/internal/httpcommon/headermap.go
index 149b3dd2..92483d8e 100644
--- a/vendor/golang.org/x/net/http2/headermap.go
+++ b/vendor/golang.org/x/net/internal/httpcommon/headermap.go
@@ -1,11 +1,11 @@
-// Copyright 2014 The Go Authors. All rights reserved.
+// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-package http2
+package httpcommon
import (
- "net/http"
+ "net/textproto"
"sync"
)
@@ -82,13 +82,15 @@ func buildCommonHeaderMaps() {
commonLowerHeader = make(map[string]string, len(common))
commonCanonHeader = make(map[string]string, len(common))
for _, v := range common {
- chk := http.CanonicalHeaderKey(v)
+ chk := textproto.CanonicalMIMEHeaderKey(v)
commonLowerHeader[chk] = v
commonCanonHeader[v] = chk
}
}
-func lowerHeader(v string) (lower string, ascii bool) {
+// LowerHeader returns the lowercase form of a header name,
+// used on the wire for HTTP/2 and HTTP/3 requests.
+func LowerHeader(v string) (lower string, ascii bool) {
buildCommonHeaderMapsOnce()
if s, ok := commonLowerHeader[v]; ok {
return s, true
@@ -96,10 +98,18 @@ func lowerHeader(v string) (lower string, ascii bool) {
return asciiToLower(v)
}
-func canonicalHeader(v string) string {
+// CanonicalHeader canonicalizes a header name. (For example, "host" becomes "Host".)
+func CanonicalHeader(v string) string {
buildCommonHeaderMapsOnce()
if s, ok := commonCanonHeader[v]; ok {
return s
}
- return http.CanonicalHeaderKey(v)
+ return textproto.CanonicalMIMEHeaderKey(v)
+}
+
+// CachedCanonicalHeader returns the canonical form of a well-known header name.
+func CachedCanonicalHeader(v string) (string, bool) {
+ buildCommonHeaderMapsOnce()
+ s, ok := commonCanonHeader[v]
+ return s, ok
}
diff --git a/vendor/golang.org/x/net/internal/httpcommon/request.go b/vendor/golang.org/x/net/internal/httpcommon/request.go
new file mode 100644
index 00000000..4b705531
--- /dev/null
+++ b/vendor/golang.org/x/net/internal/httpcommon/request.go
@@ -0,0 +1,467 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package httpcommon
+
+import (
+ "context"
+ "errors"
+ "fmt"
+ "net/http/httptrace"
+ "net/textproto"
+ "net/url"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/net/http/httpguts"
+ "golang.org/x/net/http2/hpack"
+)
+
+var (
+ ErrRequestHeaderListSize = errors.New("request header list larger than peer's advertised limit")
+)
+
+// Request is a subset of http.Request.
+// It'd be simpler to pass an *http.Request, of course, but we can't depend on net/http
+// without creating a dependency cycle.
+type Request struct {
+ URL *url.URL
+ Method string
+ Host string
+ Header map[string][]string
+ Trailer map[string][]string
+ ActualContentLength int64 // 0 means 0, -1 means unknown
+}
+
+// EncodeHeadersParam is parameters to EncodeHeaders.
+type EncodeHeadersParam struct {
+ Request Request
+
+ // AddGzipHeader indicates that an "accept-encoding: gzip" header should be
+ // added to the request.
+ AddGzipHeader bool
+
+ // PeerMaxHeaderListSize, when non-zero, is the peer's MAX_HEADER_LIST_SIZE setting.
+ PeerMaxHeaderListSize uint64
+
+ // DefaultUserAgent is the User-Agent header to send when the request
+ // neither contains a User-Agent nor disables it.
+ DefaultUserAgent string
+}
+
+// EncodeHeadersResult is the result of EncodeHeaders.
+type EncodeHeadersResult struct {
+ HasBody bool
+ HasTrailers bool
+}
+
+// EncodeHeaders constructs request headers common to HTTP/2 and HTTP/3.
+// It validates a request and calls headerf with each pseudo-header and header
+// for the request.
+// The headerf function is called with the validated, lowercase header name.
+func EncodeHeaders(ctx context.Context, param EncodeHeadersParam, headerf func(name, value string)) (res EncodeHeadersResult, _ error) {
+ req := param.Request
+
+ // Check for invalid connection-level headers.
+ if err := checkConnHeaders(req.Header); err != nil {
+ return res, err
+ }
+
+ if req.URL == nil {
+ return res, errors.New("Request.URL is nil")
+ }
+
+ host := req.Host
+ if host == "" {
+ host = req.URL.Host
+ }
+ host, err := httpguts.PunycodeHostPort(host)
+ if err != nil {
+ return res, err
+ }
+ if !httpguts.ValidHostHeader(host) {
+ return res, errors.New("invalid Host header")
+ }
+
+ // isNormalConnect is true if this is a non-extended CONNECT request.
+ isNormalConnect := false
+ var protocol string
+ if vv := req.Header[":protocol"]; len(vv) > 0 {
+ protocol = vv[0]
+ }
+ if req.Method == "CONNECT" && protocol == "" {
+ isNormalConnect = true
+ } else if protocol != "" && req.Method != "CONNECT" {
+ return res, errors.New("invalid :protocol header in non-CONNECT request")
+ }
+
+ // Validate the path, except for non-extended CONNECT requests which have no path.
+ var path string
+ if !isNormalConnect {
+ path = req.URL.RequestURI()
+ if !validPseudoPath(path) {
+ orig := path
+ path = strings.TrimPrefix(path, req.URL.Scheme+"://"+host)
+ if !validPseudoPath(path) {
+ if req.URL.Opaque != "" {
+ return res, fmt.Errorf("invalid request :path %q from URL.Opaque = %q", orig, req.URL.Opaque)
+ } else {
+ return res, fmt.Errorf("invalid request :path %q", orig)
+ }
+ }
+ }
+ }
+
+ // Check for any invalid headers+trailers and return an error before we
+ // potentially pollute our hpack state. (We want to be able to
+ // continue to reuse the hpack encoder for future requests)
+ if err := validateHeaders(req.Header); err != "" {
+ return res, fmt.Errorf("invalid HTTP header %s", err)
+ }
+ if err := validateHeaders(req.Trailer); err != "" {
+ return res, fmt.Errorf("invalid HTTP trailer %s", err)
+ }
+
+ trailers, err := commaSeparatedTrailers(req.Trailer)
+ if err != nil {
+ return res, err
+ }
+
+ enumerateHeaders := func(f func(name, value string)) {
+ // 8.1.2.3 Request Pseudo-Header Fields
+ // The :path pseudo-header field includes the path and query parts of the
+ // target URI (the path-absolute production and optionally a '?' character
+ // followed by the query production, see Sections 3.3 and 3.4 of
+ // [RFC3986]).
+ f(":authority", host)
+ m := req.Method
+ if m == "" {
+ m = "GET"
+ }
+ f(":method", m)
+ if !isNormalConnect {
+ f(":path", path)
+ f(":scheme", req.URL.Scheme)
+ }
+ if protocol != "" {
+ f(":protocol", protocol)
+ }
+ if trailers != "" {
+ f("trailer", trailers)
+ }
+
+ var didUA bool
+ for k, vv := range req.Header {
+ if asciiEqualFold(k, "host") || asciiEqualFold(k, "content-length") {
+ // Host is :authority, already sent.
+ // Content-Length is automatic, set below.
+ continue
+ } else if asciiEqualFold(k, "connection") ||
+ asciiEqualFold(k, "proxy-connection") ||
+ asciiEqualFold(k, "transfer-encoding") ||
+ asciiEqualFold(k, "upgrade") ||
+ asciiEqualFold(k, "keep-alive") {
+ // Per 8.1.2.2 Connection-Specific Header
+ // Fields, don't send connection-specific
+ // fields. We have already checked if any
+ // are error-worthy so just ignore the rest.
+ continue
+ } else if asciiEqualFold(k, "user-agent") {
+ // Match Go's http1 behavior: at most one
+ // User-Agent. If set to nil or empty string,
+ // then omit it. Otherwise if not mentioned,
+ // include the default (below).
+ didUA = true
+ if len(vv) < 1 {
+ continue
+ }
+ vv = vv[:1]
+ if vv[0] == "" {
+ continue
+ }
+ } else if asciiEqualFold(k, "cookie") {
+ // Per 8.1.2.5 To allow for better compression efficiency, the
+ // Cookie header field MAY be split into separate header fields,
+ // each with one or more cookie-pairs.
+ for _, v := range vv {
+ for {
+ p := strings.IndexByte(v, ';')
+ if p < 0 {
+ break
+ }
+ f("cookie", v[:p])
+ p++
+ // strip space after semicolon if any.
+ for p+1 <= len(v) && v[p] == ' ' {
+ p++
+ }
+ v = v[p:]
+ }
+ if len(v) > 0 {
+ f("cookie", v)
+ }
+ }
+ continue
+ } else if k == ":protocol" {
+ // :protocol pseudo-header was already sent above.
+ continue
+ }
+
+ for _, v := range vv {
+ f(k, v)
+ }
+ }
+ if shouldSendReqContentLength(req.Method, req.ActualContentLength) {
+ f("content-length", strconv.FormatInt(req.ActualContentLength, 10))
+ }
+ if param.AddGzipHeader {
+ f("accept-encoding", "gzip")
+ }
+ if !didUA {
+ f("user-agent", param.DefaultUserAgent)
+ }
+ }
+
+ // Do a first pass over the headers counting bytes to ensure
+ // we don't exceed cc.peerMaxHeaderListSize. This is done as a
+ // separate pass before encoding the headers to prevent
+ // modifying the hpack state.
+ if param.PeerMaxHeaderListSize > 0 {
+ hlSize := uint64(0)
+ enumerateHeaders(func(name, value string) {
+ hf := hpack.HeaderField{Name: name, Value: value}
+ hlSize += uint64(hf.Size())
+ })
+
+ if hlSize > param.PeerMaxHeaderListSize {
+ return res, ErrRequestHeaderListSize
+ }
+ }
+
+ trace := httptrace.ContextClientTrace(ctx)
+
+ // Header list size is ok. Write the headers.
+ enumerateHeaders(func(name, value string) {
+ name, ascii := LowerHeader(name)
+ if !ascii {
+ // Skip writing invalid headers. Per RFC 7540, Section 8.1.2, header
+ // field names have to be ASCII characters (just as in HTTP/1.x).
+ return
+ }
+
+ headerf(name, value)
+
+ if trace != nil && trace.WroteHeaderField != nil {
+ trace.WroteHeaderField(name, []string{value})
+ }
+ })
+
+ res.HasBody = req.ActualContentLength != 0
+ res.HasTrailers = trailers != ""
+ return res, nil
+}
+
+// IsRequestGzip reports whether we should add an Accept-Encoding: gzip header
+// for a request.
+func IsRequestGzip(method string, header map[string][]string, disableCompression bool) bool {
+ // TODO(bradfitz): this is a copy of the logic in net/http. Unify somewhere?
+ if !disableCompression &&
+ len(header["Accept-Encoding"]) == 0 &&
+ len(header["Range"]) == 0 &&
+ method != "HEAD" {
+ // Request gzip only, not deflate. Deflate is ambiguous and
+ // not as universally supported anyway.
+ // See: https://zlib.net/zlib_faq.html#faq39
+ //
+ // Note that we don't request this for HEAD requests,
+ // due to a bug in nginx:
+ // http://trac.nginx.org/nginx/ticket/358
+ // https://golang.org/issue/5522
+ //
+ // We don't request gzip if the request is for a range, since
+ // auto-decoding a portion of a gzipped document will just fail
+ // anyway. See https://golang.org/issue/8923
+ return true
+ }
+ return false
+}
+
+// checkConnHeaders checks whether req has any invalid connection-level headers.
+//
+// https://www.rfc-editor.org/rfc/rfc9114.html#section-4.2-3
+// https://www.rfc-editor.org/rfc/rfc9113.html#section-8.2.2-1
+//
+// Certain headers are special-cased as okay but not transmitted later.
+// For example, we allow "Transfer-Encoding: chunked", but drop the header when encoding.
+func checkConnHeaders(h map[string][]string) error {
+ if vv := h["Upgrade"]; len(vv) > 0 && (vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("invalid Upgrade request header: %q", vv)
+ }
+ if vv := h["Transfer-Encoding"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && vv[0] != "chunked") {
+ return fmt.Errorf("invalid Transfer-Encoding request header: %q", vv)
+ }
+ if vv := h["Connection"]; len(vv) > 0 && (len(vv) > 1 || vv[0] != "" && !asciiEqualFold(vv[0], "close") && !asciiEqualFold(vv[0], "keep-alive")) {
+ return fmt.Errorf("invalid Connection request header: %q", vv)
+ }
+ return nil
+}
+
+func commaSeparatedTrailers(trailer map[string][]string) (string, error) {
+ keys := make([]string, 0, len(trailer))
+ for k := range trailer {
+ k = CanonicalHeader(k)
+ switch k {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ return "", fmt.Errorf("invalid Trailer key %q", k)
+ }
+ keys = append(keys, k)
+ }
+ if len(keys) > 0 {
+ sort.Strings(keys)
+ return strings.Join(keys, ","), nil
+ }
+ return "", nil
+}
+
+// validPseudoPath reports whether v is a valid :path pseudo-header
+// value. It must be either:
+//
+// - a non-empty string starting with '/'
+// - the string '*', for OPTIONS requests.
+//
+// For now this is only used a quick check for deciding when to clean
+// up Opaque URLs before sending requests from the Transport.
+// See golang.org/issue/16847
+//
+// We used to enforce that the path also didn't start with "//", but
+// Google's GFE accepts such paths and Chrome sends them, so ignore
+// that part of the spec. See golang.org/issue/19103.
+func validPseudoPath(v string) bool {
+ return (len(v) > 0 && v[0] == '/') || v == "*"
+}
+
+func validateHeaders(hdrs map[string][]string) string {
+ for k, vv := range hdrs {
+ if !httpguts.ValidHeaderFieldName(k) && k != ":protocol" {
+ return fmt.Sprintf("name %q", k)
+ }
+ for _, v := range vv {
+ if !httpguts.ValidHeaderFieldValue(v) {
+ // Don't include the value in the error,
+ // because it may be sensitive.
+ return fmt.Sprintf("value for header %q", k)
+ }
+ }
+ }
+ return ""
+}
+
+// shouldSendReqContentLength reports whether we should send
+// a "content-length" request header. This logic is basically a copy of the net/http
+// transferWriter.shouldSendContentLength.
+// The contentLength is the corrected contentLength (so 0 means actually 0, not unknown).
+// -1 means unknown.
+func shouldSendReqContentLength(method string, contentLength int64) bool {
+ if contentLength > 0 {
+ return true
+ }
+ if contentLength < 0 {
+ return false
+ }
+ // For zero bodies, whether we send a content-length depends on the method.
+ // It also kinda doesn't matter for http2 either way, with END_STREAM.
+ switch method {
+ case "POST", "PUT", "PATCH":
+ return true
+ default:
+ return false
+ }
+}
+
+// ServerRequestParam is parameters to NewServerRequest.
+type ServerRequestParam struct {
+ Method string
+ Scheme, Authority, Path string
+ Protocol string
+ Header map[string][]string
+}
+
+// ServerRequestResult is the result of NewServerRequest.
+type ServerRequestResult struct {
+ // Various http.Request fields.
+ URL *url.URL
+ RequestURI string
+ Trailer map[string][]string
+
+ NeedsContinue bool // client provided an "Expect: 100-continue" header
+
+ // If the request should be rejected, this is a short string suitable for passing
+ // to the http2 package's CountError function.
+ // It might be a bit odd to return errors this way rather than returning an error,
+ // but this ensures we don't forget to include a CountError reason.
+ InvalidReason string
+}
+
+func NewServerRequest(rp ServerRequestParam) ServerRequestResult {
+ needsContinue := httpguts.HeaderValuesContainsToken(rp.Header["Expect"], "100-continue")
+ if needsContinue {
+ delete(rp.Header, "Expect")
+ }
+ // Merge Cookie headers into one "; "-delimited value.
+ if cookies := rp.Header["Cookie"]; len(cookies) > 1 {
+ rp.Header["Cookie"] = []string{strings.Join(cookies, "; ")}
+ }
+
+ // Setup Trailers
+ var trailer map[string][]string
+ for _, v := range rp.Header["Trailer"] {
+ for _, key := range strings.Split(v, ",") {
+ key = textproto.CanonicalMIMEHeaderKey(textproto.TrimString(key))
+ switch key {
+ case "Transfer-Encoding", "Trailer", "Content-Length":
+ // Bogus. (copy of http1 rules)
+ // Ignore.
+ default:
+ if trailer == nil {
+ trailer = make(map[string][]string)
+ }
+ trailer[key] = nil
+ }
+ }
+ }
+ delete(rp.Header, "Trailer")
+
+ // "':authority' MUST NOT include the deprecated userinfo subcomponent
+ // for "http" or "https" schemed URIs."
+ // https://www.rfc-editor.org/rfc/rfc9113.html#section-8.3.1-2.3.8
+ if strings.IndexByte(rp.Authority, '@') != -1 && (rp.Scheme == "http" || rp.Scheme == "https") {
+ return ServerRequestResult{
+ InvalidReason: "userinfo_in_authority",
+ }
+ }
+
+ var url_ *url.URL
+ var requestURI string
+ if rp.Method == "CONNECT" && rp.Protocol == "" {
+ url_ = &url.URL{Host: rp.Authority}
+ requestURI = rp.Authority // mimic HTTP/1 server behavior
+ } else {
+ var err error
+ url_, err = url.ParseRequestURI(rp.Path)
+ if err != nil {
+ return ServerRequestResult{
+ InvalidReason: "bad_path",
+ }
+ }
+ requestURI = rp.Path
+ }
+
+ return ServerRequestResult{
+ URL: url_,
+ NeedsContinue: needsContinue,
+ RequestURI: requestURI,
+ Trailer: trailer,
+ }
+}
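
The new internal httpcommon.EncodeHeaders API above centralizes request validation and header encoding for HTTP/2 and HTTP/3. Since the package is internal to golang.org/x/net, the sketch below is illustrative only (it could compile only from inside that module); it shows the call shape using the fields defined in this file, with all concrete values hypothetical.

package example

import (
    "context"
    "net/url"

    "golang.org/x/net/internal/httpcommon"
)

// encodeExample collects the wire-format fields EncodeHeaders emits for a
// simple GET request: pseudo-headers first, then lowercased regular headers.
func encodeExample(ctx context.Context) ([][2]string, error) {
    u, err := url.Parse("https://example.com/search?q=go")
    if err != nil {
        return nil, err
    }
    var fields [][2]string
    _, err = httpcommon.EncodeHeaders(ctx, httpcommon.EncodeHeadersParam{
        Request: httpcommon.Request{
            URL:                 u,
            Method:              "GET",
            Header:              map[string][]string{"Accept": {"application/json"}},
            ActualContentLength: 0, // 0 means an actually-empty body; -1 means unknown
        },
        AddGzipHeader:    true,                 // emits "accept-encoding: gzip"
        DefaultUserAgent: "Go-http-client/2.0", // used only when no User-Agent header is set
    }, func(name, value string) {
        // Names arrive validated and lowercased (":method", ":path", "accept", ...).
        fields = append(fields, [2]string{name, value})
    })
    return fields, err
}
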
diff --git a/vendor/golang.org/x/net/proxy/per_host.go b/vendor/golang.org/x/net/proxy/per_host.go
index d7d4b8b6..32bdf435 100644
--- a/vendor/golang.org/x/net/proxy/per_host.go
+++ b/vendor/golang.org/x/net/proxy/per_host.go
@@ -7,6 +7,7 @@ package proxy
import (
"context"
"net"
+ "net/netip"
"strings"
)
@@ -57,7 +58,8 @@ func (p *PerHost) DialContext(ctx context.Context, network, addr string) (c net.
}
func (p *PerHost) dialerForRequest(host string) Dialer {
- if ip := net.ParseIP(host); ip != nil {
+ if nip, err := netip.ParseAddr(host); err == nil {
+ ip := net.IP(nip.AsSlice())
for _, net := range p.bypassNetworks {
if net.Contains(ip) {
return p.bypass
@@ -108,8 +110,8 @@ func (p *PerHost) AddFromString(s string) {
}
continue
}
- if ip := net.ParseIP(host); ip != nil {
- p.AddIP(ip)
+ if nip, err := netip.ParseAddr(host); err == nil {
+ p.AddIP(net.IP(nip.AsSlice()))
continue
}
if strings.HasPrefix(host, "*.") {
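
The per_host.go change above swaps net.ParseIP for netip.ParseAddr without changing the exported proxy.PerHost behaviour. A minimal usage sketch, with the proxy address and bypass patterns purely hypothetical:

package main

import (
    "fmt"

    "golang.org/x/net/proxy"
)

func main() {
    // Dial through a (hypothetical) SOCKS5 proxy by default.
    socks, err := proxy.SOCKS5("tcp", "127.0.0.1:1080", nil, proxy.Direct)
    if err != nil {
        panic(err)
    }
    // Dial bypass-listed hosts, IPs and CIDR ranges directly.
    ph := proxy.NewPerHost(socks, proxy.Direct)
    ph.AddFromString("10.0.0.0/8,192.168.1.1,*.internal.example,localhost")

    conn, err := ph.Dial("tcp", "example.com:80") // not in the bypass list, so uses the proxy
    if err != nil {
        fmt.Println("dial:", err)
        return
    }
    conn.Close()
}
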
diff --git a/vendor/golang.org/x/net/websocket/websocket.go b/vendor/golang.org/x/net/websocket/websocket.go
index ac76165c..3448d203 100644
--- a/vendor/golang.org/x/net/websocket/websocket.go
+++ b/vendor/golang.org/x/net/websocket/websocket.go
@@ -6,9 +6,10 @@
// as specified in RFC 6455.
//
// This package currently lacks some features found in an alternative
-// and more actively maintained WebSocket package:
+// and more actively maintained WebSocket packages:
//
-// https://pkg.go.dev/github.com/coder/websocket
+// - [github.com/gorilla/websocket]
+// - [github.com/coder/websocket]
package websocket // import "golang.org/x/net/websocket"
import (
diff --git a/vendor/golang.org/x/sync/errgroup/errgroup.go b/vendor/golang.org/x/sync/errgroup/errgroup.go
index 948a3ee6..f8c3c092 100644
--- a/vendor/golang.org/x/sync/errgroup/errgroup.go
+++ b/vendor/golang.org/x/sync/errgroup/errgroup.go
@@ -18,7 +18,7 @@ import (
type token struct{}
// A Group is a collection of goroutines working on subtasks that are part of
-// the same overall task.
+// the same overall task. A Group should not be reused for different tasks.
//
// A zero Group is valid, has no limit on the number of active goroutines,
// and does not cancel on error.
@@ -46,7 +46,7 @@ func (g *Group) done() {
// returns a non-nil error or the first time Wait returns, whichever occurs
// first.
func WithContext(ctx context.Context) (*Group, context.Context) {
- ctx, cancel := withCancelCause(ctx)
+ ctx, cancel := context.WithCancelCause(ctx)
return &Group{cancel: cancel}, ctx
}
@@ -61,6 +61,7 @@ func (g *Group) Wait() error {
}
// Go calls the given function in a new goroutine.
+// The first call to Go must happen before a Wait.
// It blocks until the new goroutine can be added without the number of
// active goroutines in the group exceeding the configured limit.
//
@@ -118,6 +119,7 @@ func (g *Group) TryGo(f func() error) bool {
// SetLimit limits the number of active goroutines in this group to at most n.
// A negative value indicates no limit.
+// A limit of zero will prevent any new goroutines from being added.
//
// Any subsequent call to the Go method will block until it can add an active
// goroutine without exceeding the configured limit.
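
The errgroup documentation changes above spell out usage constraints: a Group is single-use, the first Go must precede Wait, and SetLimit(0) blocks all Go calls. A short sketch of the documented pattern, with hypothetical inputs:

package main

import (
    "context"
    "fmt"

    "golang.org/x/sync/errgroup"
)

func main() {
    g, ctx := errgroup.WithContext(context.Background())
    g.SetLimit(4) // at most 4 goroutines active at once; don't change while goroutines are running

    urls := []string{"https://example.com/a", "https://example.com/b"} // hypothetical inputs
    for _, u := range urls {
        u := u // capture for the closure (needed before Go 1.22)
        g.Go(func() error {
            select {
            case <-ctx.Done():
                // ctx is cancelled as soon as any other goroutine returns an error.
                return ctx.Err()
            default:
                fmt.Println("processing", u)
                return nil
            }
        })
    }
    if err := g.Wait(); err != nil { // the first Go call above happened before Wait, as documented
        fmt.Println("error:", err)
    }
}
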
diff --git a/vendor/golang.org/x/sync/errgroup/go120.go b/vendor/golang.org/x/sync/errgroup/go120.go
deleted file mode 100644
index f93c740b..00000000
--- a/vendor/golang.org/x/sync/errgroup/go120.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.20
-
-package errgroup
-
-import "context"
-
-func withCancelCause(parent context.Context) (context.Context, func(error)) {
- return context.WithCancelCause(parent)
-}
diff --git a/vendor/golang.org/x/sync/errgroup/pre_go120.go b/vendor/golang.org/x/sync/errgroup/pre_go120.go
deleted file mode 100644
index 88ce3343..00000000
--- a/vendor/golang.org/x/sync/errgroup/pre_go120.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2023 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build !go1.20
-
-package errgroup
-
-import "context"
-
-func withCancelCause(parent context.Context) (context.Context, func(error)) {
- ctx, cancel := context.WithCancel(parent)
- return ctx, func(error) { cancel() }
-}
diff --git a/vendor/golang.org/x/sys/cpu/cpu.go b/vendor/golang.org/x/sys/cpu/cpu.go
index 02609d5b..2e73ee19 100644
--- a/vendor/golang.org/x/sys/cpu/cpu.go
+++ b/vendor/golang.org/x/sys/cpu/cpu.go
@@ -72,6 +72,9 @@ var X86 struct {
HasSSSE3 bool // Supplemental streaming SIMD extension 3
HasSSE41 bool // Streaming SIMD extension 4 and 4.1
HasSSE42 bool // Streaming SIMD extension 4 and 4.2
+ HasAVXIFMA bool // Advanced vector extension Integer Fused Multiply Add
+ HasAVXVNNI bool // Advanced vector extension Vector Neural Network Instructions
+ HasAVXVNNIInt8 bool // Advanced vector extension Vector Neural Network Int8 instructions
_ CacheLinePad
}
@@ -146,6 +149,18 @@ var ARM struct {
_ CacheLinePad
}
+// The booleans in Loong64 contain the correspondingly named cpu feature bit.
+// The struct is padded to avoid false sharing.
+var Loong64 struct {
+ _ CacheLinePad
+ HasLSX bool // support 128-bit vector extension
+ HasLASX bool // support 256-bit vector extension
+ HasCRC32 bool // support CRC instruction
+ HasLAM_BH bool // support AM{SWAP/ADD}[_DB].{B/H} instruction
+ HasLAMCAS bool // support AMCAS[_DB].{B/H/W/D} instruction
+ _ CacheLinePad
+}
+
// MIPS64X contains the supported CPU features of the current mips64/mips64le
// platforms. If the current platform is not mips64/mips64le or the current
// operating system is not Linux then all feature flags are false.
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go
new file mode 100644
index 00000000..4f341143
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_loong64.go
@@ -0,0 +1,22 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cpu
+
+// HWCAP bits. These are exposed by the Linux kernel.
+const (
+ hwcap_LOONGARCH_LSX = 1 << 4
+ hwcap_LOONGARCH_LASX = 1 << 5
+)
+
+func doinit() {
+ // TODO: Features that require kernel support like LSX and LASX can
+ // be detected here once needed in std library or by the compiler.
+ Loong64.HasLSX = hwcIsSet(hwCap, hwcap_LOONGARCH_LSX)
+ Loong64.HasLASX = hwcIsSet(hwCap, hwcap_LOONGARCH_LASX)
+}
+
+func hwcIsSet(hwc uint, val uint) bool {
+ return hwc&val != 0
+}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
index 7d902b68..a428dec9 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_linux_noinit.go
@@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-//go:build linux && !arm && !arm64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64
+//go:build linux && !arm && !arm64 && !loong64 && !mips64 && !mips64le && !ppc64 && !ppc64le && !s390x && !riscv64
package cpu
diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.go b/vendor/golang.org/x/sys/cpu/cpu_loong64.go
index 55863585..45ecb29a 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_loong64.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.go
@@ -8,5 +8,43 @@ package cpu
const cacheLineSize = 64
+// Bit fields for CPUCFG registers, Related reference documents:
+// https://loongson.github.io/LoongArch-Documentation/LoongArch-Vol1-EN.html#_cpucfg
+const (
+ // CPUCFG1 bits
+ cpucfg1_CRC32 = 1 << 25
+
+ // CPUCFG2 bits
+ cpucfg2_LAM_BH = 1 << 27
+ cpucfg2_LAMCAS = 1 << 28
+)
+
func initOptions() {
+ options = []option{
+ {Name: "lsx", Feature: &Loong64.HasLSX},
+ {Name: "lasx", Feature: &Loong64.HasLASX},
+ {Name: "crc32", Feature: &Loong64.HasCRC32},
+ {Name: "lam_bh", Feature: &Loong64.HasLAM_BH},
+ {Name: "lamcas", Feature: &Loong64.HasLAMCAS},
+ }
+
+ // The CPUCFG data on Loong64 only reflects the hardware capabilities,
+ // not the kernel support status, so features such as LSX and LASX that
+ // require kernel support cannot be obtained from the CPUCFG data.
+ //
+ // These features only require hardware capability support and do not
+ // require kernel specific support, so they can be obtained directly
+ // through CPUCFG
+ cfg1 := get_cpucfg(1)
+ cfg2 := get_cpucfg(2)
+
+ Loong64.HasCRC32 = cfgIsSet(cfg1, cpucfg1_CRC32)
+ Loong64.HasLAMCAS = cfgIsSet(cfg2, cpucfg2_LAMCAS)
+ Loong64.HasLAM_BH = cfgIsSet(cfg2, cpucfg2_LAM_BH)
+}
+
+func get_cpucfg(reg uint32) uint32
+
+func cfgIsSet(cfg uint32, val uint32) bool {
+ return cfg&val != 0
}
diff --git a/vendor/golang.org/x/sys/cpu/cpu_loong64.s b/vendor/golang.org/x/sys/cpu/cpu_loong64.s
new file mode 100644
index 00000000..71cbaf1c
--- /dev/null
+++ b/vendor/golang.org/x/sys/cpu/cpu_loong64.s
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+// func get_cpucfg(reg uint32) uint32
+TEXT ·get_cpucfg(SB), NOSPLIT|NOFRAME, $0
+ MOVW reg+0(FP), R5
+ // CPUCFG R5, R4 = 0x00006ca4
+ WORD $0x00006ca4
+ MOVW R4, ret+8(FP)
+ RET
diff --git a/vendor/golang.org/x/sys/cpu/cpu_x86.go b/vendor/golang.org/x/sys/cpu/cpu_x86.go
index 600a6807..1e642f33 100644
--- a/vendor/golang.org/x/sys/cpu/cpu_x86.go
+++ b/vendor/golang.org/x/sys/cpu/cpu_x86.go
@@ -53,6 +53,9 @@ func initOptions() {
{Name: "sse41", Feature: &X86.HasSSE41},
{Name: "sse42", Feature: &X86.HasSSE42},
{Name: "ssse3", Feature: &X86.HasSSSE3},
+ {Name: "avxifma", Feature: &X86.HasAVXIFMA},
+ {Name: "avxvnni", Feature: &X86.HasAVXVNNI},
+ {Name: "avxvnniint8", Feature: &X86.HasAVXVNNIInt8},
// These capabilities should always be enabled on amd64:
{Name: "sse2", Feature: &X86.HasSSE2, Required: runtime.GOARCH == "amd64"},
@@ -106,7 +109,7 @@ func archInit() {
return
}
- _, ebx7, ecx7, edx7 := cpuid(7, 0)
+ eax7, ebx7, ecx7, edx7 := cpuid(7, 0)
X86.HasBMI1 = isSet(3, ebx7)
X86.HasAVX2 = isSet(5, ebx7) && osSupportsAVX
X86.HasBMI2 = isSet(8, ebx7)
@@ -134,14 +137,24 @@ func archInit() {
X86.HasAVX512VAES = isSet(9, ecx7)
X86.HasAVX512VBMI2 = isSet(6, ecx7)
X86.HasAVX512BITALG = isSet(12, ecx7)
-
- eax71, _, _, _ := cpuid(7, 1)
- X86.HasAVX512BF16 = isSet(5, eax71)
}
X86.HasAMXTile = isSet(24, edx7)
X86.HasAMXInt8 = isSet(25, edx7)
X86.HasAMXBF16 = isSet(22, edx7)
+
+ // These features depend on the second level of extended features.
+ if eax7 >= 1 {
+ eax71, _, _, edx71 := cpuid(7, 1)
+ if X86.HasAVX512 {
+ X86.HasAVX512BF16 = isSet(5, eax71)
+ }
+ if X86.HasAVX {
+ X86.HasAVXIFMA = isSet(23, eax71)
+ X86.HasAVXVNNI = isSet(4, eax71)
+ X86.HasAVXVNNIInt8 = isSet(4, edx71)
+ }
+ }
}
func isSet(bitpos uint, value uint32) bool {
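
The new AVX and Loong64 feature bits above are read through the usual golang.org/x/sys/cpu API, which populates them at package init; a quick sketch of gating a code path on them (the kernel names are hypothetical):

package main

import (
    "fmt"

    "golang.org/x/sys/cpu"
)

func main() {
    if cpu.X86.HasAVXVNNI {
        fmt.Println("using an AVX-VNNI dot-product kernel")
    } else {
        fmt.Println("falling back to a generic kernel")
    }
    // The new Loong64 struct is read the same way; its fields are only
    // populated on loong64 builds but are always declared.
    fmt.Println("LSX:", cpu.Loong64.HasLSX, "LASX:", cpu.Loong64.HasLASX)
}
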
diff --git a/vendor/golang.org/x/sys/cpu/parse.go b/vendor/golang.org/x/sys/cpu/parse.go
index 762b63d6..56a7e1a1 100644
--- a/vendor/golang.org/x/sys/cpu/parse.go
+++ b/vendor/golang.org/x/sys/cpu/parse.go
@@ -13,7 +13,7 @@ import "strconv"
// https://golang.org/cl/209597.
func parseRelease(rel string) (major, minor, patch int, ok bool) {
// Strip anything after a dash or plus.
- for i := 0; i < len(rel); i++ {
+ for i := range len(rel) {
if rel[i] == '-' || rel[i] == '+' {
rel = rel[:i]
break
@@ -21,7 +21,7 @@ func parseRelease(rel string) (major, minor, patch int, ok bool) {
}
next := func() (int, bool) {
- for i := 0; i < len(rel); i++ {
+ for i := range len(rel) {
if rel[i] == '.' {
ver, err := strconv.Atoi(rel[:i])
rel = rel[i+1:]
diff --git a/vendor/golang.org/x/sys/unix/auxv.go b/vendor/golang.org/x/sys/unix/auxv.go
new file mode 100644
index 00000000..37a82528
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv.go
@@ -0,0 +1,36 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import (
+ "syscall"
+ "unsafe"
+)
+
+//go:linkname runtime_getAuxv runtime.getAuxv
+func runtime_getAuxv() []uintptr
+
+// Auxv returns the ELF auxiliary vector as a sequence of key/value pairs.
+// The returned slice is always a fresh copy, owned by the caller.
+// It returns an error on non-ELF platforms, or if the auxiliary vector cannot be accessed,
+// which happens in some locked-down environments and build modes.
+func Auxv() ([][2]uintptr, error) {
+ vec := runtime_getAuxv()
+ vecLen := len(vec)
+
+ if vecLen == 0 {
+ return nil, syscall.ENOENT
+ }
+
+ if vecLen%2 != 0 {
+ return nil, syscall.EINVAL
+ }
+
+ result := make([]uintptr, vecLen)
+ copy(result, vec)
+ return unsafe.Slice((*[2]uintptr)(unsafe.Pointer(&result[0])), vecLen/2), nil
+}
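
unix.Auxv above returns the raw ELF auxiliary vector as key/value pairs. A small sketch that looks up the page-size entry; the AT_PAGESZ key (6) is the standard ELF value, defined locally here since the constant is not part of this diff:

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

// atPagesz is the standard ELF AT_PAGESZ key, defined here for the example.
const atPagesz = 6

func main() {
    auxv, err := unix.Auxv()
    if err != nil {
        fmt.Println("auxv unavailable:", err) // e.g. ENOENT in locked-down environments
        return
    }
    for _, kv := range auxv {
        if kv[0] == atPagesz {
            fmt.Println("page size:", kv[1])
        }
    }
}
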
diff --git a/vendor/golang.org/x/sys/unix/auxv_unsupported.go b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
new file mode 100644
index 00000000..1200487f
--- /dev/null
+++ b/vendor/golang.org/x/sys/unix/auxv_unsupported.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.21 && (aix || darwin || dragonfly || freebsd || linux || netbsd || openbsd || solaris || zos)
+
+package unix
+
+import "syscall"
+
+func Auxv() ([][2]uintptr, error) {
+ return nil, syscall.ENOTSUP
+}
diff --git a/vendor/golang.org/x/sys/unix/syscall_darwin.go b/vendor/golang.org/x/sys/unix/syscall_darwin.go
index 099867de..798f61ad 100644
--- a/vendor/golang.org/x/sys/unix/syscall_darwin.go
+++ b/vendor/golang.org/x/sys/unix/syscall_darwin.go
@@ -602,7 +602,150 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
return
}
-//sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
+// sys connectx(fd int, endpoints *SaEndpoints, associd SaeAssocID, flags uint32, iov []Iovec, n *uintptr, connid *SaeConnID) (err error)
+const minIovec = 8
+
+func Readv(fd int, iovs [][]byte) (n int, err error) {
+ if !darwinKernelVersionMin(11, 0, 0) {
+ return 0, ENOSYS
+ }
+
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ n, err = readv(fd, iovecs)
+ readvRacedetect(iovecs, n, err)
+ return n, err
+}
+
+func Preadv(fd int, iovs [][]byte, offset int64) (n int, err error) {
+ if !darwinKernelVersionMin(11, 0, 0) {
+ return 0, ENOSYS
+ }
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ n, err = preadv(fd, iovecs, offset)
+ readvRacedetect(iovecs, n, err)
+ return n, err
+}
+
+func Writev(fd int, iovs [][]byte) (n int, err error) {
+ if !darwinKernelVersionMin(11, 0, 0) {
+ return 0, ENOSYS
+ }
+
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = writev(fd, iovecs)
+ writevRacedetect(iovecs, n)
+ return n, err
+}
+
+func Pwritev(fd int, iovs [][]byte, offset int64) (n int, err error) {
+ if !darwinKernelVersionMin(11, 0, 0) {
+ return 0, ENOSYS
+ }
+
+ iovecs := make([]Iovec, 0, minIovec)
+ iovecs = appendBytes(iovecs, iovs)
+ if raceenabled {
+ raceReleaseMerge(unsafe.Pointer(&ioSync))
+ }
+ n, err = pwritev(fd, iovecs, offset)
+ writevRacedetect(iovecs, n)
+ return n, err
+}
+
+func appendBytes(vecs []Iovec, bs [][]byte) []Iovec {
+ for _, b := range bs {
+ var v Iovec
+ v.SetLen(len(b))
+ if len(b) > 0 {
+ v.Base = &b[0]
+ } else {
+ v.Base = (*byte)(unsafe.Pointer(&_zero))
+ }
+ vecs = append(vecs, v)
+ }
+ return vecs
+}
+
+func writevRacedetect(iovecs []Iovec, n int) {
+ if !raceenabled {
+ return
+ }
+ for i := 0; n > 0 && i < len(iovecs); i++ {
+ m := int(iovecs[i].Len)
+ if m > n {
+ m = n
+ }
+ n -= m
+ if m > 0 {
+ raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
+ }
+ }
+}
+
+func readvRacedetect(iovecs []Iovec, n int, err error) {
+ if !raceenabled {
+ return
+ }
+ for i := 0; n > 0 && i < len(iovecs); i++ {
+ m := int(iovecs[i].Len)
+ if m > n {
+ m = n
+ }
+ n -= m
+ if m > 0 {
+ raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
+ }
+ }
+ if err == nil {
+ raceAcquire(unsafe.Pointer(&ioSync))
+ }
+}
+
+func darwinMajorMinPatch() (maj, min, patch int, err error) {
+ var un Utsname
+ err = Uname(&un)
+ if err != nil {
+ return
+ }
+
+ var mmp [3]int
+ c := 0
+Loop:
+ for _, b := range un.Release[:] {
+ switch {
+ case b >= '0' && b <= '9':
+ mmp[c] = 10*mmp[c] + int(b-'0')
+ case b == '.':
+ c++
+ if c > 2 {
+ return 0, 0, 0, ENOTSUP
+ }
+ case b == 0:
+ break Loop
+ default:
+ return 0, 0, 0, ENOTSUP
+ }
+ }
+ if c != 2 {
+ return 0, 0, 0, ENOTSUP
+ }
+ return mmp[0], mmp[1], mmp[2], nil
+}
+
+func darwinKernelVersionMin(maj, min, patch int) bool {
+ actualMaj, actualMin, actualPatch, err := darwinMajorMinPatch()
+ if err != nil {
+ return false
+ }
+ return actualMaj > maj || actualMaj == maj && (actualMin > min || actualMin == min && actualPatch >= patch)
+}
+
//sys sendfile(infd int, outfd int, offset int64, len *int64, hdtr unsafe.Pointer, flags int) (err error)
//sys shmat(id int, addr uintptr, flag int) (ret uintptr, err error)
@@ -705,3 +848,7 @@ func Connectx(fd int, srcIf uint32, srcAddr, dstAddr Sockaddr, associd SaeAssocI
//sys write(fd int, p []byte) (n int, err error)
//sys mmap(addr uintptr, length uintptr, prot int, flag int, fd int, pos int64) (ret uintptr, err error)
//sys munmap(addr uintptr, length uintptr) (err error)
+//sys readv(fd int, iovecs []Iovec) (n int, err error)
+//sys preadv(fd int, iovecs []Iovec, offset int64) (n int, err error)
+//sys writev(fd int, iovecs []Iovec) (n int, err error)
+//sys pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error)
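
With these additions the unix.Readv/Preadv/Writev/Pwritev wrappers work on darwin as well, returning ENOSYS on kernels older than Darwin 11 per the version check above. A minimal vectored-write sketch:

package main

import (
    "fmt"

    "golang.org/x/sys/unix"
)

func main() {
    // Write two buffers to stdout with a single writev(2) call.
    bufs := [][]byte{[]byte("hello, "), []byte("vectored world\n")}
    n, err := unix.Writev(1, bufs)
    if err != nil {
        fmt.Println("writev:", err) // unix.ENOSYS on unsupported Darwin kernels
        return
    }
    fmt.Println("wrote", n, "bytes")
}
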
diff --git a/vendor/golang.org/x/sys/unix/syscall_linux.go b/vendor/golang.org/x/sys/unix/syscall_linux.go
index 230a9454..4958a657 100644
--- a/vendor/golang.org/x/sys/unix/syscall_linux.go
+++ b/vendor/golang.org/x/sys/unix/syscall_linux.go
@@ -13,6 +13,7 @@ package unix
import (
"encoding/binary"
+ "slices"
"strconv"
"syscall"
"time"
@@ -417,7 +418,7 @@ func (sa *SockaddrUnix) sockaddr() (unsafe.Pointer, _Socklen, error) {
return nil, 0, EINVAL
}
sa.raw.Family = AF_UNIX
- for i := 0; i < n; i++ {
+ for i := range n {
sa.raw.Path[i] = int8(name[i])
}
// length is family (uint16), name, NUL.
@@ -507,7 +508,7 @@ func (sa *SockaddrL2) sockaddr() (unsafe.Pointer, _Socklen, error) {
psm := (*[2]byte)(unsafe.Pointer(&sa.raw.Psm))
psm[0] = byte(sa.PSM)
psm[1] = byte(sa.PSM >> 8)
- for i := 0; i < len(sa.Addr); i++ {
+ for i := range len(sa.Addr) {
sa.raw.Bdaddr[i] = sa.Addr[len(sa.Addr)-1-i]
}
cid := (*[2]byte)(unsafe.Pointer(&sa.raw.Cid))
@@ -589,11 +590,11 @@ func (sa *SockaddrCAN) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_CAN
sa.raw.Ifindex = int32(sa.Ifindex)
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i] = rx[i]
}
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i+4] = tx[i]
}
return unsafe.Pointer(&sa.raw), SizeofSockaddrCAN, nil
@@ -618,11 +619,11 @@ func (sa *SockaddrCANJ1939) sockaddr() (unsafe.Pointer, _Socklen, error) {
sa.raw.Family = AF_CAN
sa.raw.Ifindex = int32(sa.Ifindex)
n := (*[8]byte)(unsafe.Pointer(&sa.Name))
- for i := 0; i < 8; i++ {
+ for i := range 8 {
sa.raw.Addr[i] = n[i]
}
p := (*[4]byte)(unsafe.Pointer(&sa.PGN))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
sa.raw.Addr[i+8] = p[i]
}
sa.raw.Addr[12] = sa.Addr
@@ -911,7 +912,7 @@ func (sa *SockaddrIUCV) sockaddr() (unsafe.Pointer, _Socklen, error) {
// These are EBCDIC encoded by the kernel, but we still need to pad them
// with blanks. Initializing with blanks allows the caller to feed in either
// a padded or an unpadded string.
- for i := 0; i < 8; i++ {
+ for i := range 8 {
sa.raw.Nodeid[i] = ' '
sa.raw.User_id[i] = ' '
sa.raw.Name[i] = ' '
@@ -1148,7 +1149,7 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
var user [8]byte
var name [8]byte
- for i := 0; i < 8; i++ {
+ for i := range 8 {
user[i] = byte(pp.User_id[i])
name[i] = byte(pp.Name[i])
}
@@ -1173,11 +1174,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
Ifindex: int(pp.Ifindex),
}
name := (*[8]byte)(unsafe.Pointer(&sa.Name))
- for i := 0; i < 8; i++ {
+ for i := range 8 {
name[i] = pp.Addr[i]
}
pgn := (*[4]byte)(unsafe.Pointer(&sa.PGN))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
pgn[i] = pp.Addr[i+8]
}
addr := (*[1]byte)(unsafe.Pointer(&sa.Addr))
@@ -1188,11 +1189,11 @@ func anyToSockaddr(fd int, rsa *RawSockaddrAny) (Sockaddr, error) {
Ifindex: int(pp.Ifindex),
}
rx := (*[4]byte)(unsafe.Pointer(&sa.RxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
rx[i] = pp.Addr[i]
}
tx := (*[4]byte)(unsafe.Pointer(&sa.TxID))
- for i := 0; i < 4; i++ {
+ for i := range 4 {
tx[i] = pp.Addr[i+4]
}
return sa, nil
@@ -2216,10 +2217,7 @@ func readvRacedetect(iovecs []Iovec, n int, err error) {
return
}
for i := 0; n > 0 && i < len(iovecs); i++ {
- m := int(iovecs[i].Len)
- if m > n {
- m = n
- }
+ m := min(int(iovecs[i].Len), n)
n -= m
if m > 0 {
raceWriteRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2270,10 +2268,7 @@ func writevRacedetect(iovecs []Iovec, n int) {
return
}
for i := 0; n > 0 && i < len(iovecs); i++ {
- m := int(iovecs[i].Len)
- if m > n {
- m = n
- }
+ m := min(int(iovecs[i].Len), n)
n -= m
if m > 0 {
raceReadRange(unsafe.Pointer(iovecs[i].Base), m)
@@ -2320,12 +2315,7 @@ func isGroupMember(gid int) bool {
return false
}
- for _, g := range groups {
- if g == gid {
- return true
- }
- }
- return false
+ return slices.Contains(groups, gid)
}
func isCapDacOverrideSet() bool {
diff --git a/vendor/golang.org/x/sys/unix/syscall_solaris.go b/vendor/golang.org/x/sys/unix/syscall_solaris.go
index 21974af0..abc39554 100644
--- a/vendor/golang.org/x/sys/unix/syscall_solaris.go
+++ b/vendor/golang.org/x/sys/unix/syscall_solaris.go
@@ -1102,3 +1102,90 @@ func (s *Strioctl) SetInt(i int) {
func IoctlSetStrioctlRetInt(fd int, req int, s *Strioctl) (int, error) {
return ioctlPtrRet(fd, req, unsafe.Pointer(s))
}
+
+// Ucred Helpers
+// See ucred(3c) and getpeerucred(3c)
+
+//sys getpeerucred(fd uintptr, ucred *uintptr) (err error)
+//sys ucredFree(ucred uintptr) = ucred_free
+//sys ucredGet(pid int) (ucred uintptr, err error) = ucred_get
+//sys ucredGeteuid(ucred uintptr) (uid int) = ucred_geteuid
+//sys ucredGetegid(ucred uintptr) (gid int) = ucred_getegid
+//sys ucredGetruid(ucred uintptr) (uid int) = ucred_getruid
+//sys ucredGetrgid(ucred uintptr) (gid int) = ucred_getrgid
+//sys ucredGetsuid(ucred uintptr) (uid int) = ucred_getsuid
+//sys ucredGetsgid(ucred uintptr) (gid int) = ucred_getsgid
+//sys ucredGetpid(ucred uintptr) (pid int) = ucred_getpid
+
+// Ucred is an opaque struct that holds user credentials.
+type Ucred struct {
+ ucred uintptr
+}
+
+// We need to ensure that ucredFree is called on the underlying ucred
+// when the Ucred is garbage collected.
+func ucredFinalizer(u *Ucred) {
+ ucredFree(u.ucred)
+}
+
+func GetPeerUcred(fd uintptr) (*Ucred, error) {
+ var ucred uintptr
+ err := getpeerucred(fd, &ucred)
+ if err != nil {
+ return nil, err
+ }
+ result := &Ucred{
+ ucred: ucred,
+ }
+ // set the finalizer on the result so that the ucred will be freed
+ runtime.SetFinalizer(result, ucredFinalizer)
+ return result, nil
+}
+
+func UcredGet(pid int) (*Ucred, error) {
+ ucred, err := ucredGet(pid)
+ if err != nil {
+ return nil, err
+ }
+ result := &Ucred{
+ ucred: ucred,
+ }
+ // set the finalizer on the result so that the ucred will be freed
+ runtime.SetFinalizer(result, ucredFinalizer)
+ return result, nil
+}
+
+func (u *Ucred) Geteuid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGeteuid(u.ucred)
+}
+
+func (u *Ucred) Getruid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetruid(u.ucred)
+}
+
+func (u *Ucred) Getsuid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetsuid(u.ucred)
+}
+
+func (u *Ucred) Getegid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetegid(u.ucred)
+}
+
+func (u *Ucred) Getrgid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetrgid(u.ucred)
+}
+
+func (u *Ucred) Getsgid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetsgid(u.ucred)
+}
+
+func (u *Ucred) Getpid() int {
+ defer runtime.KeepAlive(u)
+ return ucredGetpid(u.ucred)
+}
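
The new Solaris ucred helpers wrap getpeerucred(3c) and ucred_get(3c). A sketch that logs the peer credentials of an accepted Unix-domain connection; the socket path is hypothetical, and the code only builds where these wrappers exist (GOOS=solaris/illumos):

//go:build solaris || illumos

package main

import (
    "fmt"
    "net"

    "golang.org/x/sys/unix"
)

func logPeer(c *net.UnixConn) error {
    raw, err := c.SyscallConn()
    if err != nil {
        return err
    }
    var (
        cred *unix.Ucred
        uerr error
    )
    if err := raw.Control(func(fd uintptr) {
        cred, uerr = unix.GetPeerUcred(fd)
    }); err != nil {
        return err
    }
    if uerr != nil {
        return uerr
    }
    fmt.Printf("peer pid=%d euid=%d egid=%d\n", cred.Getpid(), cred.Geteuid(), cred.Getegid())
    return nil
}

func main() {
    ln, err := net.Listen("unix", "/tmp/example.sock") // hypothetical path
    if err != nil {
        panic(err)
    }
    defer ln.Close()
    conn, err := ln.Accept()
    if err != nil {
        panic(err)
    }
    defer conn.Close()
    if err := logPeer(conn.(*net.UnixConn)); err != nil {
        fmt.Println("ucred:", err)
    }
}
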
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux.go b/vendor/golang.org/x/sys/unix/zerrors_linux.go
index 6ebc48b3..4f432bfe 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux.go
@@ -1245,6 +1245,7 @@ const (
FAN_REPORT_DFID_NAME = 0xc00
FAN_REPORT_DFID_NAME_TARGET = 0x1e00
FAN_REPORT_DIR_FID = 0x400
+ FAN_REPORT_FD_ERROR = 0x2000
FAN_REPORT_FID = 0x200
FAN_REPORT_NAME = 0x800
FAN_REPORT_PIDFD = 0x80
@@ -1330,8 +1331,10 @@ const (
FUSE_SUPER_MAGIC = 0x65735546
FUTEXFS_SUPER_MAGIC = 0xbad1dea
F_ADD_SEALS = 0x409
+ F_CREATED_QUERY = 0x404
F_DUPFD = 0x0
F_DUPFD_CLOEXEC = 0x406
+ F_DUPFD_QUERY = 0x403
F_EXLCK = 0x4
F_GETFD = 0x1
F_GETFL = 0x3
@@ -1551,6 +1554,7 @@ const (
IPPROTO_ROUTING = 0x2b
IPPROTO_RSVP = 0x2e
IPPROTO_SCTP = 0x84
+ IPPROTO_SMC = 0x100
IPPROTO_TCP = 0x6
IPPROTO_TP = 0x1d
IPPROTO_UDP = 0x11
@@ -1623,6 +1627,8 @@ const (
IPV6_UNICAST_IF = 0x4c
IPV6_USER_FLOW = 0xe
IPV6_V6ONLY = 0x1a
+ IPV6_VERSION = 0x60
+ IPV6_VERSION_MASK = 0xf0
IPV6_XFRM_POLICY = 0x23
IP_ADD_MEMBERSHIP = 0x23
IP_ADD_SOURCE_MEMBERSHIP = 0x27
@@ -1867,6 +1873,7 @@ const (
MADV_UNMERGEABLE = 0xd
MADV_WILLNEED = 0x3
MADV_WIPEONFORK = 0x12
+ MAP_DROPPABLE = 0x8
MAP_FILE = 0x0
MAP_FIXED = 0x10
MAP_FIXED_NOREPLACE = 0x100000
@@ -1967,6 +1974,7 @@ const (
MSG_PEEK = 0x2
MSG_PROXY = 0x10
MSG_RST = 0x1000
+ MSG_SOCK_DEVMEM = 0x2000000
MSG_SYN = 0x400
MSG_TRUNC = 0x20
MSG_TRYHARD = 0x4
@@ -2083,6 +2091,7 @@ const (
NFC_ATR_REQ_MAXSIZE = 0x40
NFC_ATR_RES_GB_MAXSIZE = 0x2f
NFC_ATR_RES_MAXSIZE = 0x40
+ NFC_ATS_MAXSIZE = 0x14
NFC_COMM_ACTIVE = 0x0
NFC_COMM_PASSIVE = 0x1
NFC_DEVICE_NAME_MAXSIZE = 0x8
@@ -2163,6 +2172,7 @@ const (
NFNL_SUBSYS_QUEUE = 0x3
NFNL_SUBSYS_ULOG = 0x4
NFS_SUPER_MAGIC = 0x6969
+ NFT_BITWISE_BOOL = 0x0
NFT_CHAIN_FLAGS = 0x7
NFT_CHAIN_MAXNAMELEN = 0x100
NFT_CT_MAX = 0x17
@@ -2491,6 +2501,7 @@ const (
PR_GET_PDEATHSIG = 0x2
PR_GET_SECCOMP = 0x15
PR_GET_SECUREBITS = 0x1b
+ PR_GET_SHADOW_STACK_STATUS = 0x4a
PR_GET_SPECULATION_CTRL = 0x34
PR_GET_TAGGED_ADDR_CTRL = 0x38
PR_GET_THP_DISABLE = 0x2a
@@ -2499,6 +2510,7 @@ const (
PR_GET_TIMING = 0xd
PR_GET_TSC = 0x19
PR_GET_UNALIGN = 0x5
+ PR_LOCK_SHADOW_STACK_STATUS = 0x4c
PR_MCE_KILL = 0x21
PR_MCE_KILL_CLEAR = 0x0
PR_MCE_KILL_DEFAULT = 0x2
@@ -2525,6 +2537,8 @@ const (
PR_PAC_GET_ENABLED_KEYS = 0x3d
PR_PAC_RESET_KEYS = 0x36
PR_PAC_SET_ENABLED_KEYS = 0x3c
+ PR_PMLEN_MASK = 0x7f000000
+ PR_PMLEN_SHIFT = 0x18
PR_PPC_DEXCR_CTRL_CLEAR = 0x4
PR_PPC_DEXCR_CTRL_CLEAR_ONEXEC = 0x10
PR_PPC_DEXCR_CTRL_EDITABLE = 0x1
@@ -2592,6 +2606,7 @@ const (
PR_SET_PTRACER = 0x59616d61
PR_SET_SECCOMP = 0x16
PR_SET_SECUREBITS = 0x1c
+ PR_SET_SHADOW_STACK_STATUS = 0x4b
PR_SET_SPECULATION_CTRL = 0x35
PR_SET_SYSCALL_USER_DISPATCH = 0x3b
PR_SET_TAGGED_ADDR_CTRL = 0x37
@@ -2602,6 +2617,9 @@ const (
PR_SET_UNALIGN = 0x6
PR_SET_VMA = 0x53564d41
PR_SET_VMA_ANON_NAME = 0x0
+ PR_SHADOW_STACK_ENABLE = 0x1
+ PR_SHADOW_STACK_PUSH = 0x4
+ PR_SHADOW_STACK_WRITE = 0x2
PR_SME_GET_VL = 0x40
PR_SME_SET_VL = 0x3f
PR_SME_SET_VL_ONEXEC = 0x40000
@@ -2911,7 +2929,6 @@ const (
RTM_NEWNEXTHOP = 0x68
RTM_NEWNEXTHOPBUCKET = 0x74
RTM_NEWNSID = 0x58
- RTM_NEWNVLAN = 0x70
RTM_NEWPREFIX = 0x34
RTM_NEWQDISC = 0x24
RTM_NEWROUTE = 0x18
@@ -2920,6 +2937,7 @@ const (
RTM_NEWTCLASS = 0x28
RTM_NEWTFILTER = 0x2c
RTM_NEWTUNNEL = 0x78
+ RTM_NEWVLAN = 0x70
RTM_NR_FAMILIES = 0x1b
RTM_NR_MSGTYPES = 0x6c
RTM_SETDCB = 0x4f
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
index c0d45e32..75207613 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_386.go
@@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -304,6 +306,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
index c731d24f..c68acda5 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_amd64.go
@@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -305,6 +307,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
index 680018a4..a8c607ab 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -310,6 +312,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
index a63909f3..18563dd8 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_arm64.go
@@ -109,6 +109,7 @@ const (
F_SETOWN = 0x8
F_UNLCK = 0x2
F_WRLCK = 0x1
+ GCS_MAGIC = 0x47435300
HIDIOCGRAWINFO = 0x80084803
HIDIOCGRDESC = 0x90044802
HIDIOCGRDESCSIZE = 0x80044801
@@ -119,6 +120,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -302,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
index 9b0a2573..22912cda 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_loong64.go
@@ -116,6 +116,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -297,6 +299,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
index 958e6e06..29344eb3 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
index 50c7f25b..20d51fb9 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
index ced21d66..321b6090 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mips64le.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
index 226c0441..9bacdf1e 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_mipsle.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x80
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -303,6 +305,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
index 3122737c..c2242726 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@@ -358,6 +360,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
index eb5d3467..6270c8ee 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
index e921ebc6..9966c194 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_ppc64le.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x80
IUCLC = 0x1000
IXOFF = 0x400
@@ -362,6 +364,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
index 38ba81c5..848e5fcc 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_riscv64.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xffffff0f
+ IPV6_FLOWLABEL_MASK = 0xffff0f00
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -294,6 +296,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
index 71f04009..669b2adb 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_s390x.go
@@ -115,6 +115,8 @@ const (
IN_CLOEXEC = 0x80000
IN_NONBLOCK = 0x800
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x7b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -366,6 +368,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x36
SCM_TIMESTAMPING_PKTINFO = 0x3a
SCM_TIMESTAMPNS = 0x23
+ SCM_TS_OPT_ID = 0x51
SCM_TXTIME = 0x3d
SCM_WIFI_STATUS = 0x29
SECCOMP_IOCTL_NOTIF_ADDFD = 0x40182103
diff --git a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
index c44a3133..4834e575 100644
--- a/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zerrors_linux_sparc64.go
@@ -119,6 +119,8 @@ const (
IN_CLOEXEC = 0x400000
IN_NONBLOCK = 0x4000
IOCTL_VM_SOCKETS_GET_LOCAL_CID = 0x200007b9
+ IPV6_FLOWINFO_MASK = 0xfffffff
+ IPV6_FLOWLABEL_MASK = 0xfffff
ISIG = 0x1
IUCLC = 0x200
IXOFF = 0x1000
@@ -357,6 +359,7 @@ const (
SCM_TIMESTAMPING_OPT_STATS = 0x38
SCM_TIMESTAMPING_PKTINFO = 0x3c
SCM_TIMESTAMPNS = 0x21
+ SCM_TS_OPT_ID = 0x5a
SCM_TXTIME = 0x3f
SCM_WIFI_STATUS = 0x25
SECCOMP_IOCTL_NOTIF_ADDFD = 0x80182103
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
index 24b346e1..813c05b6 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.go
@@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(libc_fstat64_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
index ebd21310..fda32858 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_amd64.s
@@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readv(SB)
+GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_preadv(SB)
+GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_writev(SB)
+GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwritev(SB)
+GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
TEXT libc_fstat64_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat64(SB)
GLOBL ·libc_fstat64_trampoline_addr(SB), RODATA, $8
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
index 824b9c2d..e6f58f3c 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.go
@@ -2512,6 +2512,90 @@ var libc_munmap_trampoline_addr uintptr
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func readv(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_readv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_readv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_readv readv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func preadv(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_preadv_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_preadv_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_preadv preadv "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func writev(fd int, iovecs []Iovec) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall(libc_writev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)))
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_writev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_writev writev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func pwritev(fd int, iovecs []Iovec, offset int64) (n int, err error) {
+ var _p0 unsafe.Pointer
+ if len(iovecs) > 0 {
+ _p0 = unsafe.Pointer(&iovecs[0])
+ } else {
+ _p0 = unsafe.Pointer(&_zero)
+ }
+ r0, _, e1 := syscall_syscall6(libc_pwritev_trampoline_addr, uintptr(fd), uintptr(_p0), uintptr(len(iovecs)), uintptr(offset), 0, 0)
+ n = int(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+var libc_pwritev_trampoline_addr uintptr
+
+//go:cgo_import_dynamic libc_pwritev pwritev "/usr/lib/libSystem.B.dylib"
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func Fstat(fd int, stat *Stat_t) (err error) {
_, _, e1 := syscall_syscall(libc_fstat_trampoline_addr, uintptr(fd), uintptr(unsafe.Pointer(stat)), 0)
if e1 != 0 {
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
index 4f178a22..7f8998b9 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
+++ b/vendor/golang.org/x/sys/unix/zsyscall_darwin_arm64.s
@@ -738,6 +738,26 @@ TEXT libc_munmap_trampoline<>(SB),NOSPLIT,$0-0
GLOBL ·libc_munmap_trampoline_addr(SB), RODATA, $8
DATA ·libc_munmap_trampoline_addr(SB)/8, $libc_munmap_trampoline<>(SB)
+TEXT libc_readv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_readv(SB)
+GLOBL ·libc_readv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_readv_trampoline_addr(SB)/8, $libc_readv_trampoline<>(SB)
+
+TEXT libc_preadv_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_preadv(SB)
+GLOBL ·libc_preadv_trampoline_addr(SB), RODATA, $8
+DATA ·libc_preadv_trampoline_addr(SB)/8, $libc_preadv_trampoline<>(SB)
+
+TEXT libc_writev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_writev(SB)
+GLOBL ·libc_writev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_writev_trampoline_addr(SB)/8, $libc_writev_trampoline<>(SB)
+
+TEXT libc_pwritev_trampoline<>(SB),NOSPLIT,$0-0
+ JMP libc_pwritev(SB)
+GLOBL ·libc_pwritev_trampoline_addr(SB), RODATA, $8
+DATA ·libc_pwritev_trampoline_addr(SB)/8, $libc_pwritev_trampoline<>(SB)
+
TEXT libc_fstat_trampoline<>(SB),NOSPLIT,$0-0
JMP libc_fstat(SB)
GLOBL ·libc_fstat_trampoline_addr(SB), RODATA, $8
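Editor's note (illustrative sketch, not part of the vendored diff): the readv/preadv/writev/pwritev trampolines added above back vectored I/O on darwin. Assuming the exported unix.Writev wrapper (long available on Linux) is now also provided for darwin by this update, it would be reached roughly like this:

    package main

    import (
    	"log"
    	"os"

    	"golang.org/x/sys/unix"
    )

    func main() {
    	f, err := os.CreateTemp("", "writev-demo")
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer os.Remove(f.Name())
    	defer f.Close()

    	// Gather two separate buffers into a single writev call.
    	n, err := unix.Writev(int(f.Fd()), [][]byte{
    		[]byte("hello, "),
    		[]byte("world\n"),
    	})
    	if err != nil {
    		log.Fatal(err)
    	}
    	log.Printf("wrote %d bytes with one writev call", n)
    }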
diff --git a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
index 829b87fe..c6545413 100644
--- a/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsyscall_solaris_amd64.go
@@ -141,6 +141,16 @@ import (
//go:cgo_import_dynamic libc_getpeername getpeername "libsocket.so"
//go:cgo_import_dynamic libc_setsockopt setsockopt "libsocket.so"
//go:cgo_import_dynamic libc_recvfrom recvfrom "libsocket.so"
+//go:cgo_import_dynamic libc_getpeerucred getpeerucred "libc.so"
+//go:cgo_import_dynamic libc_ucred_get ucred_get "libc.so"
+//go:cgo_import_dynamic libc_ucred_geteuid ucred_geteuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getegid ucred_getegid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getruid ucred_getruid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getrgid ucred_getrgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsuid ucred_getsuid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getsgid ucred_getsgid "libc.so"
+//go:cgo_import_dynamic libc_ucred_getpid ucred_getpid "libc.so"
+//go:cgo_import_dynamic libc_ucred_free ucred_free "libc.so"
//go:cgo_import_dynamic libc_port_create port_create "libc.so"
//go:cgo_import_dynamic libc_port_associate port_associate "libc.so"
//go:cgo_import_dynamic libc_port_dissociate port_dissociate "libc.so"
@@ -280,6 +290,16 @@ import (
//go:linkname procgetpeername libc_getpeername
//go:linkname procsetsockopt libc_setsockopt
//go:linkname procrecvfrom libc_recvfrom
+//go:linkname procgetpeerucred libc_getpeerucred
+//go:linkname procucred_get libc_ucred_get
+//go:linkname procucred_geteuid libc_ucred_geteuid
+//go:linkname procucred_getegid libc_ucred_getegid
+//go:linkname procucred_getruid libc_ucred_getruid
+//go:linkname procucred_getrgid libc_ucred_getrgid
+//go:linkname procucred_getsuid libc_ucred_getsuid
+//go:linkname procucred_getsgid libc_ucred_getsgid
+//go:linkname procucred_getpid libc_ucred_getpid
+//go:linkname procucred_free libc_ucred_free
//go:linkname procport_create libc_port_create
//go:linkname procport_associate libc_port_associate
//go:linkname procport_dissociate libc_port_dissociate
@@ -420,6 +440,16 @@ var (
procgetpeername,
procsetsockopt,
procrecvfrom,
+ procgetpeerucred,
+ procucred_get,
+ procucred_geteuid,
+ procucred_getegid,
+ procucred_getruid,
+ procucred_getrgid,
+ procucred_getsuid,
+ procucred_getsgid,
+ procucred_getpid,
+ procucred_free,
procport_create,
procport_associate,
procport_dissociate,
@@ -2029,6 +2059,90 @@ func recvfrom(fd int, p []byte, flags int, from *RawSockaddrAny, fromlen *_Sockl
// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+func getpeerucred(fd uintptr, ucred *uintptr) (err error) {
+ _, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procgetpeerucred)), 2, uintptr(fd), uintptr(unsafe.Pointer(ucred)), 0, 0, 0, 0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGet(pid int) (ucred uintptr, err error) {
+ r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procucred_get)), 1, uintptr(pid), 0, 0, 0, 0, 0)
+ ucred = uintptr(r0)
+ if e1 != 0 {
+ err = errnoErr(e1)
+ }
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGeteuid(ucred uintptr) (uid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_geteuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetegid(ucred uintptr) (gid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getegid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetruid(ucred uintptr) (uid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getruid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetrgid(ucred uintptr) (gid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getrgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsuid(ucred uintptr) (uid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsuid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ uid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetsgid(ucred uintptr) (gid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getsgid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ gid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredGetpid(ucred uintptr) (pid int) {
+ r0, _, _ := sysvicall6(uintptr(unsafe.Pointer(&procucred_getpid)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ pid = int(r0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
+func ucredFree(ucred uintptr) {
+ sysvicall6(uintptr(unsafe.Pointer(&procucred_free)), 1, uintptr(ucred), 0, 0, 0, 0, 0)
+ return
+}
+
+// THIS FILE IS GENERATED BY THE COMMAND AT THE TOP; DO NOT EDIT
+
func port_create() (n int, err error) {
r0, _, e1 := sysvicall6(uintptr(unsafe.Pointer(&procport_create)), 0, 0, 0, 0, 0, 0, 0)
n = int(r0)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
index 524b0820..c79aaff3 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_386.go
@@ -458,4 +458,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
index f485dbf4..5eb45069 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_amd64.go
@@ -381,4 +381,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
index 70b35bf3..05e50297 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm.go
@@ -422,4 +422,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
index 1893e2fe..38c53ec5 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_arm64.go
@@ -325,4 +325,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
index 16a4017d..31d2e71a 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_loong64.go
@@ -321,4 +321,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
index 7e567f1e..f4184a33 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips.go
@@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
+ SYS_SETXATTRAT = 4463
+ SYS_GETXATTRAT = 4464
+ SYS_LISTXATTRAT = 4465
+ SYS_REMOVEXATTRAT = 4466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
index 38ae55e5..05b99622 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64.go
@@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
+ SYS_SETXATTRAT = 5463
+ SYS_GETXATTRAT = 5464
+ SYS_LISTXATTRAT = 5465
+ SYS_REMOVEXATTRAT = 5466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
index 55e92e60..43a256e9 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mips64le.go
@@ -372,4 +372,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 5460
SYS_LSM_LIST_MODULES = 5461
SYS_MSEAL = 5462
+ SYS_SETXATTRAT = 5463
+ SYS_GETXATTRAT = 5464
+ SYS_LISTXATTRAT = 5465
+ SYS_REMOVEXATTRAT = 5466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
index 60658d6a..eea5ddfc 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_mipsle.go
@@ -442,4 +442,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 4460
SYS_LSM_LIST_MODULES = 4461
SYS_MSEAL = 4462
+ SYS_SETXATTRAT = 4463
+ SYS_GETXATTRAT = 4464
+ SYS_LISTXATTRAT = 4465
+ SYS_REMOVEXATTRAT = 4466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
index e203e8a7..0d777bfb 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc.go
@@ -449,4 +449,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
index 5944b97d..b4463650 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64.go
@@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
index c66d416d..0c7d21c1 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_ppc64le.go
@@ -421,4 +421,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
index a5459e76..84053916 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_riscv64.go
@@ -326,4 +326,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
index 01d86825..fcf1b790 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_s390x.go
@@ -387,4 +387,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
index 7b703e77..52d15b5f 100644
--- a/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
+++ b/vendor/golang.org/x/sys/unix/zsysnum_linux_sparc64.go
@@ -400,4 +400,8 @@ const (
SYS_LSM_SET_SELF_ATTR = 460
SYS_LSM_LIST_MODULES = 461
SYS_MSEAL = 462
+ SYS_SETXATTRAT = 463
+ SYS_GETXATTRAT = 464
+ SYS_LISTXATTRAT = 465
+ SYS_REMOVEXATTRAT = 466
)
diff --git a/vendor/golang.org/x/sys/unix/ztypes_linux.go b/vendor/golang.org/x/sys/unix/ztypes_linux.go
index 5537148d..a46abe64 100644
--- a/vendor/golang.org/x/sys/unix/ztypes_linux.go
+++ b/vendor/golang.org/x/sys/unix/ztypes_linux.go
@@ -4747,7 +4747,7 @@ const (
NL80211_ATTR_MAC_HINT = 0xc8
NL80211_ATTR_MAC_MASK = 0xd7
NL80211_ATTR_MAX_AP_ASSOC_STA = 0xca
- NL80211_ATTR_MAX = 0x14c
+ NL80211_ATTR_MAX = 0x14d
NL80211_ATTR_MAX_CRIT_PROT_DURATION = 0xb4
NL80211_ATTR_MAX_CSA_COUNTERS = 0xce
NL80211_ATTR_MAX_MATCH_SETS = 0x85
@@ -5519,7 +5519,7 @@ const (
NL80211_MNTR_FLAG_CONTROL = 0x3
NL80211_MNTR_FLAG_COOK_FRAMES = 0x5
NL80211_MNTR_FLAG_FCSFAIL = 0x1
- NL80211_MNTR_FLAG_MAX = 0x6
+ NL80211_MNTR_FLAG_MAX = 0x7
NL80211_MNTR_FLAG_OTHER_BSS = 0x4
NL80211_MNTR_FLAG_PLCPFAIL = 0x2
NL80211_MPATH_FLAG_ACTIVE = 0x1
@@ -6174,3 +6174,5 @@ type SockDiagReq struct {
Family uint8
Protocol uint8
}
+
+const RTM_NEWNVLAN = 0x70
diff --git a/vendor/golang.org/x/sys/windows/registry/key.go b/vendor/golang.org/x/sys/windows/registry/key.go
index fd863244..39aeeb64 100644
--- a/vendor/golang.org/x/sys/windows/registry/key.go
+++ b/vendor/golang.org/x/sys/windows/registry/key.go
@@ -164,7 +164,12 @@ loopItems:
func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool, err error) {
var h syscall.Handle
var d uint32
- err = regCreateKeyEx(syscall.Handle(k), syscall.StringToUTF16Ptr(path),
+ var pathPointer *uint16
+ pathPointer, err = syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return 0, false, err
+ }
+ err = regCreateKeyEx(syscall.Handle(k), pathPointer,
0, nil, _REG_OPTION_NON_VOLATILE, access, nil, &h, &d)
if err != nil {
return 0, false, err
@@ -174,7 +179,11 @@ func CreateKey(k Key, path string, access uint32) (newk Key, openedExisting bool
// DeleteKey deletes the subkey path of key k and its values.
func DeleteKey(k Key, path string) error {
- return regDeleteKey(syscall.Handle(k), syscall.StringToUTF16Ptr(path))
+ pathPointer, err := syscall.UTF16PtrFromString(path)
+ if err != nil {
+ return err
+ }
+ return regDeleteKey(syscall.Handle(k), pathPointer)
}
// A KeyInfo describes the statistics of a key. It is returned by Stat.
diff --git a/vendor/golang.org/x/sys/windows/registry/value.go b/vendor/golang.org/x/sys/windows/registry/value.go
index 74db26b9..a1bcbb23 100644
--- a/vendor/golang.org/x/sys/windows/registry/value.go
+++ b/vendor/golang.org/x/sys/windows/registry/value.go
@@ -340,7 +340,11 @@ func (k Key) SetBinaryValue(name string, value []byte) error {
// DeleteValue removes a named value from the key k.
func (k Key) DeleteValue(name string) error {
- return regDeleteValue(syscall.Handle(k), syscall.StringToUTF16Ptr(name))
+ namePointer, err := syscall.UTF16PtrFromString(name)
+ if err != nil {
+ return err
+ }
+ return regDeleteValue(syscall.Handle(k), namePointer)
}
// ReadValueNames returns the value names of key k.
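Editor's note (illustrative sketch, not part of the vendored diff): the registry changes above swap syscall.StringToUTF16Ptr, which panics if the input contains a NUL byte, for syscall.UTF16PtrFromString, which reports the problem as an error the caller can handle. A minimal Windows-only demonstration of the difference:

    //go:build windows

    package main

    import (
    	"fmt"
    	"syscall"
    )

    func main() {
    	// A key path that (unexpectedly) embeds a NUL byte.
    	path := "Software\\Demo\x00Key"

    	// The old syscall.StringToUTF16Ptr(path) would panic here.
    	p, err := syscall.UTF16PtrFromString(path)
    	if err != nil {
    		fmt.Println("rejected:", err)
    		return
    	}
    	_ = p // would otherwise be passed on to regCreateKeyEx / regDeleteKey
    }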
diff --git a/vendor/golang.org/x/sys/windows/types_windows.go b/vendor/golang.org/x/sys/windows/types_windows.go
index 9d138de5..ad67df2f 100644
--- a/vendor/golang.org/x/sys/windows/types_windows.go
+++ b/vendor/golang.org/x/sys/windows/types_windows.go
@@ -1074,6 +1074,7 @@ const (
IP_ADD_MEMBERSHIP = 0xc
IP_DROP_MEMBERSHIP = 0xd
IP_PKTINFO = 0x13
+ IP_MTU_DISCOVER = 0x47
IPV6_V6ONLY = 0x1b
IPV6_UNICAST_HOPS = 0x4
@@ -1083,6 +1084,7 @@ const (
IPV6_JOIN_GROUP = 0xc
IPV6_LEAVE_GROUP = 0xd
IPV6_PKTINFO = 0x13
+ IPV6_MTU_DISCOVER = 0x47
MSG_OOB = 0x1
MSG_PEEK = 0x2
@@ -1132,6 +1134,15 @@ const (
WSASYS_STATUS_LEN = 128
)
+// enum PMTUD_STATE from ws2ipdef.h
+const (
+ IP_PMTUDISC_NOT_SET = 0
+ IP_PMTUDISC_DO = 1
+ IP_PMTUDISC_DONT = 2
+ IP_PMTUDISC_PROBE = 3
+ IP_PMTUDISC_MAX = 4
+)
+
type WSABuf struct {
Len uint32
Buf *byte
@@ -1146,6 +1157,22 @@ type WSAMsg struct {
Flags uint32
}
+type WSACMSGHDR struct {
+ Len uintptr
+ Level int32
+ Type int32
+}
+
+type IN_PKTINFO struct {
+ Addr [4]byte
+ Ifindex uint32
+}
+
+type IN6_PKTINFO struct {
+ Addr [16]byte
+ Ifindex uint32
+}
+
// Flags for WSASocket
const (
WSA_FLAG_OVERLAPPED = 0x01
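Editor's note (hedged sketch, not part of the vendored diff): the IP_MTU_DISCOVER option and PMTUD_STATE values added above are typically applied with SetsockoptInt at the IPPROTO_IP level; that pairing is an assumption here, mirroring how ws2ipdef.h documents the option.

    //go:build windows

    package main

    import (
    	"log"

    	"golang.org/x/sys/windows"
    )

    func main() {
    	var wsaData windows.WSAData
    	if err := windows.WSAStartup(uint32(0x202), &wsaData); err != nil {
    		log.Fatal(err)
    	}
    	defer windows.WSACleanup()

    	fd, err := windows.Socket(windows.AF_INET, windows.SOCK_DGRAM, windows.IPPROTO_UDP)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer windows.Closesocket(fd)

    	// Set the DF bit and let the stack perform path-MTU discovery.
    	if err := windows.SetsockoptInt(fd, windows.IPPROTO_IP, windows.IP_MTU_DISCOVER, windows.IP_PMTUDISC_DO); err != nil {
    		log.Fatal(err)
    	}
    }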
diff --git a/vendor/golang.org/x/term/terminal.go b/vendor/golang.org/x/term/terminal.go
index f636667f..14f89470 100644
--- a/vendor/golang.org/x/term/terminal.go
+++ b/vendor/golang.org/x/term/terminal.go
@@ -44,6 +44,8 @@ type Terminal struct {
// bytes, as an index into |line|). If it returns ok=false, the key
// press is processed normally. Otherwise it returns a replacement line
// and the new cursor position.
+ //
+ // This will be disabled during ReadPassword.
AutoCompleteCallback func(line string, pos int, key rune) (newLine string, newPos int, ok bool)
// Escape contains a pointer to the escape codes for this terminal.
@@ -692,6 +694,8 @@ func (t *Terminal) Write(buf []byte) (n int, err error) {
// ReadPassword temporarily changes the prompt and reads a password, without
// echo, from the terminal.
+//
+// The AutoCompleteCallback is disabled during this call.
func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
t.lock.Lock()
defer t.lock.Unlock()
@@ -699,6 +703,11 @@ func (t *Terminal) ReadPassword(prompt string) (line string, err error) {
oldPrompt := t.prompt
t.prompt = []rune(prompt)
t.echo = false
+ oldAutoCompleteCallback := t.AutoCompleteCallback
+ t.AutoCompleteCallback = nil
+ defer func() {
+ t.AutoCompleteCallback = oldAutoCompleteCallback
+ }()
line, err = t.readLine()
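Editor's note (illustrative sketch, not part of the vendored diff): the x/term change above temporarily clears AutoCompleteCallback for the duration of ReadPassword, so tab-completion cannot observe or alter the password being typed. A minimal use of the documented behavior:

    package main

    import (
    	"fmt"
    	"log"
    	"os"

    	"golang.org/x/term"
    )

    func main() {
    	oldState, err := term.MakeRaw(int(os.Stdin.Fd()))
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer term.Restore(int(os.Stdin.Fd()), oldState)

    	t := term.NewTerminal(os.Stdin, "> ")
    	t.AutoCompleteCallback = func(line string, pos int, key rune) (string, int, bool) {
    		// Invoked during normal line editing, but not during ReadPassword.
    		return line, pos, false
    	}

    	password, err := t.ReadPassword("password: ")
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Fprintf(t, "read %d characters\n", len(password))
    }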
diff --git a/vendor/golang.org/x/text/language/parse.go b/vendor/golang.org/x/text/language/parse.go
index 4d57222e..053336e2 100644
--- a/vendor/golang.org/x/text/language/parse.go
+++ b/vendor/golang.org/x/text/language/parse.go
@@ -59,7 +59,7 @@ func (c CanonType) Parse(s string) (t Tag, err error) {
if changed {
tt.RemakeString()
}
- return makeTag(tt), err
+ return makeTag(tt), nil
}
// Compose creates a Tag from individual parts, which may be of type Tag, Base,
diff --git a/vendor/golang.org/x/time/rate/rate.go b/vendor/golang.org/x/time/rate/rate.go
index 8f6c7f49..794b2e32 100644
--- a/vendor/golang.org/x/time/rate/rate.go
+++ b/vendor/golang.org/x/time/rate/rate.go
@@ -85,7 +85,7 @@ func (lim *Limiter) Burst() int {
// TokensAt returns the number of tokens available at time t.
func (lim *Limiter) TokensAt(t time.Time) float64 {
lim.mu.Lock()
- _, tokens := lim.advance(t) // does not mutate lim
+ tokens := lim.advance(t) // does not mutate lim
lim.mu.Unlock()
return tokens
}
@@ -99,8 +99,9 @@ func (lim *Limiter) Tokens() float64 {
// bursts of at most b tokens.
func NewLimiter(r Limit, b int) *Limiter {
return &Limiter{
- limit: r,
- burst: b,
+ limit: r,
+ burst: b,
+ tokens: float64(b),
}
}
@@ -185,7 +186,7 @@ func (r *Reservation) CancelAt(t time.Time) {
return
}
// advance time to now
- t, tokens := r.lim.advance(t)
+ tokens := r.lim.advance(t)
// calculate new number of tokens
tokens += restoreTokens
if burst := float64(r.lim.burst); tokens > burst {
@@ -306,7 +307,7 @@ func (lim *Limiter) SetLimitAt(t time.Time, newLimit Limit) {
lim.mu.Lock()
defer lim.mu.Unlock()
- t, tokens := lim.advance(t)
+ tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@@ -323,7 +324,7 @@ func (lim *Limiter) SetBurstAt(t time.Time, newBurst int) {
lim.mu.Lock()
defer lim.mu.Unlock()
- t, tokens := lim.advance(t)
+ tokens := lim.advance(t)
lim.last = t
lim.tokens = tokens
@@ -344,21 +345,9 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
tokens: n,
timeToAct: t,
}
- } else if lim.limit == 0 {
- var ok bool
- if lim.burst >= n {
- ok = true
- lim.burst -= n
- }
- return Reservation{
- ok: ok,
- lim: lim,
- tokens: lim.burst,
- timeToAct: t,
- }
}
- t, tokens := lim.advance(t)
+ tokens := lim.advance(t)
// Calculate the remaining number of tokens resulting from the request.
tokens -= float64(n)
@@ -391,10 +380,11 @@ func (lim *Limiter) reserveN(t time.Time, n int, maxFutureReserve time.Duration)
return r
}
-// advance calculates and returns an updated state for lim resulting from the passage of time.
+// advance calculates and returns an updated number of tokens for lim
+// resulting from the passage of time.
// lim is not changed.
// advance requires that lim.mu is held.
-func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
+func (lim *Limiter) advance(t time.Time) (newTokens float64) {
last := lim.last
if t.Before(last) {
last = t
@@ -407,7 +397,7 @@ func (lim *Limiter) advance(t time.Time) (newT time.Time, newTokens float64) {
if burst := float64(lim.burst); tokens > burst {
tokens = burst
}
- return t, tokens
+ return tokens
}
// durationFromTokens is a unit conversion function from the number of tokens to the duration
@@ -416,8 +406,15 @@ func (limit Limit) durationFromTokens(tokens float64) time.Duration {
if limit <= 0 {
return InfDuration
}
- seconds := tokens / float64(limit)
- return time.Duration(float64(time.Second) * seconds)
+
+ duration := (tokens / float64(limit)) * float64(time.Second)
+
+ // Cap the duration to the maximum representable int64 value, to avoid overflow.
+ if duration > float64(math.MaxInt64) {
+ return InfDuration
+ }
+
+ return time.Duration(duration)
}
// tokensFromDuration is a unit conversion function from a time duration to the number of tokens
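Editor's note (illustrative sketch, not part of the vendored diff): with tokens initialized to float64(b) in NewLimiter above, a fresh limiter explicitly starts with a full bucket, so an initial burst of up to b events is admitted immediately and later events are paced at rate r.

    package main

    import (
    	"fmt"
    	"time"

    	"golang.org/x/time/rate"
    )

    func main() {
    	// 1 token per second, burst of 3; the bucket starts full at 3 tokens.
    	lim := rate.NewLimiter(rate.Limit(1), 3)

    	for i := 0; i < 5; i++ {
    		fmt.Printf("event %d allowed: %v (tokens left ~%.1f)\n",
    			i, lim.Allow(), lim.Tokens())
    	}
    	// The first three Allow calls succeed from the initial bucket;
    	// the rest must wait for refill.
    	time.Sleep(time.Second)
    	fmt.Println("after 1s, allowed again:", lim.Allow())
    }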
diff --git a/vendor/golang.org/x/tools/cmd/stringer/gotypesalias.go b/vendor/golang.org/x/tools/cmd/stringer/gotypesalias.go
deleted file mode 100644
index 288c10c2..00000000
--- a/vendor/golang.org/x/tools/cmd/stringer/gotypesalias.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.23
-
-//go:debug gotypesalias=1
-
-package main
-
-// Materialize aliases whenever the go toolchain version is after 1.23 (#69772).
-// Remove this file after go.mod >= 1.23 (which implies gotypesalias=1).
diff --git a/vendor/golang.org/x/tools/cmd/stringer/stringer.go b/vendor/golang.org/x/tools/cmd/stringer/stringer.go
index 09be11ca..038e8e83 100644
--- a/vendor/golang.org/x/tools/cmd/stringer/stringer.go
+++ b/vendor/golang.org/x/tools/cmd/stringer/stringer.go
@@ -244,10 +244,10 @@ type Generator struct {
buf bytes.Buffer // Accumulated output.
pkg *Package // Package we are scanning.
- logf func(format string, args ...interface{}) // test logging hook; nil when not testing
+ logf func(format string, args ...any) // test logging hook; nil when not testing
}
-func (g *Generator) Printf(format string, args ...interface{}) {
+func (g *Generator) Printf(format string, args ...any) {
fmt.Fprintf(&g.buf, format, args...)
}
@@ -279,7 +279,7 @@ type Package struct {
func loadPackages(
patterns, tags []string,
trimPrefix string, lineComment bool,
- logf func(format string, args ...interface{}),
+ logf func(format string, args ...any),
) []*Package {
cfg := &packages.Config{
Mode: packages.NeedName | packages.NeedTypes | packages.NeedTypesInfo | packages.NeedSyntax | packages.NeedFiles,
diff --git a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
index f3ab0a2e..7b90bc92 100644
--- a/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
+++ b/vendor/golang.org/x/tools/go/gcexportdata/gcexportdata.go
@@ -106,24 +106,18 @@ func Find(importPath, srcDir string) (filename, path string) {
// additional trailing data beyond the end of the export data.
func NewReader(r io.Reader) (io.Reader, error) {
buf := bufio.NewReader(r)
- _, size, err := gcimporter.FindExportData(buf)
+ size, err := gcimporter.FindExportData(buf)
if err != nil {
return nil, err
}
- if size >= 0 {
- // We were given an archive and found the __.PKGDEF in it.
- // This tells us the size of the export data, and we don't
- // need to return the entire file.
- return &io.LimitedReader{
- R: buf,
- N: size,
- }, nil
- } else {
- // We were given an object file. As such, we don't know how large
- // the export data is and must return the entire file.
- return buf, nil
- }
+ // We were given an archive and found the __.PKGDEF in it.
+ // This tells us the size of the export data, and we don't
+ // need to return the entire file.
+ return &io.LimitedReader{
+ R: buf,
+ N: size,
+ }, nil
}
// readAll works the same way as io.ReadAll, but avoids allocations and copies
@@ -199,10 +193,7 @@ func Read(in io.Reader, fset *token.FileSet, imports map[string]*types.Package,
return pkg, err
default:
- l := len(data)
- if l > 10 {
- l = 10
- }
+ l := min(len(data), 10)
return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), path)
}
}
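Editor's note (hedged sketch, not part of the vendored diff): after the change above, NewReader assumes it was handed an archive containing __.PKGDEF and always returns a reader limited to the export data section. Note that gcexportdata.Find only locates export data for GOPATH-style installed packages, so it may return "" in module mode; this is only a usage outline.

    package main

    import (
    	"fmt"
    	"go/token"
    	"go/types"
    	"log"
    	"os"

    	"golang.org/x/tools/go/gcexportdata"
    )

    func main() {
    	filename, path := gcexportdata.Find("fmt", "")
    	if filename == "" {
    		log.Fatal("no export data found for fmt")
    	}

    	f, err := os.Open(filename)
    	if err != nil {
    		log.Fatal(err)
    	}
    	defer f.Close()

    	r, err := gcexportdata.NewReader(f) // limited to the export data section
    	if err != nil {
    		log.Fatal(err)
    	}

    	pkg, err := gcexportdata.Read(r, token.NewFileSet(), map[string]*types.Package{}, path)
    	if err != nil {
    		log.Fatal(err)
    	}
    	fmt.Println(pkg.Path(), "exports", pkg.Scope().Len(), "objects")
    }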
diff --git a/vendor/golang.org/x/tools/go/packages/external.go b/vendor/golang.org/x/tools/go/packages/external.go
index 96db9daf..f37bc651 100644
--- a/vendor/golang.org/x/tools/go/packages/external.go
+++ b/vendor/golang.org/x/tools/go/packages/external.go
@@ -13,6 +13,7 @@ import (
"fmt"
"os"
"os/exec"
+ "slices"
"strings"
)
@@ -89,7 +90,7 @@ func findExternalDriver(cfg *Config) driver {
const toolPrefix = "GOPACKAGESDRIVER="
tool := ""
for _, env := range cfg.Env {
- if val := strings.TrimPrefix(env, toolPrefix); val != env {
+ if val, ok := strings.CutPrefix(env, toolPrefix); ok {
tool = val
}
}
@@ -131,7 +132,7 @@ func findExternalDriver(cfg *Config) driver {
// command.
//
// (See similar trick in Invocation.run in ../../internal/gocommand/invoke.go)
- cmd.Env = append(slicesClip(cfg.Env), "PWD="+cfg.Dir)
+ cmd.Env = append(slices.Clip(cfg.Env), "PWD="+cfg.Dir)
cmd.Stdin = bytes.NewReader(req)
cmd.Stdout = buf
cmd.Stderr = stderr
@@ -150,7 +151,3 @@ func findExternalDriver(cfg *Config) driver {
return &response, nil
}
}
-
-// slicesClip removes unused capacity from the slice, returning s[:len(s):len(s)].
-// TODO(adonovan): use go1.21 slices.Clip.
-func slicesClip[S ~[]E, E any](s S) S { return s[:len(s):len(s)] }
diff --git a/vendor/golang.org/x/tools/go/packages/golist.go b/vendor/golang.org/x/tools/go/packages/golist.go
index 76f910ec..0458b4f9 100644
--- a/vendor/golang.org/x/tools/go/packages/golist.go
+++ b/vendor/golang.org/x/tools/go/packages/golist.go
@@ -322,6 +322,7 @@ type jsonPackage struct {
ImportPath string
Dir string
Name string
+ Target string
Export string
GoFiles []string
CompiledGoFiles []string
@@ -505,13 +506,15 @@ func (state *golistState) createDriverResponse(words ...string) (*DriverResponse
pkg := &Package{
Name: p.Name,
ID: p.ImportPath,
+ Dir: p.Dir,
+ Target: p.Target,
GoFiles: absJoin(p.Dir, p.GoFiles, p.CgoFiles),
CompiledGoFiles: absJoin(p.Dir, p.CompiledGoFiles),
OtherFiles: absJoin(p.Dir, otherFiles(p)...),
EmbedFiles: absJoin(p.Dir, p.EmbedFiles),
EmbedPatterns: absJoin(p.Dir, p.EmbedPatterns),
IgnoredFiles: absJoin(p.Dir, p.IgnoredGoFiles, p.IgnoredOtherFiles),
- forTest: p.ForTest,
+ ForTest: p.ForTest,
depsErrors: p.DepsErrors,
Module: p.Module,
}
@@ -795,7 +798,7 @@ func jsonFlag(cfg *Config, goVersion int) string {
// Request Dir in the unlikely case Export is not absolute.
addFields("Dir", "Export")
}
- if cfg.Mode&needInternalForTest != 0 {
+ if cfg.Mode&NeedForTest != 0 {
addFields("ForTest")
}
if cfg.Mode&needInternalDepsErrors != 0 {
@@ -810,6 +813,9 @@ func jsonFlag(cfg *Config, goVersion int) string {
if cfg.Mode&NeedEmbedPatterns != 0 {
addFields("EmbedPatterns")
}
+ if cfg.Mode&NeedTarget != 0 {
+ addFields("Target")
+ }
return "-json=" + strings.Join(fields, ",")
}
diff --git a/vendor/golang.org/x/tools/go/packages/loadmode_string.go b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
index 5fcad6ea..69eec9f4 100644
--- a/vendor/golang.org/x/tools/go/packages/loadmode_string.go
+++ b/vendor/golang.org/x/tools/go/packages/loadmode_string.go
@@ -23,9 +23,11 @@ var modes = [...]struct {
{NeedSyntax, "NeedSyntax"},
{NeedTypesInfo, "NeedTypesInfo"},
{NeedTypesSizes, "NeedTypesSizes"},
+ {NeedForTest, "NeedForTest"},
{NeedModule, "NeedModule"},
{NeedEmbedFiles, "NeedEmbedFiles"},
{NeedEmbedPatterns, "NeedEmbedPatterns"},
+ {NeedTarget, "NeedTarget"},
}
func (mode LoadMode) String() string {
diff --git a/vendor/golang.org/x/tools/go/packages/packages.go b/vendor/golang.org/x/tools/go/packages/packages.go
index 2ecc6423..6665a04c 100644
--- a/vendor/golang.org/x/tools/go/packages/packages.go
+++ b/vendor/golang.org/x/tools/go/packages/packages.go
@@ -43,19 +43,33 @@ import (
// ID and Errors (if present) will always be filled.
// [Load] may return more information than requested.
//
+// The Mode flag is a union of several bits named NeedName,
+// NeedFiles, and so on, each of which determines whether
+// a given field of Package (Name, Files, etc) should be
+// populated.
+//
+// For convenience, we provide named constants for the most
+// common combinations of Need flags:
+//
+// [LoadFiles] lists of files in each package
+// [LoadImports] ... plus imports
+// [LoadTypes] ... plus type information
+// [LoadSyntax] ... plus type-annotated syntax
+// [LoadAllSyntax] ... for all dependencies
+//
// Unfortunately there are a number of open bugs related to
// interactions among the LoadMode bits:
-// - https://github.com/golang/go/issues/56633
-// - https://github.com/golang/go/issues/56677
-// - https://github.com/golang/go/issues/58726
-// - https://github.com/golang/go/issues/63517
+// - https://go.dev/issue/56633
+// - https://go.dev/issue/56677
+// - https://go.dev/issue/58726
+// - https://go.dev/issue/63517
type LoadMode int
const (
// NeedName adds Name and PkgPath.
NeedName LoadMode = 1 << iota
- // NeedFiles adds GoFiles, OtherFiles, and IgnoredFiles
+ // NeedFiles adds Dir, GoFiles, OtherFiles, and IgnoredFiles
NeedFiles
// NeedCompiledGoFiles adds CompiledGoFiles.
@@ -86,9 +100,10 @@ const (
// needInternalDepsErrors adds the internal deps errors field for use by gopls.
needInternalDepsErrors
- // needInternalForTest adds the internal forTest field.
+ // NeedForTest adds ForTest.
+ //
// Tests must also be set on the context for this field to be populated.
- needInternalForTest
+ NeedForTest
// typecheckCgo enables full support for type checking cgo. Requires Go 1.15+.
// Modifies CompiledGoFiles and Types, and has no effect on its own.
@@ -103,41 +118,31 @@ const (
// NeedEmbedPatterns adds EmbedPatterns.
NeedEmbedPatterns
+ // NeedTarget adds Target.
+ NeedTarget
+
// Be sure to update loadmode_string.go when adding new items!
)
const (
// LoadFiles loads the name and file names for the initial packages.
- //
- // Deprecated: LoadFiles exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
LoadFiles = NeedName | NeedFiles | NeedCompiledGoFiles
// LoadImports loads the name, file names, and import mapping for the initial packages.
- //
- // Deprecated: LoadImports exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
LoadImports = LoadFiles | NeedImports
// LoadTypes loads exported type information for the initial packages.
- //
- // Deprecated: LoadTypes exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
LoadTypes = LoadImports | NeedTypes | NeedTypesSizes
// LoadSyntax loads typed syntax for the initial packages.
- //
- // Deprecated: LoadSyntax exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
LoadSyntax = LoadTypes | NeedSyntax | NeedTypesInfo
// LoadAllSyntax loads typed syntax for the initial packages and all dependencies.
- //
- // Deprecated: LoadAllSyntax exists for historical compatibility
- // and should not be used. Please directly specify the needed fields using the Need values.
LoadAllSyntax = LoadSyntax | NeedDeps
// Deprecated: NeedExportsFile is a historical misspelling of NeedExportFile.
+ //
+ //go:fix inline
NeedExportsFile = NeedExportFile
)
@@ -158,7 +163,7 @@ type Config struct {
// If the user provides a logger, debug logging is enabled.
// If the GOPACKAGESDEBUG environment variable is set to true,
// but the logger is nil, default to log.Printf.
- Logf func(format string, args ...interface{})
+ Logf func(format string, args ...any)
// Dir is the directory in which to run the build system's query tool
// that provides information about the packages.
@@ -434,6 +439,12 @@ type Package struct {
// PkgPath is the package path as used by the go/types package.
PkgPath string
+ // Dir is the directory associated with the package, if it exists.
+ //
+ // For packages listed by the go command, this is the directory containing
+ // the package files.
+ Dir string
+
// Errors contains any errors encountered querying the metadata
// of the package, or while parsing or type-checking its files.
Errors []Error
@@ -473,6 +484,10 @@ type Package struct {
// information for the package as provided by the build system.
ExportFile string
+ // Target is the absolute install path of the .a file, for libraries,
+ // and of the executable file, for binaries.
+ Target string
+
// Imports maps import paths appearing in the package's Go source files
// to corresponding loaded Packages.
Imports map[string]*Package
@@ -521,8 +536,8 @@ type Package struct {
// -- internal --
- // forTest is the package under test, if any.
- forTest string
+ // ForTest is the package under test, if any.
+ ForTest string
// depsErrors is the DepsErrors field from the go list response, if any.
depsErrors []*packagesinternal.PackageError
@@ -551,21 +566,17 @@ type ModuleError struct {
}
func init() {
- packagesinternal.GetForTest = func(p interface{}) string {
- return p.(*Package).forTest
- }
- packagesinternal.GetDepsErrors = func(p interface{}) []*packagesinternal.PackageError {
+ packagesinternal.GetDepsErrors = func(p any) []*packagesinternal.PackageError {
return p.(*Package).depsErrors
}
- packagesinternal.SetModFile = func(config interface{}, value string) {
+ packagesinternal.SetModFile = func(config any, value string) {
config.(*Config).modFile = value
}
- packagesinternal.SetModFlag = func(config interface{}, value string) {
+ packagesinternal.SetModFlag = func(config any, value string) {
config.(*Config).modFlag = value
}
packagesinternal.TypecheckCgo = int(typecheckCgo)
packagesinternal.DepsErrors = int(needInternalDepsErrors)
- packagesinternal.ForTest = int(needInternalForTest)
}
// An Error describes a problem with a package's metadata, syntax, or types.
@@ -730,7 +741,7 @@ func newLoader(cfg *Config) *loader {
if debug {
ld.Config.Logf = log.Printf
} else {
- ld.Config.Logf = func(format string, args ...interface{}) {}
+ ld.Config.Logf = func(format string, args ...any) {}
}
}
if ld.Config.Mode == 0 {
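Editor's note (illustrative sketch, not part of the vendored diff): the hunks above export the previously internal forTest field as ForTest, add Dir and Target to Package, and introduce the NeedForTest and NeedTarget mode bits. A minimal load using them:

    package main

    import (
    	"fmt"
    	"log"

    	"golang.org/x/tools/go/packages"
    )

    func main() {
    	cfg := &packages.Config{
    		Mode:  packages.NeedName | packages.NeedFiles | packages.NeedTarget | packages.NeedForTest,
    		Tests: true, // ForTest is only populated when Tests is set
    	}
    	pkgs, err := packages.Load(cfg, "fmt")
    	if err != nil {
    		log.Fatal(err)
    	}
    	for _, p := range pkgs {
    		fmt.Printf("%s\n  dir:     %s\n  target:  %s\n  forTest: %s\n",
    			p.ID, p.Dir, p.Target, p.ForTest)
    	}
    }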
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/callee.go b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
index 75438035..53b71339 100644
--- a/vendor/golang.org/x/tools/go/types/typeutil/callee.go
+++ b/vendor/golang.org/x/tools/go/types/typeutil/callee.go
@@ -7,45 +7,23 @@ package typeutil
import (
"go/ast"
"go/types"
-
- "golang.org/x/tools/internal/typeparams"
+ _ "unsafe" // for linkname
)
// Callee returns the named target of a function call, if any:
// a function, method, builtin, or variable.
//
// Functions and methods may potentially have type parameters.
+//
+// Note: for calls of instantiated functions and methods, Callee returns
+// the corresponding generic function or method on the generic type.
func Callee(info *types.Info, call *ast.CallExpr) types.Object {
- fun := ast.Unparen(call.Fun)
-
- // Look through type instantiation if necessary.
- isInstance := false
- switch fun.(type) {
- case *ast.IndexExpr, *ast.IndexListExpr:
- // When extracting the callee from an *IndexExpr, we need to check that
- // it is a *types.Func and not a *types.Var.
- // Example: Don't match a slice m within the expression `m[0]()`.
- isInstance = true
- fun, _, _, _ = typeparams.UnpackIndexExpr(fun)
- }
-
- var obj types.Object
- switch fun := fun.(type) {
- case *ast.Ident:
- obj = info.Uses[fun] // type, var, builtin, or declared func
- case *ast.SelectorExpr:
- if sel, ok := info.Selections[fun]; ok {
- obj = sel.Obj() // method or field
- } else {
- obj = info.Uses[fun.Sel] // qualified identifier?
- }
+ obj := info.Uses[usedIdent(info, call.Fun)]
+ if obj == nil {
+ return nil
}
if _, ok := obj.(*types.TypeName); ok {
- return nil // T(x) is a conversion, not a call
- }
- // A Func is required to match instantiations.
- if _, ok := obj.(*types.Func); isInstance && !ok {
- return nil // Was not a Func.
+ return nil
}
return obj
}
@@ -56,13 +34,52 @@ func Callee(info *types.Info, call *ast.CallExpr) types.Object {
// Note: for calls of instantiated functions and methods, StaticCallee returns
// the corresponding generic function or method on the generic type.
func StaticCallee(info *types.Info, call *ast.CallExpr) *types.Func {
- if f, ok := Callee(info, call).(*types.Func); ok && !interfaceMethod(f) {
- return f
+ obj := info.Uses[usedIdent(info, call.Fun)]
+ fn, _ := obj.(*types.Func)
+ if fn == nil || interfaceMethod(fn) {
+ return nil
+ }
+ return fn
+}
+
+// usedIdent is the implementation of [internal/typesinternal.UsedIdent].
+// It returns the identifier associated with e.
+// See typesinternal.UsedIdent for a fuller description.
+// This function should live in typesinternal, but cannot because it would
+// create an import cycle.
+//
+//go:linkname usedIdent
+func usedIdent(info *types.Info, e ast.Expr) *ast.Ident {
+ if info.Types == nil || info.Uses == nil {
+ panic("one of info.Types or info.Uses is nil; both must be populated")
+ }
+ // Look through type instantiation if necessary.
+ switch d := ast.Unparen(e).(type) {
+ case *ast.IndexExpr:
+ if info.Types[d.Index].IsType() {
+ e = d.X
+ }
+ case *ast.IndexListExpr:
+ e = d.X
+ }
+
+ switch e := ast.Unparen(e).(type) {
+ // info.Uses always has the object we want, even for selector expressions.
+ // We don't need info.Selections.
+ // See go/types/recording.go:recordSelection.
+ case *ast.Ident:
+ return e
+ case *ast.SelectorExpr:
+ return e.Sel
}
return nil
}
+// interfaceMethod reports whether its argument is a method of an interface.
+// This function should live in typesinternal, but cannot because it would create an import cycle.
+//
+//go:linkname interfaceMethod
func interfaceMethod(f *types.Func) bool {
- recv := f.Type().(*types.Signature).Recv()
+ recv := f.Signature().Recv()
return recv != nil && types.IsInterface(recv.Type())
}
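Editor's note (hedged sketch, not part of the vendored diff): the Callee/StaticCallee rewrite above resolves the called identifier through info.Uses only, and usedIdent now panics unless both info.Types and info.Uses are populated. A small end-to-end use of StaticCallee under those requirements:

    package main

    import (
    	"fmt"
    	"go/ast"
    	"go/importer"
    	"go/parser"
    	"go/token"
    	"go/types"
    	"log"

    	"golang.org/x/tools/go/types/typeutil"
    )

    const src = `package p
    import "fmt"
    func f() { fmt.Println("hi") }
    `

    func main() {
    	fset := token.NewFileSet()
    	file, err := parser.ParseFile(fset, "p.go", src, 0)
    	if err != nil {
    		log.Fatal(err)
    	}

    	// Both Types and Uses must be populated for Callee/StaticCallee.
    	info := &types.Info{
    		Types: map[ast.Expr]types.TypeAndValue{},
    		Uses:  map[*ast.Ident]types.Object{},
    	}
    	conf := types.Config{Importer: importer.ForCompiler(fset, "source", nil)}
    	if _, err := conf.Check("p", fset, []*ast.File{file}, info); err != nil {
    		log.Fatal(err)
    	}

    	ast.Inspect(file, func(n ast.Node) bool {
    		if call, ok := n.(*ast.CallExpr); ok {
    			if fn := typeutil.StaticCallee(info, call); fn != nil {
    				fmt.Println("static callee:", fn.FullName()) // fmt.Println
    			}
    		}
    		return true
    	})
    }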
diff --git a/vendor/golang.org/x/tools/go/types/typeutil/map.go b/vendor/golang.org/x/tools/go/types/typeutil/map.go
index 8d824f71..b6d542c6 100644
--- a/vendor/golang.org/x/tools/go/types/typeutil/map.go
+++ b/vendor/golang.org/x/tools/go/types/typeutil/map.go
@@ -2,30 +2,35 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// Package typeutil defines various utilities for types, such as Map,
-// a mapping from types.Type to any values.
-package typeutil // import "golang.org/x/tools/go/types/typeutil"
+// Package typeutil defines various utilities for types, such as [Map],
+// a hash table that maps [types.Type] to any value.
+package typeutil
import (
"bytes"
"fmt"
"go/types"
- "reflect"
+ "hash/maphash"
+ "unsafe"
"golang.org/x/tools/internal/typeparams"
)
// Map is a hash-table-based mapping from types (types.Type) to
-// arbitrary any values. The concrete types that implement
+// arbitrary values. The concrete types that implement
// the Type interface are pointers. Since they are not canonicalized,
// == cannot be used to check for equivalence, and thus we cannot
// simply use a Go map.
//
// Just as with map[K]V, a nil *Map is a valid empty map.
//
-// Not thread-safe.
+// Read-only map operations ([Map.At], [Map.Len], and so on) may
+// safely be called concurrently.
+//
+// TODO(adonovan): deprecate in favor of https://go.dev/issues/69420
+// and 69559, if the latter proposals for a generic hash-map type and
+// a types.Hash function are accepted.
type Map struct {
- hasher Hasher // shared by many Maps
table map[uint32][]entry // maps hash to bucket; entry.key==nil means unused
length int // number of map entries
}
@@ -36,35 +41,17 @@ type entry struct {
value any
}
-// SetHasher sets the hasher used by Map.
+// SetHasher has no effect.
//
-// All Hashers are functionally equivalent but contain internal state
-// used to cache the results of hashing previously seen types.
-//
-// A single Hasher created by MakeHasher() may be shared among many
-// Maps. This is recommended if the instances have many keys in
-// common, as it will amortize the cost of hash computation.
-//
-// A Hasher may grow without bound as new types are seen. Even when a
-// type is deleted from the map, the Hasher never shrinks, since other
-// types in the map may reference the deleted type indirectly.
-//
-// Hashers are not thread-safe, and read-only operations such as
-// Map.Lookup require updates to the hasher, so a full Mutex lock (not a
-// read-lock) is require around all Map operations if a shared
-// hasher is accessed from multiple threads.
-//
-// If SetHasher is not called, the Map will create a private hasher at
-// the first call to Insert.
-func (m *Map) SetHasher(hasher Hasher) {
- m.hasher = hasher
-}
+// It is a relic of an optimization that is no longer profitable. Do
+// not use [Hasher], [MakeHasher], or [SetHasher] in new code.
+func (m *Map) SetHasher(Hasher) {}
// Delete removes the entry with the given key, if any.
// It returns true if the entry was found.
func (m *Map) Delete(key types.Type) bool {
if m != nil && m.table != nil {
- hash := m.hasher.Hash(key)
+ hash := hash(key)
bucket := m.table[hash]
for i, e := range bucket {
if e.key != nil && types.Identical(key, e.key) {
@@ -83,7 +70,7 @@ func (m *Map) Delete(key types.Type) bool {
// The result is nil if the entry is not present.
func (m *Map) At(key types.Type) any {
if m != nil && m.table != nil {
- for _, e := range m.table[m.hasher.Hash(key)] {
+ for _, e := range m.table[hash(key)] {
if e.key != nil && types.Identical(key, e.key) {
return e.value
}
@@ -96,7 +83,7 @@ func (m *Map) At(key types.Type) any {
// and returns the previous entry, if any.
func (m *Map) Set(key types.Type, value any) (prev any) {
if m.table != nil {
- hash := m.hasher.Hash(key)
+ hash := hash(key)
bucket := m.table[hash]
var hole *entry
for i, e := range bucket {
@@ -115,10 +102,7 @@ func (m *Map) Set(key types.Type, value any) (prev any) {
m.table[hash] = append(bucket, entry{key, value})
}
} else {
- if m.hasher.memo == nil {
- m.hasher = MakeHasher()
- }
- hash := m.hasher.Hash(key)
+ hash := hash(key)
m.table = map[uint32][]entry{hash: {entry{key, value}}}
}
@@ -195,53 +179,35 @@ func (m *Map) KeysString() string {
return m.toString(false)
}
-////////////////////////////////////////////////////////////////////////
-// Hasher
+// -- Hasher --
-// A Hasher maps each type to its hash value.
-// For efficiency, a hasher uses memoization; thus its memory
-// footprint grows monotonically over time.
-// Hashers are not thread-safe.
-// Hashers have reference semantics.
-// Call MakeHasher to create a Hasher.
-type Hasher struct {
- memo map[types.Type]uint32
-
- // ptrMap records pointer identity.
- ptrMap map[any]uint32
-
- // sigTParams holds type parameters from the signature being hashed.
- // Signatures are considered identical modulo renaming of type parameters, so
- // within the scope of a signature type the identity of the signature's type
- // parameters is just their index.
- //
- // Since the language does not currently support referring to uninstantiated
- // generic types or functions, and instantiated signatures do not have type
- // parameter lists, we should never encounter a second non-empty type
- // parameter list when hashing a generic signature.
- sigTParams *types.TypeParamList
+// hash returns the hash of type t.
+// TODO(adonovan): replace by types.Hash when Go proposal #69420 is accepted.
+func hash(t types.Type) uint32 {
+ return theHasher.Hash(t)
}
-// MakeHasher returns a new Hasher instance.
-func MakeHasher() Hasher {
- return Hasher{
- memo: make(map[types.Type]uint32),
- ptrMap: make(map[any]uint32),
- sigTParams: nil,
- }
-}
+// A Hasher provides a [Hasher.Hash] method to map a type to its hash value.
+// Hashers are stateless, and all are equivalent.
+type Hasher struct{}
+
+var theHasher Hasher
+
+// MakeHasher returns Hasher{}.
+// Hashers are stateless; all are equivalent.
+func MakeHasher() Hasher { return theHasher }
// Hash computes a hash value for the given type t such that
// Identical(t, t') => Hash(t) == Hash(t').
func (h Hasher) Hash(t types.Type) uint32 {
- hash, ok := h.memo[t]
- if !ok {
- hash = h.hashFor(t)
- h.memo[t] = hash
- }
- return hash
+ return hasher{inGenericSig: false}.hash(t)
}
+// hasher holds the state of a single Hash traversal: whether we are
+// inside the signature of a generic function; this is used to
+// optimize [hasher.hashTypeParam].
+type hasher struct{ inGenericSig bool }
+
// hashString computes the Fowler–Noll–Vo hash of s.
func hashString(s string) uint32 {
var h uint32
@@ -252,21 +218,21 @@ func hashString(s string) uint32 {
return h
}
-// hashFor computes the hash of t.
-func (h Hasher) hashFor(t types.Type) uint32 {
+// hash computes the hash of t.
+func (h hasher) hash(t types.Type) uint32 {
// See Identical for rationale.
switch t := t.(type) {
case *types.Basic:
return uint32(t.Kind())
case *types.Alias:
- return h.Hash(types.Unalias(t))
+ return h.hash(types.Unalias(t))
case *types.Array:
- return 9043 + 2*uint32(t.Len()) + 3*h.Hash(t.Elem())
+ return 9043 + 2*uint32(t.Len()) + 3*h.hash(t.Elem())
case *types.Slice:
- return 9049 + 2*h.Hash(t.Elem())
+ return 9049 + 2*h.hash(t.Elem())
case *types.Struct:
var hash uint32 = 9059
@@ -277,12 +243,12 @@ func (h Hasher) hashFor(t types.Type) uint32 {
}
hash += hashString(t.Tag(i))
hash += hashString(f.Name()) // (ignore f.Pkg)
- hash += h.Hash(f.Type())
+ hash += h.hash(f.Type())
}
return hash
case *types.Pointer:
- return 9067 + 2*h.Hash(t.Elem())
+ return 9067 + 2*h.hash(t.Elem())
case *types.Signature:
var hash uint32 = 9091
@@ -290,33 +256,14 @@ func (h Hasher) hashFor(t types.Type) uint32 {
hash *= 8863
}
- // Use a separate hasher for types inside of the signature, where type
- // parameter identity is modified to be (index, constraint). We must use a
- // new memo for this hasher as type identity may be affected by this
- // masking. For example, in func[T any](*T), the identity of *T depends on
- // whether we are mapping the argument in isolation, or recursively as part
- // of hashing the signature.
- //
- // We should never encounter a generic signature while hashing another
- // generic signature, but defensively set sigTParams only if h.mask is
- // unset.
tparams := t.TypeParams()
- if h.sigTParams == nil && tparams.Len() != 0 {
- h = Hasher{
- // There may be something more efficient than discarding the existing
- // memo, but it would require detecting whether types are 'tainted' by
- // references to type parameters.
- memo: make(map[types.Type]uint32),
- // Re-using ptrMap ensures that pointer identity is preserved in this
- // hasher.
- ptrMap: h.ptrMap,
- sigTParams: tparams,
- }
- }
+ if n := tparams.Len(); n > 0 {
+ h.inGenericSig = true // affects constraints, params, and results
- for i := 0; i < tparams.Len(); i++ {
- tparam := tparams.At(i)
- hash += 7 * h.Hash(tparam.Constraint())
+ for i := range n {
+ tparam := tparams.At(i)
+ hash += 7 * h.hash(tparam.Constraint())
+ }
}
return hash + 3*h.hashTuple(t.Params()) + 5*h.hashTuple(t.Results())
@@ -350,17 +297,17 @@ func (h Hasher) hashFor(t types.Type) uint32 {
return hash
case *types.Map:
- return 9109 + 2*h.Hash(t.Key()) + 3*h.Hash(t.Elem())
+ return 9109 + 2*h.hash(t.Key()) + 3*h.hash(t.Elem())
case *types.Chan:
- return 9127 + 2*uint32(t.Dir()) + 3*h.Hash(t.Elem())
+ return 9127 + 2*uint32(t.Dir()) + 3*h.hash(t.Elem())
case *types.Named:
- hash := h.hashPtr(t.Obj())
+ hash := h.hashTypeName(t.Obj())
targs := t.TypeArgs()
for i := 0; i < targs.Len(); i++ {
targ := targs.At(i)
- hash += 2 * h.Hash(targ)
+ hash += 2 * h.hash(targ)
}
return hash
@@ -374,17 +321,17 @@ func (h Hasher) hashFor(t types.Type) uint32 {
panic(fmt.Sprintf("%T: %v", t, t))
}
-func (h Hasher) hashTuple(tuple *types.Tuple) uint32 {
+func (h hasher) hashTuple(tuple *types.Tuple) uint32 {
// See go/types.identicalTypes for rationale.
n := tuple.Len()
hash := 9137 + 2*uint32(n)
- for i := 0; i < n; i++ {
- hash += 3 * h.Hash(tuple.At(i).Type())
+ for i := range n {
+ hash += 3 * h.hash(tuple.At(i).Type())
}
return hash
}
-func (h Hasher) hashUnion(t *types.Union) uint32 {
+func (h hasher) hashUnion(t *types.Union) uint32 {
// Hash type restrictions.
terms, err := typeparams.UnionTermSet(t)
// if err != nil t has invalid type restrictions. Fall back on a non-zero
@@ -395,11 +342,11 @@ func (h Hasher) hashUnion(t *types.Union) uint32 {
return h.hashTermSet(terms)
}
-func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
+func (h hasher) hashTermSet(terms []*types.Term) uint32 {
hash := 9157 + 2*uint32(len(terms))
for _, term := range terms {
// term order is not significant.
- termHash := h.Hash(term.Type())
+ termHash := h.hash(term.Type())
if term.Tilde() {
termHash *= 9161
}
@@ -408,36 +355,47 @@ func (h Hasher) hashTermSet(terms []*types.Term) uint32 {
return hash
}
-// hashTypeParam returns a hash of the type parameter t, with a hash value
-// depending on whether t is contained in h.sigTParams.
-//
-// If h.sigTParams is set and contains t, then we are in the process of hashing
-// a signature, and the hash value of t must depend only on t's index and
-// constraint: signatures are considered identical modulo type parameter
-// renaming. To avoid infinite recursion, we only hash the type parameter
-// index, and rely on types.Identical to handle signatures where constraints
-// are not identical.
-//
-// Otherwise the hash of t depends only on t's pointer identity.
-func (h Hasher) hashTypeParam(t *types.TypeParam) uint32 {
- if h.sigTParams != nil {
- i := t.Index()
- if i >= 0 && i < h.sigTParams.Len() && t == h.sigTParams.At(i) {
- return 9173 + 3*uint32(i)
- }
+// hashTypeParam returns the hash of a type parameter.
+func (h hasher) hashTypeParam(t *types.TypeParam) uint32 {
+ // Within the signature of a generic function, TypeParams are
+ // identical if they have the same index and constraint, so we
+ // hash them based on index.
+ //
+ // When we are outside a generic function, free TypeParams are
+ // identical iff they are the same object, so we can use a
+ // more discriminating hash consistent with object identity.
+ // This optimization saves [Map] about 4% when hashing all the
+ // types.Info.Types in the forward closure of net/http.
+ if !h.inGenericSig {
+ // Optimization: outside a generic function signature,
+ // use a more discriminating hash consistent with object identity.
+ return h.hashTypeName(t.Obj())
}
- return h.hashPtr(t.Obj())
+ return 9173 + 3*uint32(t.Index())
}
-// hashPtr hashes the pointer identity of ptr. It uses h.ptrMap to ensure that
-// pointers values are not dependent on the GC.
-func (h Hasher) hashPtr(ptr any) uint32 {
- if hash, ok := h.ptrMap[ptr]; ok {
- return hash
+var theSeed = maphash.MakeSeed()
+
+// hashTypeName hashes the pointer of tname.
+func (hasher) hashTypeName(tname *types.TypeName) uint32 {
+ // Since types.Identical uses == to compare TypeNames,
+ // the Hash function uses maphash.Comparable.
+ // TODO(adonovan): or will, when it becomes available in go1.24.
+ // In the meantime we use the pointer's numeric value.
+ //
+ // hash := maphash.Comparable(theSeed, tname)
+ //
+ // (Another approach would be to hash the name and package
+ // path, and whether or not it is a package-level typename. It
+ // is rare for a package to define multiple local types with
+ // the same name.)
+ ptr := uintptr(unsafe.Pointer(tname))
+ if unsafe.Sizeof(ptr) == 8 {
+ hash := uint64(ptr)
+ return uint32(hash ^ (hash >> 32))
+ } else {
+ return uint32(ptr)
}
- hash := uint32(reflect.ValueOf(ptr).Pointer())
- h.ptrMap[ptr] = hash
- return hash
}
// shallowHash computes a hash of t without looking at any of its
@@ -454,7 +412,7 @@ func (h Hasher) hashPtr(ptr any) uint32 {
// include m itself; there is no mention of the named type X that
// might help us break the cycle.
// (See comment in go/types.identical, case *Interface, for more.)
-func (h Hasher) shallowHash(t types.Type) uint32 {
+func (h hasher) shallowHash(t types.Type) uint32 {
// t is the type of an interface method (Signature),
// its params or results (Tuples), or their immediate
// elements (mostly Slice, Pointer, Basic, Named),
@@ -475,7 +433,7 @@ func (h Hasher) shallowHash(t types.Type) uint32 {
case *types.Tuple:
n := t.Len()
hash := 9137 + 2*uint32(n)
- for i := 0; i < n; i++ {
+ for i := range n {
hash += 53471161 * h.shallowHash(t.At(i).Type())
}
return hash
@@ -508,10 +466,10 @@ func (h Hasher) shallowHash(t types.Type) uint32 {
return 9127
case *types.Named:
- return h.hashPtr(t.Obj())
+ return h.hashTypeName(t.Obj())
case *types.TypeParam:
- return h.hashPtr(t.Obj())
+ return h.hashTypeParam(t)
}
panic(fmt.Sprintf("shallowHash: %T: %v", t, t))
}
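The hunk above turns the memoizing, reference-semantics Hasher into a stateless value while keeping the documented invariant that identical types hash equally. A minimal sketch of that invariant, assuming this file is the vendored golang.org/x/tools/go/types/typeutil hasher and that MakeHasher/Hash keep the exported signatures shown in the hunk:

package main

import (
	"fmt"
	"go/types"

	"golang.org/x/tools/go/types/typeutil" // assumed import path for the hasher in this hunk
)

func main() {
	// Two structurally identical types built independently.
	t1 := types.NewSlice(types.NewPointer(types.Typ[types.Int]))
	t2 := types.NewSlice(types.NewPointer(types.Typ[types.Int]))

	h := typeutil.MakeHasher() // stateless: every Hasher value is equivalent
	fmt.Println(types.Identical(t1, t2))  // true
	fmt.Println(h.Hash(t1) == h.Hash(t2)) // true: Identical(t, t') => Hash(t) == Hash(t')
}

Because the new Hasher carries no memo map, a single package-level value (theHasher) can be shared freely, which the old memo-backed Hasher could not safely be.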
diff --git a/vendor/golang.org/x/tools/internal/event/keys/keys.go b/vendor/golang.org/x/tools/internal/event/keys/keys.go
index a02206e3..4cfa51b6 100644
--- a/vendor/golang.org/x/tools/internal/event/keys/keys.go
+++ b/vendor/golang.org/x/tools/internal/event/keys/keys.go
@@ -32,7 +32,7 @@ func (k *Value) Format(w io.Writer, buf []byte, l label.Label) {
}
// Get can be used to get a label for the key from a label.Map.
-func (k *Value) Get(lm label.Map) interface{} {
+func (k *Value) Get(lm label.Map) any {
if t := lm.Find(k); t.Valid() {
return k.From(t)
}
@@ -40,10 +40,10 @@ func (k *Value) Get(lm label.Map) interface{} {
}
// From can be used to get a value from a Label.
-func (k *Value) From(t label.Label) interface{} { return t.UnpackValue() }
+func (k *Value) From(t label.Label) any { return t.UnpackValue() }
// Of creates a new Label with this key and the supplied value.
-func (k *Value) Of(value interface{}) label.Label { return label.OfValue(k, value) }
+func (k *Value) Of(value any) label.Label { return label.OfValue(k, value) }
// Tag represents a key for tagging labels that have no value.
// These are used when the existence of the label is the entire information it
diff --git a/vendor/golang.org/x/tools/internal/event/label/label.go b/vendor/golang.org/x/tools/internal/event/label/label.go
index 0f526e1f..92a39105 100644
--- a/vendor/golang.org/x/tools/internal/event/label/label.go
+++ b/vendor/golang.org/x/tools/internal/event/label/label.go
@@ -8,6 +8,7 @@ import (
"fmt"
"io"
"reflect"
+ "slices"
"unsafe"
)
@@ -32,7 +33,7 @@ type Key interface {
type Label struct {
key Key
packed uint64
- untyped interface{}
+ untyped any
}
// Map is the interface to a collection of Labels indexed by key.
@@ -76,13 +77,13 @@ type mapChain struct {
// OfValue creates a new label from the key and value.
// This method is for implementing new key types, label creation should
// normally be done with the Of method of the key.
-func OfValue(k Key, value interface{}) Label { return Label{key: k, untyped: value} }
+func OfValue(k Key, value any) Label { return Label{key: k, untyped: value} }
// UnpackValue assumes the label was built using LabelOfValue and returns the value
// that was passed to that constructor.
// This method is for implementing new key types, for type safety normal
// access should be done with the From method of the key.
-func (t Label) UnpackValue() interface{} { return t.untyped }
+func (t Label) UnpackValue() any { return t.untyped }
// Of64 creates a new label from a key and a uint64. This is often
// used for non uint64 values that can be packed into a uint64.
@@ -154,10 +155,8 @@ func (f *filter) Valid(index int) bool {
func (f *filter) Label(index int) Label {
l := f.underlying.Label(index)
- for _, f := range f.keys {
- if l.Key() == f {
- return Label{}
- }
+ if slices.Contains(f.keys, l.Key()) {
+ return Label{}
}
return l
}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
index d79a605e..734c4619 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/bimport.go
@@ -14,7 +14,7 @@ import (
"sync"
)
-func errorf(format string, args ...interface{}) {
+func errorf(format string, args ...any) {
panic(fmt.Sprintf(format, args...))
}
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
index f6437feb..5662a311 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/exportdata.go
@@ -2,49 +2,183 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/exportdata.go.
-
-// This file implements FindExportData.
+// This file should be kept in sync with $GOROOT/src/internal/exportdata/exportdata.go.
+// This file also additionally implements FindExportData for gcexportdata.NewReader.
package gcimporter
import (
"bufio"
+ "bytes"
+ "errors"
"fmt"
+ "go/build"
"io"
- "strconv"
+ "os"
+ "os/exec"
+ "path/filepath"
"strings"
+ "sync"
)
-func readGopackHeader(r *bufio.Reader) (name string, size int64, err error) {
- // See $GOROOT/include/ar.h.
- hdr := make([]byte, 16+12+6+6+8+10+2)
- _, err = io.ReadFull(r, hdr)
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying cmd/compile created archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function.
+// This returns the length of the export data in bytes.
+//
+// This function is needed by [gcexportdata.Read], which must
+// accept inputs produced by the last two releases of cmd/compile,
+// plus tip.
+func FindExportData(r *bufio.Reader) (size int64, err error) {
+ arsize, err := FindPackageDefinition(r)
if err != nil {
return
}
- // leave for debugging
- if false {
- fmt.Printf("header: %s", hdr)
- }
- s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
- length, err := strconv.Atoi(s)
- size = int64(length)
- if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
- err = fmt.Errorf("invalid archive header")
+ size = int64(arsize)
+
+ objapi, headers, err := ReadObjectHeaders(r)
+ if err != nil {
return
}
- name = strings.TrimSpace(string(hdr[:16]))
+ size -= int64(len(objapi))
+ for _, h := range headers {
+ size -= int64(len(h))
+ }
+
+ // Check for the binary export data section header "$$B\n".
+ // TODO(taking): Unify with ReadExportDataHeader so that it stops at the 'u' instead of reading
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ hdr := string(line)
+ if hdr != "$$B\n" {
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ return
+ }
+ size -= int64(len(hdr))
+
+ // For files with a binary export data header "$$B\n",
+ // these are always terminated by an end-of-section marker "\n$$\n".
+ // So the last bytes must always be this constant.
+ //
+ // The end-of-section marker is not a part of the export data itself.
+ // Do not include these in size.
+ //
+ // It would be nice to have sanity check that the final bytes after
+ // the export data are indeed the end-of-section marker. The split
+ // of gcexportdata.NewReader and gcexportdata.Read make checking this
+ // ugly so gcimporter gives up enforcing this. The compiler and go/types
+ // importer do enforce this, which seems good enough.
+ const endofsection = "\n$$\n"
+ size -= int64(len(endofsection))
+
+ if size < 0 {
+ err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", arsize, size)
+ return
+ }
+
return
}
-// FindExportData positions the reader r at the beginning of the
-// export data section of an underlying GC-created object/archive
-// file by reading from it. The reader must be positioned at the
-// start of the file before calling this function. The hdr result
-// is the string before the export data, either "$$" or "$$B".
-// The size result is the length of the export data in bytes, or -1 if not known.
-func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
+// ReadUnified reads the contents of the unified export data from a reader r
+// that contains the contents of a GC-created archive file.
+//
+// On success, the reader will be positioned after the end-of-section marker "\n$$\n".
+//
+// Supported GC-created archive files have 4 layers of nesting:
+// - An archive file containing a package definition file.
+// - The package definition file contains headers followed by a data section.
+// Headers are lines (≤ 4kb) that do not start with "$$".
+// - The data section starts with "$$B\n" followed by export data followed
+// by an end of section marker "\n$$\n". (The section start "$$\n" is no
+// longer supported.)
+// - The export data starts with a format byte ('u') followed by the <data> in
+// the given format. (See ReadExportDataHeader for older formats.)
+//
+// Putting this together, the bytes in a GC-created archive file are expected
+// to look like the following.
+// See cmd/internal/archive for more details on ar file headers.
+//
+// | <!arch>\n | ar file signature
+// | __.PKGDEF...size...\n | ar header for __.PKGDEF including size.
+// | go object <...>\n | objabi header
+// | <optional headers>\n | other headers such as build id
+// | $$B\n | binary format marker
+// | u<data>\n | unified export <data>
+// | $$\n | end-of-section marker
+// | [optional padding] | padding byte (0x0A) if size is odd
+// | [ar file header] | other ar files
+// | [ar file data] |
+func ReadUnified(r *bufio.Reader) (data []byte, err error) {
+ // We historically guaranteed headers at the default buffer size (4096) work.
+ // This ensures we can use ReadSlice throughout.
+ const minBufferSize = 4096
+ r = bufio.NewReaderSize(r, minBufferSize)
+
+ size, err := FindPackageDefinition(r)
+ if err != nil {
+ return
+ }
+ n := size
+
+ objapi, headers, err := ReadObjectHeaders(r)
+ if err != nil {
+ return
+ }
+ n -= len(objapi)
+ for _, h := range headers {
+ n -= len(h)
+ }
+
+ hdrlen, err := ReadExportDataHeader(r)
+ if err != nil {
+ return
+ }
+ n -= hdrlen
+
+ // size also includes the end of section marker. Remove that many bytes from the end.
+ const marker = "\n$$\n"
+ n -= len(marker)
+
+ if n < 0 {
+ err = fmt.Errorf("invalid size (%d) in the archive file: %d bytes remain without section headers (recompile package)", size, n)
+ return
+ }
+
+ // Read n bytes from buf.
+ data = make([]byte, n)
+ _, err = io.ReadFull(r, data)
+ if err != nil {
+ return
+ }
+
+ // Check for marker at the end.
+ var suffix [len(marker)]byte
+ _, err = io.ReadFull(r, suffix[:])
+ if err != nil {
+ return
+ }
+ if s := string(suffix[:]); s != marker {
+ err = fmt.Errorf("read %q instead of end-of-section marker (%q)", s, marker)
+ return
+ }
+
+ return
+}
+
+// FindPackageDefinition positions the reader r at the beginning of a package
+// definition file ("__.PKGDEF") within a GC-created archive by reading
+// from it, and returns the size of the package definition file in the archive.
+//
+// The reader must be positioned at the start of the archive file before calling
+// this function, and "__.PKGDEF" is assumed to be the first file in the archive.
+//
+// See cmd/internal/archive for details on the archive format.
+func FindPackageDefinition(r *bufio.Reader) (size int, err error) {
+ // Uses ReadSlice to limit risk of malformed inputs.
+
// Read first line to make sure this is an object file.
line, err := r.ReadSlice('\n')
if err != nil {
@@ -52,48 +186,236 @@ func FindExportData(r *bufio.Reader) (hdr string, size int64, err error) {
return
}
- if string(line) == "!\n" {
- // Archive file. Scan to __.PKGDEF.
- var name string
- if name, size, err = readGopackHeader(r); err != nil {
- return
- }
-
- // First entry should be __.PKGDEF.
- if name != "__.PKGDEF" {
- err = fmt.Errorf("go archive is missing __.PKGDEF")
- return
- }
-
- // Read first line of __.PKGDEF data, so that line
- // is once again the first line of the input.
- if line, err = r.ReadSlice('\n'); err != nil {
- err = fmt.Errorf("can't find export data (%v)", err)
- return
- }
- size -= int64(len(line))
- }
-
- // Now at __.PKGDEF in archive or still at beginning of file.
- // Either way, line should begin with "go object ".
- if !strings.HasPrefix(string(line), "go object ") {
- err = fmt.Errorf("not a Go object file")
+ // Is the first line an archive file signature?
+ if string(line) != "!\n" {
+ err = fmt.Errorf("not the start of an archive file (%q)", line)
return
}
- // Skip over object header to export data.
- // Begins after first line starting with $$.
- for line[0] != '$' {
- if line, err = r.ReadSlice('\n'); err != nil {
- err = fmt.Errorf("can't find export data (%v)", err)
- return
- }
- size -= int64(len(line))
- }
- hdr = string(line)
- if size < 0 {
- size = -1
+ // package export block should be first
+ size = readArchiveHeader(r, "__.PKGDEF")
+ if size <= 0 {
+ err = fmt.Errorf("not a package file")
+ return
}
return
}
+
+// ReadObjectHeaders reads object headers from the reader. Object headers are
+// lines that do not start with an end-of-section marker "$$". The first header
+// is the objabi header. On success, the reader will be positioned at the beginning
+// of the end-of-section marker.
+//
+// It returns an error if any header does not fit in r.Size() bytes.
+func ReadObjectHeaders(r *bufio.Reader) (objapi string, headers []string, err error) {
+ // line is a temporary buffer for headers.
+ // Use bounded reads (ReadSlice, Peek) to limit risk of malformed inputs.
+ var line []byte
+
+ // objapi header should be the first line
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ objapi = string(line)
+
+ // objapi header begins with "go object ".
+ if !strings.HasPrefix(objapi, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", objapi)
+ return
+ }
+
+ // process remaining object header lines
+ for {
+ // check for an end of section marker "$$"
+ line, err = r.Peek(2)
+ if err != nil {
+ return
+ }
+ if string(line) == "$$" {
+ return // stop
+ }
+
+ // read next header
+ line, err = r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+ headers = append(headers, string(line))
+ }
+}
+
+// ReadExportDataHeader reads the export data header and format from r.
+// It returns the number of bytes read, or an error if the format is no longer
+// supported or it failed to read.
+//
+// The only currently supported format is binary export data in the
+// unified export format.
+func ReadExportDataHeader(r *bufio.Reader) (n int, err error) {
+ // Read export data header.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ return
+ }
+
+ hdr := string(line)
+ switch hdr {
+ case "$$\n":
+ err = fmt.Errorf("old textual export format no longer supported (recompile package)")
+ return
+
+ case "$$B\n":
+ var format byte
+ format, err = r.ReadByte()
+ if err != nil {
+ return
+ }
+ // The unified export format starts with a 'u'.
+ switch format {
+ case 'u':
+ default:
+ // Older no longer supported export formats include:
+ // indexed export format which started with an 'i'; and
+ // the older binary export format which started with a 'c',
+ // 'd', or 'v' (from "version").
+ err = fmt.Errorf("binary export format %q is no longer supported (recompile package)", format)
+ return
+ }
+
+ default:
+ err = fmt.Errorf("unknown export data header: %q", hdr)
+ return
+ }
+
+ n = len(hdr) + 1 // + 1 is for 'u'
+ return
+}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+//
+// FindPkg is only used in tests within x/tools.
+func FindPkg(path, srcDir string) (filename, id string, err error) {
+ // TODO(taking): Move internal/exportdata.FindPkg into its own file,
+ // and then this copy into a _test package.
+ if path == "" {
+ return "", "", errors.New("path is empty")
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ var bp *build.Package
+ bp, err = build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ if bp.Goroot && bp.Dir != "" {
+ filename, err = lookupGorootExport(bp.Dir)
+ if err == nil {
+ _, err = os.Stat(filename)
+ }
+ if err == nil {
+ return filename, bp.ImportPath, nil
+ }
+ }
+ goto notfound
+ } else {
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ }
+ id = bp.ImportPath
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ f, statErr := os.Stat(filename)
+ if statErr == nil && !f.IsDir() {
+ return filename, id, nil
+ }
+ if err == nil {
+ err = statErr
+ }
+ }
+
+notfound:
+ if err == nil {
+ return "", path, fmt.Errorf("can't find import: %q", path)
+ }
+ return "", path, fmt.Errorf("can't find import: %q: %w", path, err)
+}
+
+var pkgExts = [...]string{".a", ".o"} // a file from the build cache will have no extension
+
+var exportMap sync.Map // package dir → func() (string, error)
+
+// lookupGorootExport returns the location of the export data
+// (normally found in the build cache, but located in GOROOT/pkg
+// in prior Go releases) for the package located in pkgDir.
+//
+// (We use the package's directory instead of its import path
+// mainly to simplify handling of the packages in src/vendor
+// and cmd/vendor.)
+//
+// lookupGorootExport is only used in tests within x/tools.
+func lookupGorootExport(pkgDir string) (string, error) {
+ f, ok := exportMap.Load(pkgDir)
+ if !ok {
+ var (
+ listOnce sync.Once
+ exportPath string
+ err error
+ )
+ f, _ = exportMap.LoadOrStore(pkgDir, func() (string, error) {
+ listOnce.Do(func() {
+ cmd := exec.Command(filepath.Join(build.Default.GOROOT, "bin", "go"), "list", "-export", "-f", "{{.Export}}", pkgDir)
+ cmd.Dir = build.Default.GOROOT
+ cmd.Env = append(os.Environ(), "PWD="+cmd.Dir, "GOROOT="+build.Default.GOROOT)
+ var output []byte
+ output, err = cmd.Output()
+ if err != nil {
+ if ee, ok := err.(*exec.ExitError); ok && len(ee.Stderr) > 0 {
+ err = errors.New(string(ee.Stderr))
+ }
+ return
+ }
+
+ exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
+ if len(exports) != 1 {
+ err = fmt.Errorf("go list reported %d exports; expected 1", len(exports))
+ return
+ }
+
+ exportPath = exports[0]
+ })
+
+ return exportPath, err
+ })
+ }
+
+ return f.(func() (string, error))()
+}
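As a rough, self-contained illustration of the package-definition layout documented in ReadUnified above, the sketch below assembles a synthetic section (the ar signature and __.PKGDEF header are omitted and the export payload is a placeholder) and skips its headers the same way ReadObjectHeaders does, stopping once Peek reports the "$$" end-of-section marker:

package main

import (
	"bufio"
	"fmt"
	"strings"
)

func main() {
	// Synthetic package-definition contents, following the layout documented
	// above: objabi header, extra headers, then the data section.
	pkgdef := "go object go1.x linux amd64 ...\n" + // objabi header
		"build id \"abc\"\n" + // other header, e.g. build id
		"$$B\n" + // binary format marker
		"u<placeholder export data>" + // unified export data (format byte 'u')
		"\n$$\n" // end-of-section marker

	r := bufio.NewReader(strings.NewReader(pkgdef))

	// Read header lines until the end-of-section marker "$$" is next.
	for {
		next, err := r.Peek(2)
		if err != nil || string(next) == "$$" {
			break
		}
		line, _ := r.ReadString('\n')
		fmt.Printf("header: %q\n", line)
	}
}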
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
index e6c5d51f..3dbd21d1 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/gcimporter.go
@@ -23,17 +23,11 @@ package gcimporter // import "golang.org/x/tools/internal/gcimporter"
import (
"bufio"
- "bytes"
"fmt"
- "go/build"
"go/token"
"go/types"
"io"
"os"
- "os/exec"
- "path/filepath"
- "strings"
- "sync"
)
const (
@@ -45,125 +39,14 @@ const (
trace = false
)
-var exportMap sync.Map // package dir → func() (string, bool)
-
-// lookupGorootExport returns the location of the export data
-// (normally found in the build cache, but located in GOROOT/pkg
-// in prior Go releases) for the package located in pkgDir.
-//
-// (We use the package's directory instead of its import path
-// mainly to simplify handling of the packages in src/vendor
-// and cmd/vendor.)
-func lookupGorootExport(pkgDir string) (string, bool) {
- f, ok := exportMap.Load(pkgDir)
- if !ok {
- var (
- listOnce sync.Once
- exportPath string
- )
- f, _ = exportMap.LoadOrStore(pkgDir, func() (string, bool) {
- listOnce.Do(func() {
- cmd := exec.Command("go", "list", "-export", "-f", "{{.Export}}", pkgDir)
- cmd.Dir = build.Default.GOROOT
- var output []byte
- output, err := cmd.Output()
- if err != nil {
- return
- }
-
- exports := strings.Split(string(bytes.TrimSpace(output)), "\n")
- if len(exports) != 1 {
- return
- }
-
- exportPath = exports[0]
- })
-
- return exportPath, exportPath != ""
- })
- }
-
- return f.(func() (string, bool))()
-}
-
-var pkgExts = [...]string{".a", ".o"}
-
-// FindPkg returns the filename and unique package id for an import
-// path based on package information provided by build.Import (using
-// the build.Default build.Context). A relative srcDir is interpreted
-// relative to the current working directory.
-// If no file was found, an empty filename is returned.
-func FindPkg(path, srcDir string) (filename, id string) {
- if path == "" {
- return
- }
-
- var noext string
- switch {
- default:
- // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
- // Don't require the source files to be present.
- if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
- srcDir = abs
- }
- bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
- if bp.PkgObj == "" {
- var ok bool
- if bp.Goroot && bp.Dir != "" {
- filename, ok = lookupGorootExport(bp.Dir)
- }
- if !ok {
- id = path // make sure we have an id to print in error message
- return
- }
- } else {
- noext = strings.TrimSuffix(bp.PkgObj, ".a")
- id = bp.ImportPath
- }
-
- case build.IsLocalImport(path):
- // "./x" -> "/this/directory/x.ext", "/this/directory/x"
- noext = filepath.Join(srcDir, path)
- id = noext
-
- case filepath.IsAbs(path):
- // for completeness only - go/build.Import
- // does not support absolute imports
- // "/x" -> "/x.ext", "/x"
- noext = path
- id = path
- }
-
- if false { // for debugging
- if path != id {
- fmt.Printf("%s -> %s\n", path, id)
- }
- }
-
- if filename != "" {
- if f, err := os.Stat(filename); err == nil && !f.IsDir() {
- return
- }
- }
-
- // try extensions
- for _, ext := range pkgExts {
- filename = noext + ext
- if f, err := os.Stat(filename); err == nil && !f.IsDir() {
- return
- }
- }
-
- filename = "" // not found
- return
-}
-
// Import imports a gc-generated package given its import path and srcDir, adds
// the corresponding package object to the packages map, and returns the object.
// The packages map must contain all packages already imported.
-func Import(packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
+//
+// Import is only used in tests.
+func Import(fset *token.FileSet, packages map[string]*types.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types.Package, err error) {
var rc io.ReadCloser
- var filename, id string
+ var id string
if lookup != nil {
// With custom lookup specified, assume that caller has
// converted path to a canonical import path for use in the map.
@@ -182,12 +65,13 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
rc = f
} else {
- filename, id = FindPkg(path, srcDir)
+ var filename string
+ filename, id, err = FindPkg(path, srcDir)
if filename == "" {
if path == "unsafe" {
return types.Unsafe, nil
}
- return nil, fmt.Errorf("can't find import: %q", id)
+ return nil, err
}
// no need to re-import if the package was imported completely before
@@ -210,62 +94,15 @@ func Import(packages map[string]*types.Package, path, srcDir string, lookup func
}
defer rc.Close()
- var hdr string
- var size int64
buf := bufio.NewReader(rc)
- if hdr, size, err = FindExportData(buf); err != nil {
+ data, err := ReadUnified(buf)
+ if err != nil {
+ err = fmt.Errorf("import %q: %v", path, err)
return
}
- switch hdr {
- case "$$B\n":
- var data []byte
- data, err = io.ReadAll(buf)
- if err != nil {
- break
- }
-
- // TODO(gri): allow clients of go/importer to provide a FileSet.
- // Or, define a new standard go/types/gcexportdata package.
- fset := token.NewFileSet()
-
- // Select appropriate importer.
- if len(data) > 0 {
- switch data[0] {
- case 'v', 'c', 'd':
- // binary: emitted by cmd/compile till go1.10; obsolete.
- return nil, fmt.Errorf("binary (%c) import format is no longer supported", data[0])
-
- case 'i':
- // indexed: emitted by cmd/compile till go1.19;
- // now used only for serializing go/types.
- // See https://github.com/golang/go/issues/69491.
- _, pkg, err := IImportData(fset, packages, data[1:], id)
- return pkg, err
-
- case 'u':
- // unified: emitted by cmd/compile since go1.20.
- _, pkg, err := UImportData(fset, packages, data[1:size], id)
- return pkg, err
-
- default:
- l := len(data)
- if l > 10 {
- l = 10
- }
- return nil, fmt.Errorf("unexpected export data with prefix %q for path %s", string(data[:l]), id)
- }
- }
-
- default:
- err = fmt.Errorf("unknown export data header: %q", hdr)
- }
+ // unified: emitted by cmd/compile since go1.20.
+ _, pkg, err = UImportData(fset, packages, data, id)
return
}
-
-type byPath []*types.Package
-
-func (a byPath) Len() int { return len(a) }
-func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
-func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
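The reworked Import above is now test-only and supports just the unified format; external callers go through the public golang.org/x/tools/go/gcexportdata package, which layers on FindExportData/ReadUnified. A hedged usage sketch (the "fmt" import path is an arbitrary example):

package main

import (
	"fmt"
	"go/token"
	"go/types"
	"log"
	"os"

	"golang.org/x/tools/go/gcexportdata"
)

func main() {
	// Locate export data for a package (delegates to the build cache / GOPATH).
	filename, path := gcexportdata.Find("fmt", ".")
	if filename == "" {
		log.Fatal("export data for fmt not found")
	}

	f, err := os.Open(filename)
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	// NewReader positions the reader at the export data section of the archive.
	r, err := gcexportdata.NewReader(f)
	if err != nil {
		log.Fatal(err)
	}

	fset := token.NewFileSet()
	imports := make(map[string]*types.Package)
	pkg, err := gcexportdata.Read(r, fset, imports, path)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(pkg.Path(), "has", pkg.Scope().Len(), "top-level objects")
}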
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
index 7dfc31a3..780873e3 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iexport.go
@@ -236,6 +236,7 @@ import (
"io"
"math/big"
"reflect"
+ "slices"
"sort"
"strconv"
"strings"
@@ -271,10 +272,10 @@ import (
// file system, be sure to include a cryptographic digest of the executable in
// the key to avoid version skew.
//
-// If the provided reportf func is non-nil, it will be used for reporting bugs
-// encountered during export.
-// TODO(rfindley): remove reportf when we are confident enough in the new
-// objectpath encoding.
+// If the provided reportf func is non-nil, it is used for reporting
+// bugs (e.g. recovered panics) encountered during export, enabling us
+// to obtain via telemetry the stack that would otherwise be lost by
+// merely returning an error.
func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc) ([]byte, error) {
// In principle this operation can only fail if out.Write fails,
// but that's impossible for bytes.Buffer---and as a matter of
@@ -283,7 +284,7 @@ func IExportShallow(fset *token.FileSet, pkg *types.Package, reportf ReportFunc)
// TODO(adonovan): use byte slices throughout, avoiding copying.
const bundle, shallow = false, true
var out bytes.Buffer
- err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+ err := iexportCommon(&out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, reportf)
return out.Bytes(), err
}
@@ -310,7 +311,7 @@ func IImportShallow(fset *token.FileSet, getPackages GetPackagesFunc, data []byt
}
// ReportFunc is the type of a function used to report formatted bugs.
-type ReportFunc = func(string, ...interface{})
+type ReportFunc = func(string, ...any)
// Current bundled export format version. Increase with each format change.
// 0: initial implementation
@@ -323,20 +324,27 @@ const bundleVersion = 0
// so that calls to IImportData can override with a provided package path.
func IExportData(out io.Writer, fset *token.FileSet, pkg *types.Package) error {
const bundle, shallow = false, false
- return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg})
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, []*types.Package{pkg}, nil)
}
// IExportBundle writes an indexed export bundle for pkgs to out.
func IExportBundle(out io.Writer, fset *token.FileSet, pkgs []*types.Package) error {
const bundle, shallow = true, false
- return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs)
+ return iexportCommon(out, fset, bundle, shallow, iexportVersion, pkgs, nil)
}
-func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package) (err error) {
+func iexportCommon(out io.Writer, fset *token.FileSet, bundle, shallow bool, version int, pkgs []*types.Package, reportf ReportFunc) (err error) {
if !debug {
defer func() {
if e := recover(); e != nil {
+ // Report the stack via telemetry (see #71067).
+ if reportf != nil {
+ reportf("panic in exporter")
+ }
if ierr, ok := e.(internalError); ok {
+ // internalError usually means we exported a
+ // bad go/types data structure: a violation
+ // of an implicit precondition of Export.
err = ierr
return
}
@@ -458,7 +466,7 @@ func (p *iexporter) encodeFile(w *intWriter, file *token.File, needed []uint64)
w.uint64(size)
// Sort the set of needed offsets. Duplicates are harmless.
- sort.Slice(needed, func(i, j int) bool { return needed[i] < needed[j] })
+ slices.Sort(needed)
lines := file.Lines() // byte offset of each line start
w.uint64(uint64(len(lines)))
@@ -597,7 +605,7 @@ type filePositions struct {
needed []uint64 // unordered list of needed file offsets
}
-func (p *iexporter) trace(format string, args ...interface{}) {
+func (p *iexporter) trace(format string, args ...any) {
if !trace {
// Call sites should also be guarded, but having this check here allows
// easily enabling/disabling debug trace statements.
@@ -812,7 +820,7 @@ func (p *iexporter) doDecl(obj types.Object) {
n := named.NumMethods()
w.uint64(uint64(n))
- for i := 0; i < n; i++ {
+ for i := range n {
m := named.Method(i)
w.pos(m.Pos())
w.string(m.Name())
@@ -1089,7 +1097,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
w.pkg(fieldPkg)
w.uint64(uint64(n))
- for i := 0; i < n; i++ {
+ for i := range n {
f := t.Field(i)
if w.p.shallow {
w.objectPath(f)
@@ -1138,7 +1146,7 @@ func (w *exportWriter) doTyp(t types.Type, pkg *types.Package) {
w.startType(unionType)
nt := t.Len()
w.uint64(uint64(nt))
- for i := 0; i < nt; i++ {
+ for i := range nt {
term := t.Term(i)
w.bool(term.Tilde())
w.typ(term.Type(), pkg)
@@ -1267,7 +1275,7 @@ func tparamName(exportName string) string {
func (w *exportWriter) paramList(tup *types.Tuple) {
n := tup.Len()
w.uint64(uint64(n))
- for i := 0; i < n; i++ {
+ for i := range n {
w.param(tup.At(i))
}
}
@@ -1583,6 +1591,6 @@ func (e internalError) Error() string { return "gcimporter: " + string(e) }
// "internalErrorf" as the former is used for bugs, whose cause is
// internal inconsistency, whereas the latter is used for ordinary
// situations like bad input, whose cause is external.
-func internalErrorf(format string, args ...interface{}) error {
+func internalErrorf(format string, args ...any) error {
return internalError(fmt.Sprintf(format, args...))
}
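Several hunks in this file replace counted loops and sort.Slice with the Go 1.22 integer range form and slices.Sort; a minimal equivalence sketch:

package main

import (
	"fmt"
	"slices"
)

func main() {
	needed := []uint64{42, 7, 7, 19}

	// Go 1.22+: "for i := range n" iterates i = 0, 1, ..., n-1,
	// exactly like "for i := 0; i < n; i++".
	n := len(needed)
	for i := range n {
		fmt.Println(i, needed[i])
	}

	// slices.Sort is the generic replacement for sort.Slice with a "<"
	// comparator; duplicate offsets are harmless, as the encoder notes.
	slices.Sort(needed)
	fmt.Println(needed)
}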
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
index e260c0e8..82e6c9d2 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/iimport.go
@@ -5,8 +5,6 @@
// Indexed package import.
// See iexport.go for the export data format.
-// This file is a copy of $GOROOT/src/go/internal/gcimporter/iimport.go.
-
package gcimporter
import (
@@ -18,6 +16,7 @@ import (
"go/types"
"io"
"math/big"
+ "slices"
"sort"
"strings"
@@ -316,7 +315,7 @@ func iimportCommon(fset *token.FileSet, getPackages GetPackagesFunc, data []byte
pkgs = pkgList[:1]
// record all referenced packages as imports
- list := append(([]*types.Package)(nil), pkgList[1:]...)
+ list := slices.Clone(pkgList[1:])
sort.Sort(byPath(list))
pkgs[0].SetImports(list)
}
@@ -402,7 +401,7 @@ type iimporter struct {
indent int // for tracing support
}
-func (p *iimporter) trace(format string, args ...interface{}) {
+func (p *iimporter) trace(format string, args ...any) {
if !trace {
// Call sites should also be guarded, but having this check here allows
// easily enabling/disabling debug trace statements.
@@ -673,7 +672,9 @@ func (r *importReader) obj(name string) {
case varTag:
typ := r.typ()
- r.declare(types.NewVar(pos, r.currPkg, name, typ))
+ v := types.NewVar(pos, r.currPkg, name, typ)
+ typesinternal.SetVarKind(v, typesinternal.PackageVar)
+ r.declare(v)
default:
errorf("unexpected tag: %v", tag)
@@ -1111,3 +1112,9 @@ func (r *importReader) byte() byte {
}
return x
}
+
+type byPath []*types.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/support.go b/vendor/golang.org/x/tools/internal/gcimporter/support.go
new file mode 100644
index 00000000..4af810dc
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gcimporter/support.go
@@ -0,0 +1,30 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gcimporter
+
+import (
+ "bufio"
+ "io"
+ "strconv"
+ "strings"
+)
+
+// Copy of $GOROOT/src/cmd/internal/archive.ReadHeader.
+func readArchiveHeader(b *bufio.Reader, name string) int {
+ // architecture-independent object file output
+ const HeaderSize = 60
+
+ var buf [HeaderSize]byte
+ if _, err := io.ReadFull(b, buf[:]); err != nil {
+ return -1
+ }
+ aname := strings.Trim(string(buf[0:16]), " ")
+ if !strings.HasPrefix(aname, name) {
+ return -1
+ }
+ asize := strings.Trim(string(buf[48:58]), " ")
+ i, _ := strconv.Atoi(asize)
+ return i
+}
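readArchiveHeader depends only on the fixed 60-byte ar member header: bytes 0-15 hold the member name and bytes 48-57 the decimal size, followed by the "`\n" terminator. A standalone sketch that builds such a header and extracts the same fields (field widths per the Unix ar format; the values are made up):

package main

import (
	"bufio"
	"fmt"
	"io"
	"strconv"
	"strings"
)

func main() {
	// A 60-byte ar member header: name (16), mtime (12), uid (6), gid (6),
	// mode (8), size (10), terminator "`\n" (2).
	hdr := fmt.Sprintf("%-16s%-12s%-6s%-6s%-8s%-10s`\n",
		"__.PKGDEF", "0", "0", "0", "644", "1234")

	r := bufio.NewReader(strings.NewReader(hdr))
	buf := make([]byte, 60)
	if _, err := io.ReadFull(r, buf); err != nil {
		panic(err)
	}

	name := strings.Trim(string(buf[0:16]), " ")
	size, _ := strconv.Atoi(strings.Trim(string(buf[48:58]), " "))
	fmt.Println(name, size) // __.PKGDEF 1234
}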
diff --git a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
index 1db40861..37b4a39e 100644
--- a/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
+++ b/vendor/golang.org/x/tools/internal/gcimporter/ureader_yes.go
@@ -11,10 +11,10 @@ import (
"go/token"
"go/types"
"sort"
- "strings"
"golang.org/x/tools/internal/aliases"
"golang.org/x/tools/internal/pkgbits"
+ "golang.org/x/tools/internal/typesinternal"
)
// A pkgReader holds the shared state for reading a unified IR package
@@ -71,7 +71,6 @@ func UImportData(fset *token.FileSet, imports map[string]*types.Package, data []
}
s := string(data)
- s = s[:strings.LastIndex(s, "\n$$\n")]
input := pkgbits.NewPkgDecoder(path, s)
pkg = readUnifiedPackage(fset, nil, imports, input)
return
@@ -266,7 +265,12 @@ func (pr *pkgReader) pkgIdx(idx pkgbits.Index) *types.Package {
func (r *reader) doPkg() *types.Package {
path := r.String()
switch path {
- case "":
+ // cmd/compile emits path="main" for main packages because
+ // that's the linker symbol prefix it used; but we need
+ // the package's path as it would be reported by go list,
+ // hence "main" below.
+ // See test at go/packages.TestMainPackagePathInModeTypes.
+ case "", "main":
path = r.p.PkgPath()
case "builtin":
return nil // universe
@@ -569,7 +573,8 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
sig := fn.Type().(*types.Signature)
recv := types.NewVar(fn.Pos(), fn.Pkg(), "", named)
- methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignature(recv, sig.Params(), sig.Results(), sig.Variadic()))
+ typesinternal.SetVarKind(recv, typesinternal.RecvVar)
+ methods[i] = types.NewFunc(fn.Pos(), fn.Pkg(), fn.Name(), types.NewSignatureType(recv, nil, nil, sig.Params(), sig.Results(), sig.Variadic()))
}
embeds := make([]types.Type, iface.NumEmbeddeds())
@@ -616,7 +621,9 @@ func (pr *pkgReader) objIdx(idx pkgbits.Index) (*types.Package, string) {
case pkgbits.ObjVar:
pos := r.pos()
typ := r.typ()
- declare(types.NewVar(pos, objPkg, objName, typ))
+ v := types.NewVar(pos, objPkg, objName, typ)
+ typesinternal.SetVarKind(v, typesinternal.PackageVar)
+ declare(v)
}
}
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke.go b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
index e333efc8..58721202 100644
--- a/vendor/golang.org/x/tools/internal/gocommand/invoke.go
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke.go
@@ -28,7 +28,7 @@ import (
"golang.org/x/tools/internal/event/label"
)
-// An Runner will run go command invocations and serialize
+// A Runner will run go command invocations and serialize
// them if it sees a concurrency error.
type Runner struct {
// once guards the runner initialization.
@@ -141,7 +141,7 @@ func (runner *Runner) runPiped(ctx context.Context, inv Invocation, stdout, stde
// Wait for all in-progress go commands to return before proceeding,
// to avoid load concurrency errors.
- for i := 0; i < maxInFlight; i++ {
+ for range maxInFlight {
select {
case <-ctx.Done():
return ctx.Err(), ctx.Err()
@@ -179,7 +179,7 @@ type Invocation struct {
CleanEnv bool
Env []string
WorkingDir string
- Logf func(format string, args ...interface{})
+ Logf func(format string, args ...any)
}
// Postcondition: both error results have same nilness.
@@ -388,7 +388,9 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
case err := <-resChan:
return err
case <-timer.C:
- HandleHangingGoCommand(startTime, cmd)
+ // HandleHangingGoCommand terminates this process.
+ // Pass off resChan in case we can collect the command error.
+ handleHangingGoCommand(startTime, cmd, resChan)
case <-ctx.Done():
}
} else {
@@ -413,8 +415,6 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
}
// Didn't shut down in response to interrupt. Kill it hard.
- // TODO(rfindley): per advice from bcmills@, it may be better to send SIGQUIT
- // on certain platforms, such as unix.
if err := cmd.Process.Kill(); err != nil && !errors.Is(err, os.ErrProcessDone) && debug {
log.Printf("error killing the Go command: %v", err)
}
@@ -422,15 +422,17 @@ func runCmdContext(ctx context.Context, cmd *exec.Cmd) (err error) {
return <-resChan
}
-func HandleHangingGoCommand(start time.Time, cmd *exec.Cmd) {
+// handleHangingGoCommand outputs debugging information to help diagnose the
+// cause of a hanging Go command, and then exits with log.Fatalf.
+func handleHangingGoCommand(start time.Time, cmd *exec.Cmd, resChan chan error) {
switch runtime.GOOS {
- case "linux", "darwin", "freebsd", "netbsd":
+ case "linux", "darwin", "freebsd", "netbsd", "openbsd":
fmt.Fprintln(os.Stderr, `DETECTED A HANGING GO COMMAND
-The gopls test runner has detected a hanging go command. In order to debug
-this, the output of ps and lsof/fstat is printed below.
+ The gopls test runner has detected a hanging go command. In order to debug
+ this, the output of ps and lsof/fstat is printed below.
-See golang/go#54461 for more details.`)
+ See golang/go#54461 for more details.`)
fmt.Fprintln(os.Stderr, "\nps axo ppid,pid,command:")
fmt.Fprintln(os.Stderr, "-------------------------")
@@ -438,7 +440,7 @@ See golang/go#54461 for more details.`)
psCmd.Stdout = os.Stderr
psCmd.Stderr = os.Stderr
if err := psCmd.Run(); err != nil {
- panic(fmt.Sprintf("running ps: %v", err))
+ log.Printf("Handling hanging Go command: running ps: %v", err)
}
listFiles := "lsof"
@@ -452,10 +454,24 @@ See golang/go#54461 for more details.`)
listFilesCmd.Stdout = os.Stderr
listFilesCmd.Stderr = os.Stderr
if err := listFilesCmd.Run(); err != nil {
- panic(fmt.Sprintf("running %s: %v", listFiles, err))
+ log.Printf("Handling hanging Go command: running %s: %v", listFiles, err)
+ }
+ // Try to extract information about the slow go process by issuing a SIGQUIT.
+ if err := cmd.Process.Signal(sigStuckProcess); err == nil {
+ select {
+ case err := <-resChan:
+ stderr := "not a bytes.Buffer"
+ if buf, _ := cmd.Stderr.(*bytes.Buffer); buf != nil {
+ stderr = buf.String()
+ }
+ log.Printf("Quit hanging go command:\n\terr:%v\n\tstderr:\n%v\n\n", err, stderr)
+ case <-time.After(5 * time.Second):
+ }
+ } else {
+ log.Printf("Sending signal %d to hanging go command: %v", sigStuckProcess, err)
}
}
- panic(fmt.Sprintf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid))
+ log.Fatalf("detected hanging go command (golang/go#54461); waited %s\n\tcommand:%s\n\tpid:%d", time.Since(start), cmd, cmd.Process.Pid)
}
func cmdDebugStr(cmd *exec.Cmd) string {
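handleHangingGoCommand's new SIGQUIT path leans on the Go runtime's behaviour of dumping all goroutine stacks to stderr when it receives SIGQUIT. A hedged, standalone sketch of the same idea outside the Runner machinery (the child command and timeouts are placeholders):

//go:build unix

package main

import (
	"bytes"
	"log"
	"os/exec"
	"syscall"
	"time"
)

func main() {
	// A child go command that we suspect may hang (placeholder arguments).
	cmd := exec.Command("go", "list", "./...")
	var stderr bytes.Buffer
	cmd.Stderr = &stderr
	if err := cmd.Start(); err != nil {
		log.Fatal(err)
	}

	done := make(chan error, 1)
	go func() { done <- cmd.Wait() }()

	select {
	case err := <-done:
		log.Printf("finished: %v", err)
	case <-time.After(30 * time.Second):
		// SIGQUIT makes the Go runtime in the child print all goroutine
		// stacks to stderr before exiting, which is what we want to log.
		if err := cmd.Process.Signal(syscall.SIGQUIT); err != nil {
			log.Printf("signalling child: %v", err)
		}
		select {
		case <-done:
		case <-time.After(5 * time.Second):
		}
		log.Fatalf("hanging go command; child stderr:\n%s", stderr.String())
	}
}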
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
new file mode 100644
index 00000000..469c648e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_notunix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !unix
+
+package gocommand
+
+import "os"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// On Unix we send SIGQUIT, but on non-Unix we only have os.Kill.
+var sigStuckProcess = os.Kill
diff --git a/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
new file mode 100644
index 00000000..169d37c8
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/gocommand/invoke_unix.go
@@ -0,0 +1,13 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build unix
+
+package gocommand
+
+import "syscall"
+
+// sigStuckProcess is the signal to send to kill a hanging subprocess.
+// Send SIGQUIT to get a stack trace.
+var sigStuckProcess = syscall.SIGQUIT
diff --git a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
index 44719de1..25ebab66 100644
--- a/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
+++ b/vendor/golang.org/x/tools/internal/packagesinternal/packages.go
@@ -5,8 +5,7 @@
// Package packagesinternal exposes internal-only fields from go/packages.
package packagesinternal
-var GetForTest = func(p interface{}) string { return "" }
-var GetDepsErrors = func(p interface{}) []*PackageError { return nil }
+var GetDepsErrors = func(p any) []*PackageError { return nil }
type PackageError struct {
ImportStack []string // shortest path from package named on command line to this one
@@ -16,7 +15,6 @@ type PackageError struct {
var TypecheckCgo int
var DepsErrors int // must be set as a LoadMode to call GetDepsErrors
-var ForTest int // must be set as a LoadMode to call GetForTest
-var SetModFlag = func(config interface{}, value string) {}
-var SetModFile = func(config interface{}, value string) {}
+var SetModFlag = func(config any, value string) {}
+var SetModFile = func(config any, value string) {}
diff --git a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
index f6cb37c5..c0aba26c 100644
--- a/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
+++ b/vendor/golang.org/x/tools/internal/pkgbits/decoder.go
@@ -259,7 +259,7 @@ func (r *Decoder) rawUvarint() uint64 {
func readUvarint(r *strings.Reader) (uint64, error) {
var x uint64
var s uint
- for i := 0; i < binary.MaxVarintLen64; i++ {
+ for i := range binary.MaxVarintLen64 {
b, err := r.ReadByte()
if err != nil {
if i > 0 && err == io.EOF {
diff --git a/vendor/golang.org/x/tools/internal/stdlib/deps.go b/vendor/golang.org/x/tools/internal/stdlib/deps.go
new file mode 100644
index 00000000..c50bf406
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/deps.go
@@ -0,0 +1,359 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Code generated by generate.go. DO NOT EDIT.
+
+package stdlib
+
+type pkginfo struct {
+ name string
+ deps string // list of indices of dependencies, as varint-encoded deltas
+}
+
+var deps = [...]pkginfo{
+ {"archive/tar", "\x03j\x03E6\x01\v\x01\"\x01\x01\x02\x05\n\x02\x01\x02\x02\v"},
+ {"archive/zip", "\x02\x04`\a\x16\x0206\x01*\x05\x01\x11\x03\x02\r\x04"},
+ {"bufio", "\x03j~E\x13"},
+ {"bytes", "m+S\x03\fG\x02\x02"},
+ {"cmp", ""},
+ {"compress/bzip2", "\x02\x02\xe7\x01B"},
+ {"compress/flate", "\x02k\x03{\r\x024\x01\x03"},
+ {"compress/gzip", "\x02\x04`\a\x03\x15fT"},
+ {"compress/lzw", "\x02k\x03{"},
+ {"compress/zlib", "\x02\x04`\a\x03\x13\x01g"},
+ {"container/heap", "\xae\x02"},
+ {"container/list", ""},
+ {"container/ring", ""},
+ {"context", "m\\i\x01\f"},
+ {"crypto", "\x83\x01hD"},
+ {"crypto/aes", "\x10\n\a\x8e\x02"},
+ {"crypto/cipher", "\x03\x1e\x01\x01\x1d\x11\x1c,R"},
+ {"crypto/des", "\x10\x13\x1d-,\x96\x01\x03"},
+ {"crypto/dsa", "@\x04)~\x0e"},
+ {"crypto/ecdh", "\x03\v\f\x0e\x04\x14\x04\r\x1c~"},
+ {"crypto/ecdsa", "\x0e\x05\x03\x04\x01\x0e\x16\x01\x04\f\x01\x1c~\x0e\x04K\x01"},
+ {"crypto/ed25519", "\x0e\x1c\x16\n\a\x1c~D"},
+ {"crypto/elliptic", "0=~\x0e9"},
+ {"crypto/fips140", " \x05\x90\x01"},
+ {"crypto/hkdf", "-\x12\x01-\x16"},
+ {"crypto/hmac", "\x1a\x14\x11\x01\x112"},
+ {"crypto/internal/boring", "\x0e\x02\rf"},
+ {"crypto/internal/boring/bbig", "\x1a\xdf\x01L"},
+ {"crypto/internal/boring/bcache", "\xb3\x02\x12"},
+ {"crypto/internal/boring/sig", ""},
+ {"crypto/internal/cryptotest", "\x03\r\n)\x0e\x19\x06\x13\x12#\a\t\x11\x12\x11\x1a\r\r\x05\n"},
+ {"crypto/internal/entropy", "E"},
+ {"crypto/internal/fips140", ">/~8\r\x15"},
+ {"crypto/internal/fips140/aes", "\x03\x1d\x03\x02\x13\x04\x01\x01\x05*\x8d\x015"},
+ {"crypto/internal/fips140/aes/gcm", " \x01\x02\x02\x02\x11\x04\x01\x06*\x8b\x01"},
+ {"crypto/internal/fips140/alias", "\xc5\x02"},
+ {"crypto/internal/fips140/bigmod", "%\x17\x01\x06*\x8d\x01"},
+ {"crypto/internal/fips140/check", " \x0e\x06\b\x02\xad\x01Z"},
+ {"crypto/internal/fips140/check/checktest", "%\xfe\x01\""},
+ {"crypto/internal/fips140/drbg", "\x03\x1c\x01\x01\x04\x13\x04\b\x01(~\x0f8"},
+ {"crypto/internal/fips140/ecdh", "\x03\x1d\x05\x02\t\f1~\x0f8"},
+ {"crypto/internal/fips140/ecdsa", "\x03\x1d\x04\x01\x02\a\x02\x067~G"},
+ {"crypto/internal/fips140/ed25519", "\x03\x1d\x05\x02\x04\v7\xc2\x01\x03"},
+ {"crypto/internal/fips140/edwards25519", "%\a\f\x041\x8d\x018"},
+ {"crypto/internal/fips140/edwards25519/field", "%\x13\x041\x8d\x01"},
+ {"crypto/internal/fips140/hkdf", "\x03\x1d\x05\t\x069"},
+ {"crypto/internal/fips140/hmac", "\x03\x1d\x14\x01\x017"},
+ {"crypto/internal/fips140/mlkem", "\x03\x1d\x05\x02\x0e\x03\x041"},
+ {"crypto/internal/fips140/nistec", "%\f\a\x041\x8d\x01)\x0f\x13"},
+ {"crypto/internal/fips140/nistec/fiat", "%\x135\x8d\x01"},
+ {"crypto/internal/fips140/pbkdf2", "\x03\x1d\x05\t\x069"},
+ {"crypto/internal/fips140/rsa", "\x03\x1d\x04\x01\x02\r\x01\x01\x025~G"},
+ {"crypto/internal/fips140/sha256", "\x03\x1d\x1c\x01\x06*\x8d\x01"},
+ {"crypto/internal/fips140/sha3", "\x03\x1d\x18\x04\x010\x8d\x01K"},
+ {"crypto/internal/fips140/sha512", "\x03\x1d\x1c\x01\x06*\x8d\x01"},
+ {"crypto/internal/fips140/ssh", " \x05"},
+ {"crypto/internal/fips140/subtle", "#"},
+ {"crypto/internal/fips140/tls12", "\x03\x1d\x05\t\x06\x027"},
+ {"crypto/internal/fips140/tls13", "\x03\x1d\x05\b\a\b1"},
+ {"crypto/internal/fips140deps", ""},
+ {"crypto/internal/fips140deps/byteorder", "\x99\x01"},
+ {"crypto/internal/fips140deps/cpu", "\xad\x01\a"},
+ {"crypto/internal/fips140deps/godebug", "\xb5\x01"},
+ {"crypto/internal/fips140hash", "5\x1a4\xc2\x01"},
+ {"crypto/internal/fips140only", "'\r\x01\x01M26"},
+ {"crypto/internal/fips140test", ""},
+ {"crypto/internal/hpke", "\x0e\x01\x01\x03\x1a\x1d#,aM"},
+ {"crypto/internal/impl", "\xb0\x02"},
+ {"crypto/internal/randutil", "\xeb\x01\x12"},
+ {"crypto/internal/sysrand", "mi\"\x1e\r\x0f\x01\x01\v\x06"},
+ {"crypto/internal/sysrand/internal/seccomp", "m"},
+ {"crypto/md5", "\x0e2-\x16\x16a"},
+ {"crypto/mlkem", "/"},
+ {"crypto/pbkdf2", "2\r\x01-\x16"},
+ {"crypto/rand", "\x1a\x06\a\x19\x04\x01(~\x0eL"},
+ {"crypto/rc4", "#\x1d-\xc2\x01"},
+ {"crypto/rsa", "\x0e\f\x01\t\x0f\f\x01\x04\x06\a\x1c\x03\x1326\r\x01"},
+ {"crypto/sha1", "\x0e\f&-\x16\x16\x14M"},
+ {"crypto/sha256", "\x0e\f\x1aO"},
+ {"crypto/sha3", "\x0e'N\xc2\x01"},
+ {"crypto/sha512", "\x0e\f\x1cM"},
+ {"crypto/subtle", "8\x96\x01U"},
+ {"crypto/tls", "\x03\b\x02\x01\x01\x01\x01\x02\x01\x01\x01\x03\x01\a\x01\v\x02\n\x01\b\x05\x03\x01\x01\x01\x01\x02\x01\x02\x01\x17\x02\x03\x13\x16\x14\b6\x16\x15\r\n\x01\x01\x01\x02\x01\f\x06\x02\x01"},
+ {"crypto/tls/internal/fips140tls", " \x93\x02"},
+ {"crypto/x509", "\x03\v\x01\x01\x01\x01\x01\x01\x01\x011\x03\x02\x01\x01\x02\x05\x0e\x06\x02\x02\x03E\x033\x01\x02\t\x01\x01\x01\a\x0f\x05\x01\x06\x02\x05\f\x01\x02\r\x02\x01\x01\x02\x03\x01"},
+ {"crypto/x509/pkix", "c\x06\a\x89\x01F"},
+ {"database/sql", "\x03\nJ\x16\x03{\f\x06!\x05\n\x02\x03\x01\f\x02\x02\x02"},
+ {"database/sql/driver", "\r`\x03\xae\x01\x11\x10"},
+ {"debug/buildinfo", "\x03W\x02\x01\x01\b\a\x03`\x19\x02\x01*\x0f "},
+ {"debug/dwarf", "\x03c\a\x03{0\x13\x01\x01"},
+ {"debug/elf", "\x03\x06P\r\a\x03`\x1a\x01+\x19\x01\x15"},
+ {"debug/gosym", "\x03c\n\xbe\x01\x01\x01\x02"},
+ {"debug/macho", "\x03\x06P\r\n`\x1b+\x19\x01"},
+ {"debug/pe", "\x03\x06P\r\a\x03`\x1b+\x19\x01\x15"},
+ {"debug/plan9obj", "f\a\x03`\x1b+"},
+ {"embed", "m+:\x19\x01S"},
+ {"embed/internal/embedtest", ""},
+ {"encoding", ""},
+ {"encoding/ascii85", "\xeb\x01D"},
+ {"encoding/asn1", "\x03j\x03\x88\x01\x01%\x0f\x02\x01\x0f\x03\x01"},
+ {"encoding/base32", "\xeb\x01B\x02"},
+ {"encoding/base64", "f\x85\x01B\x02"},
+ {"encoding/binary", "m~\r&\x0f\x05"},
+ {"encoding/csv", "\x02\x01j\x03{E\x11\x02"},
+ {"encoding/gob", "\x02_\x05\a\x03`\x1b\f\x01\x02\x1c\b\x14\x01\x0e\x02"},
+ {"encoding/hex", "m\x03{B\x03"},
+ {"encoding/json", "\x03\x01]\x04\b\x03{\r&\x0f\x02\x01\x02\x0f\x01\x01\x02"},
+ {"encoding/pem", "\x03b\b~B\x03"},
+ {"encoding/xml", "\x02\x01^\f\x03{3\x05\f\x01\x02\x0f\x02"},
+ {"errors", "\xc9\x01|"},
+ {"expvar", "jK:\t\n\x14\r\n\x02\x03\x01\x10"},
+ {"flag", "a\f\x03{+\b\x05\n\x02\x01\x0f"},
+ {"fmt", "mE9\r\x1e\b\x0f\x02\x03\x11"},
+ {"go/ast", "\x03\x01l\x0f\x01k\x03(\b\x0f\x02\x01"},
+ {"go/ast/internal/tests", ""},
+ {"go/build", "\x02\x01j\x03\x01\x03\x02\a\x02\x01\x17\x1e\x04\x02\t\x14\x13\x01*\x01\x04\x01\a\n\x02\x01\x11\x02\x02"},
+ {"go/build/constraint", "m\xc2\x01\x01\x11\x02"},
+ {"go/constant", "p\x10x\x01\x015\x01\x02\x11"},
+ {"go/doc", "\x04l\x01\x06\t=.0\x12\x02\x01\x11\x02"},
+ {"go/doc/comment", "\x03m\xbd\x01\x01\x01\x01\x11\x02"},
+ {"go/format", "\x03m\x01\f\x01\x02kE"},
+ {"go/importer", "s\a\x01\x01\x04\x01j8"},
+ {"go/internal/gccgoimporter", "\x02\x01W\x13\x03\x05\v\x01h\x02+\x01\x05\x13\x01\v\b"},
+ {"go/internal/gcimporter", "\x02n\x10\x01/\x05\x0e(+\x17\x03\x02"},
+ {"go/internal/srcimporter", "p\x01\x02\n\x03\x01j+\x01\x05\x14\x02\x13"},
+ {"go/parser", "\x03j\x03\x01\x03\v\x01k\x01*\x06\x14"},
+ {"go/printer", "p\x01\x03\x03\tk\r\x1e\x17\x02\x01\x02\n\x05\x02"},
+ {"go/scanner", "\x03m\x10k1\x12\x01\x12\x02"},
+ {"go/token", "\x04l\xbd\x01\x02\x03\x01\x0e\x02"},
+ {"go/types", "\x03\x01\x06c\x03\x01\x04\b\x03\x02\x15\x1e\x06,\x04\x03\n$\a\n\x01\x01\x01\x02\x01\x0e\x02\x02"},
+ {"go/version", "\xba\x01v"},
+ {"hash", "\xeb\x01"},
+ {"hash/adler32", "m\x16\x16"},
+ {"hash/crc32", "m\x16\x16\x14\x85\x01\x01\x12"},
+ {"hash/crc64", "m\x16\x16\x99\x01"},
+ {"hash/fnv", "m\x16\x16a"},
+ {"hash/maphash", "\x94\x01\x05\x1b\x03AM"},
+ {"html", "\xb0\x02\x02\x11"},
+ {"html/template", "\x03g\x06\x19,6\x01\v\x1f\x05\x01\x02\x03\x0e\x01\x02\v\x01\x03\x02"},
+ {"image", "\x02k\x1f_\x0f5\x03\x01"},
+ {"image/color", ""},
+ {"image/color/palette", "\x8c\x01"},
+ {"image/draw", "\x8b\x01\x01\x04"},
+ {"image/gif", "\x02\x01\x05e\x03\x1b\x01\x01\x01\vR"},
+ {"image/internal/imageutil", "\x8b\x01"},
+ {"image/jpeg", "\x02k\x1e\x01\x04["},
+ {"image/png", "\x02\a]\n\x13\x02\x06\x01_D"},
+ {"index/suffixarray", "\x03c\a~\r)\f\x01"},
+ {"internal/abi", "\xb4\x01\x91\x01"},
+ {"internal/asan", "\xc5\x02"},
+ {"internal/bisect", "\xa3\x02\x0f\x01"},
+ {"internal/buildcfg", "pG_\x06\x02\x05\f\x01"},
+ {"internal/bytealg", "\xad\x01\x98\x01"},
+ {"internal/byteorder", ""},
+ {"internal/cfg", ""},
+ {"internal/chacha8rand", "\x99\x01\x1b\x91\x01"},
+ {"internal/copyright", ""},
+ {"internal/coverage", ""},
+ {"internal/coverage/calloc", ""},
+ {"internal/coverage/cfile", "j\x06\x17\x16\x01\x02\x01\x01\x01\x01\x01\x01\x01#\x01 +\x06\a\f\x01\x03\f\x06"},
+ {"internal/coverage/cformat", "\x04l-\x04J\f6\x01\x02\f"},
+ {"internal/coverage/cmerge", "p-["},
+ {"internal/coverage/decodecounter", "f\n-\v\x02A+\x19\x16"},
+ {"internal/coverage/decodemeta", "\x02d\n\x17\x16\v\x02A+"},
+ {"internal/coverage/encodecounter", "\x02d\n-\f\x01\x02?\f\x1f\x17"},
+ {"internal/coverage/encodemeta", "\x02\x01c\n\x13\x04\x16\r\x02?+/"},
+ {"internal/coverage/pods", "\x04l-y\x06\x05\f\x02\x01"},
+ {"internal/coverage/rtcov", "\xc5\x02"},
+ {"internal/coverage/slicereader", "f\n{Z"},
+ {"internal/coverage/slicewriter", "p{"},
+ {"internal/coverage/stringtab", "p8\x04?"},
+ {"internal/coverage/test", ""},
+ {"internal/coverage/uleb128", ""},
+ {"internal/cpu", "\xc5\x02"},
+ {"internal/dag", "\x04l\xbd\x01\x03"},
+ {"internal/diff", "\x03m\xbe\x01\x02"},
+ {"internal/exportdata", "\x02\x01j\x03\x03]\x1b+\x01\x05\x13\x01\x02"},
+ {"internal/filepathlite", "m+:\x1aA"},
+ {"internal/fmtsort", "\x04\x9a\x02\x0f"},
+ {"internal/fuzz", "\x03\nA\x18\x04\x03\x03\x01\f\x0356\r\x02\x1c\x01\x05\x02\x05\f\x01\x02\x01\x01\v\x04\x02"},
+ {"internal/goarch", ""},
+ {"internal/godebug", "\x96\x01 |\x01\x12"},
+ {"internal/godebugs", ""},
+ {"internal/goexperiment", ""},
+ {"internal/goos", ""},
+ {"internal/goroot", "\x96\x02\x01\x05\x14\x02"},
+ {"internal/gover", "\x04"},
+ {"internal/goversion", ""},
+ {"internal/itoa", ""},
+ {"internal/lazyregexp", "\x96\x02\v\x0f\x02"},
+ {"internal/lazytemplate", "\xeb\x01+\x1a\x02\v"},
+ {"internal/msan", "\xc5\x02"},
+ {"internal/nettrace", ""},
+ {"internal/obscuretestdata", "e\x86\x01+"},
+ {"internal/oserror", "m"},
+ {"internal/pkgbits", "\x03K\x18\a\x03\x05\vk\x0e\x1d\r\f\x01"},
+ {"internal/platform", ""},
+ {"internal/poll", "mO\x1a\x158\x0f\x01\x01\v\x06"},
+ {"internal/profile", "\x03\x04f\x03{6\r\x01\x01\x0f"},
+ {"internal/profilerecord", ""},
+ {"internal/race", "\x94\x01\xb1\x01"},
+ {"internal/reflectlite", "\x94\x01 4;\""},
+ {"internal/runtime/atomic", "\xc5\x02"},
+ {"internal/runtime/exithook", "\xca\x01{"},
+ {"internal/runtime/maps", "\x94\x01\x01\x1f\v\t\x05\x01w"},
+ {"internal/runtime/math", "\xb4\x01"},
+ {"internal/runtime/sys", "\xb4\x01\x04"},
+ {"internal/runtime/syscall", "\xc5\x02"},
+ {"internal/saferio", "\xeb\x01Z"},
+ {"internal/singleflight", "\xb2\x02"},
+ {"internal/stringslite", "\x98\x01\xad\x01"},
+ {"internal/sync", "\x94\x01 \x14k\x12"},
+ {"internal/synctest", "\xc5\x02"},
+ {"internal/syscall/execenv", "\xb4\x02"},
+ {"internal/syscall/unix", "\xa3\x02\x10\x01\x11"},
+ {"internal/sysinfo", "\x02\x01\xaa\x01>+\x1a\x02"},
+ {"internal/syslist", ""},
+ {"internal/testenv", "\x03\n`\x02\x01*\x1a\x10(*\x01\x05\a\f\x01\x02\x02\x01\n"},
+ {"internal/testlog", "\xb2\x02\x01\x12"},
+ {"internal/testpty", "m\x03\xa6\x01"},
+ {"internal/trace", "\x02\x01\x01\x06\\\a\x03m\x01\x01\x06\x06\x03\n5\x01\x02\x0f"},
+ {"internal/trace/event", ""},
+ {"internal/trace/event/go122", "pm"},
+ {"internal/trace/internal/oldtrace", "\x03\x01b\a\x03m\b\x06\r5\x01"},
+ {"internal/trace/internal/testgen/go122", "\x03c\nl\x01\x01\x03\x04\x010\v\x0f"},
+ {"internal/trace/raw", "\x02d\nm\b\x06D\x01\x11"},
+ {"internal/trace/testtrace", "\x02\x01j\x03l\x05\x05\x056\f\x02\x01"},
+ {"internal/trace/traceviewer", "\x02]\v\x06\x1a<\x16\b\a\x04\t\n\x14\x01\x05\a\f\x01\x02\r"},
+ {"internal/trace/traceviewer/format", ""},
+ {"internal/trace/version", "pm\x01\r"},
+ {"internal/txtar", "\x03m\xa6\x01\x1a"},
+ {"internal/types/errors", "\xaf\x02"},
+ {"internal/unsafeheader", "\xc5\x02"},
+ {"internal/xcoff", "Y\r\a\x03`\x1b+\x19\x01"},
+ {"internal/zstd", "f\a\x03{\x0f"},
+ {"io", "m\xc5\x01"},
+ {"io/fs", "m+*)0\x12\x12\x04"},
+ {"io/ioutil", "\xeb\x01\x01*\x17\x03"},
+ {"iter", "\xc8\x01[\""},
+ {"log", "p{\x05&\r\x0f\x01\f"},
+ {"log/internal", ""},
+ {"log/slog", "\x03\nT\t\x03\x03{\x04\x01\x02\x02\x04&\x05\n\x02\x01\x02\x01\f\x02\x02\x02"},
+ {"log/slog/internal", ""},
+ {"log/slog/internal/benchmarks", "\r`\x03{\x06\x03;\x10"},
+ {"log/slog/internal/buffer", "\xb2\x02"},
+ {"log/slog/internal/slogtest", "\xf1\x01"},
+ {"log/syslog", "m\x03\x7f\x12\x15\x1a\x02\r"},
+ {"maps", "\xee\x01W"},
+ {"math", "\xad\x01MK"},
+ {"math/big", "\x03j\x03)\x14>\r\x02\x023\x01\x02\x13"},
+ {"math/bits", "\xc5\x02"},
+ {"math/cmplx", "\xf8\x01\x02"},
+ {"math/rand", "\xb5\x01C:\x01\x12"},
+ {"math/rand/v2", "m,\x02]\x02K"},
+ {"mime", "\x02\x01b\b\x03{\f\x1f\x17\x03\x02\x0f\x02"},
+ {"mime/multipart", "\x02\x01G#\x03E6\f\x01\x06\x02\x14\x02\x06\x11\x02\x01\x15"},
+ {"mime/quotedprintable", "\x02\x01m{"},
+ {"net", "\x04\t`+\x1d\a\x04\x05\f\x01\x04\x15\x01$\x06\r\n\x05\x01\x01\v\x06\a"},
+ {"net/http", "\x02\x01\x04\x04\x02=\b\x13\x01\a\x03E6\x01\x03\b\x01\x02\x02\x02\x01\x02\x06\x02\x01\n\x01\x01\x05\x01\x02\x05\n\x01\x01\x01\x02\x01\f\x02\x02\x02\b\x01\x01\x01"},
+ {"net/http/cgi", "\x02P\x1b\x03{\x04\b\n\x01\x12\x01\x01\x01\x04\x01\x05\x02\n\x02\x01\x0f\x0e"},
+ {"net/http/cookiejar", "\x04i\x03\x91\x01\x01\b\v\x18\x03\x02\r\x04"},
+ {"net/http/fcgi", "\x02\x01\nY\a\x03{\x16\x01\x01\x13\x1a\x02\r"},
+ {"net/http/httptest", "\x02\x01\nE\x02\x1b\x01{\x04\x12\x01\t\t\x02\x19\x01\x02\r\x0e"},
+ {"net/http/httptrace", "\rEnA\x13\n!"},
+ {"net/http/httputil", "\x02\x01\n`\x03{\x04\x0f\x03\x01\x05\x02\x01\n\x01\x1b\x02\r\x0e"},
+ {"net/http/internal", "\x02\x01j\x03{"},
+ {"net/http/internal/ascii", "\xb0\x02\x11"},
+ {"net/http/internal/testcert", "\xb0\x02"},
+ {"net/http/pprof", "\x02\x01\nc\x19,\x11%\x04\x13\x13\x01\r\x06\x03\x01\x02\x01\x0f"},
+ {"net/internal/cgotest", ""},
+ {"net/internal/socktest", "p\xc2\x01\x02"},
+ {"net/mail", "\x02k\x03{\x04\x0f\x03\x13\x1c\x02\r\x04"},
+ {"net/netip", "\x04i+\x01#<\x025\x15"},
+ {"net/rpc", "\x02f\x05\x03\x10\na\x04\x12\x01\x1c\x0f\x03\x02"},
+ {"net/rpc/jsonrpc", "j\x03\x03{\x16\x10!"},
+ {"net/smtp", "\x19.\v\x13\b\x03{\x16\x13\x1c"},
+ {"net/textproto", "\x02\x01j\x03{\r\t.\x01\x02\x13"},
+ {"net/url", "m\x03\x87\x01$\x12\x02\x01\x15"},
+ {"os", "m+\x01\x18\x03\b\t\r\x03\x01\x04\x11\x017\n\x05\x01\x01\v\x06"},
+ {"os/exec", "\x03\n`H \x01\x15\x01*\x06\a\f\x01\x04\v"},
+ {"os/exec/internal/fdtest", "\xb4\x02"},
+ {"os/signal", "\r\x89\x02\x17\x05\x02"},
+ {"os/user", "\x02\x01j\x03{+\r\f\x01\x02"},
+ {"path", "m+\xab\x01"},
+ {"path/filepath", "m+\x19;*\r\n\x03\x04\x0f"},
+ {"plugin", "m"},
+ {"reflect", "m'\x04\x1c\b\f\x04\x02\x1a\x06\n+\f\x03\x0f\x02\x02"},
+ {"reflect/internal/example1", ""},
+ {"reflect/internal/example2", ""},
+ {"regexp", "\x03\xe8\x017\v\x02\x01\x02\x0f\x02"},
+ {"regexp/syntax", "\xad\x02\x01\x01\x01\x11\x02"},
+ {"runtime", "\x94\x01\x04\x01\x02\f\x06\a\x02\x01\x01\x0f\x03\x01\x01\x01\x01\x01\x03s"},
+ {"runtime/coverage", "\x9f\x01L"},
+ {"runtime/debug", "pUQ\r\n\x02\x01\x0f\x06"},
+ {"runtime/internal/startlinetest", ""},
+ {"runtime/internal/wasitest", ""},
+ {"runtime/metrics", "\xb6\x01B+\""},
+ {"runtime/pprof", "\x02\x01\x01\x03\x06Y\a\x03$3$\r\x1e\r\n\x01\x01\x01\x02\x02\b\x03\x06"},
+ {"runtime/race", "\xab\x02"},
+ {"runtime/race/internal/amd64v1", ""},
+ {"runtime/trace", "\rc{8\x0f\x01\x12"},
+ {"slices", "\x04\xea\x01\fK"},
+ {"sort", "\xc9\x0113"},
+ {"strconv", "m+:&\x02I"},
+ {"strings", "m'\x04:\x19\x03\f8\x0f\x02\x02"},
+ {"structs", ""},
+ {"sync", "\xc8\x01\vP\x10\x12"},
+ {"sync/atomic", "\xc5\x02"},
+ {"syscall", "m(\x03\x01\x1b\b\x03\x03\x06\aT\x0f\x01\x12"},
+ {"testing", "\x03\n`\x02\x01G\x11\x0f\x14\r\x04\x1a\x06\x02\x05\x02\a\x01\x02\x01\x02\x01\f\x02\x02\x02"},
+ {"testing/fstest", "m\x03{\x01\v$\x12\x03\b\a"},
+ {"testing/internal/testdeps", "\x02\v\xa6\x01'\x11+\x03\x05\x03\b\a\x02\r"},
+ {"testing/iotest", "\x03j\x03{\x04"},
+ {"testing/quick", "o\x01\x88\x01\x04\"\x12\x0f"},
+ {"testing/slogtest", "\r`\x03\x81\x01-\x05\x12\n"},
+ {"text/scanner", "\x03m{++\x02"},
+ {"text/tabwriter", "p{X"},
+ {"text/template", "m\x03B9\x01\v\x1e\x01\x05\x01\x02\x05\r\x02\f\x03\x02"},
+ {"text/template/parse", "\x03m\xb3\x01\f\x01\x11\x02"},
+ {"time", "m+\x1d\x1d()\x0f\x02\x11"},
+ {"time/tzdata", "m\xc7\x01\x11"},
+ {"unicode", ""},
+ {"unicode/utf16", ""},
+ {"unicode/utf8", ""},
+ {"unique", "\x94\x01>\x01P\x0f\x13\x12"},
+ {"unsafe", ""},
+ {"vendor/golang.org/x/crypto/chacha20", "\x10V\a\x8d\x01)'"},
+ {"vendor/golang.org/x/crypto/chacha20poly1305", "\x10V\a\xd9\x01\x04\x01\a"},
+ {"vendor/golang.org/x/crypto/cryptobyte", "c\n\x03\x89\x01%!\n"},
+ {"vendor/golang.org/x/crypto/cryptobyte/asn1", ""},
+ {"vendor/golang.org/x/crypto/internal/alias", "\xc5\x02"},
+ {"vendor/golang.org/x/crypto/internal/poly1305", "Q\x15\x94\x01"},
+ {"vendor/golang.org/x/net/dns/dnsmessage", "m"},
+ {"vendor/golang.org/x/net/http/httpguts", "\x81\x02\x13\x1c\x13\r"},
+ {"vendor/golang.org/x/net/http/httpproxy", "m\x03\x91\x01\x0f\x05\x01\x1a\x13\r"},
+ {"vendor/golang.org/x/net/http2/hpack", "\x03j\x03{G"},
+ {"vendor/golang.org/x/net/idna", "p\x88\x018\x13\x10\x02\x01"},
+ {"vendor/golang.org/x/net/nettest", "\x03c\a\x03{\x11\x05\x15\x01\f\f\x01\x02\x02\x01\n"},
+ {"vendor/golang.org/x/sys/cpu", "\x96\x02\r\f\x01\x15"},
+ {"vendor/golang.org/x/text/secure/bidirule", "m\xd6\x01\x11\x01"},
+ {"vendor/golang.org/x/text/transform", "\x03j~X"},
+ {"vendor/golang.org/x/text/unicode/bidi", "\x03\be\x7f?\x15"},
+ {"vendor/golang.org/x/text/unicode/norm", "f\n{G\x11\x11"},
+ {"weak", "\x94\x01\x8f\x01\""},
+}
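
Each entry in the table above pairs a package name with its direct imports, encoded as a string of uvarint deltas between indices into the same table; import.go below decodes them. A tiny standalone sketch of that decoding scheme (the names and encoded bytes here are made up for illustration, not taken from the real table):

package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	// Hypothetical name table and an entry whose deps decode to
	// deltas 1, 2, 1 -> indices 1, 3, 4.
	names := []string{"errors", "fmt", "io", "os", "strings"}
	enc := string([]byte{1, 2, 1})

	var idx uint64
	for data := []byte(enc); len(data) > 0; {
		delta, n := binary.Uvarint(data)
		idx += delta
		fmt.Println(names[idx]) // fmt, os, strings
		data = data[n:]
	}
}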
diff --git a/vendor/golang.org/x/tools/internal/stdlib/import.go b/vendor/golang.org/x/tools/internal/stdlib/import.go
new file mode 100644
index 00000000..f6909878
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/stdlib/import.go
@@ -0,0 +1,89 @@
+// Copyright 2025 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package stdlib
+
+// This file provides the API for the import graph of the standard library.
+//
+// Be aware that the compiler-generated code for every package
+// implicitly depends on package "runtime" and a handful of others
+// (see runtimePkgs in GOROOT/src/cmd/internal/objabi/pkgspecial.go).
+
+import (
+ "encoding/binary"
+ "iter"
+ "slices"
+ "strings"
+)
+
+// Imports returns the sequence of packages directly imported by the
+// named standard packages, in name order.
+// The imports of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Imports(pkgs ...string) iter.Seq[string] {
+ return func(yield func(string) bool) {
+ for _, pkg := range pkgs {
+ if i, ok := find(pkg); ok {
+ var depIndex uint64
+ for data := []byte(deps[i].deps); len(data) > 0; {
+ delta, n := binary.Uvarint(data)
+ depIndex += delta
+ if !yield(deps[depIndex].name) {
+ return
+ }
+ data = data[n:]
+ }
+ }
+ }
+ }
+}
+
+// Dependencies returns the set of all dependencies of the named
+// standard packages, including the initial package,
+// in a deterministic topological order.
+// The dependencies of an unknown package are the empty set.
+//
+// The graph is built into the application and may differ from the
+// graph in the Go source tree being analyzed by the application.
+func Dependencies(pkgs ...string) iter.Seq[string] {
+ return func(yield func(string) bool) {
+ for _, pkg := range pkgs {
+ if i, ok := find(pkg); ok {
+ var seen [1 + len(deps)/8]byte // bit set of seen packages
+ var visit func(i int) bool
+ visit = func(i int) bool {
+ bit := byte(1) << (i % 8)
+ if seen[i/8]&bit == 0 {
+ seen[i/8] |= bit
+ var depIndex uint64
+ for data := []byte(deps[i].deps); len(data) > 0; {
+ delta, n := binary.Uvarint(data)
+ depIndex += delta
+ if !visit(int(depIndex)) {
+ return false
+ }
+ data = data[n:]
+ }
+ if !yield(deps[i].name) {
+ return false
+ }
+ }
+ return true
+ }
+ if !visit(i) {
+ return
+ }
+ }
+ }
+ }
+}
+
+// find returns the index of pkg in the deps table.
+func find(pkg string) (int, bool) {
+ return slices.BinarySearchFunc(deps[:], pkg, func(p pkginfo, n string) int {
+ return strings.Compare(p.name, n)
+ })
+}
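
A minimal sketch of exercising the new import-graph API (illustrative only: stdlib is an internal package, so this only builds from inside golang.org/x/tools, and ranging over iter.Seq needs Go 1.23+):

package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib"
)

func main() {
	// Direct imports of net/http, in name order.
	for p := range stdlib.Imports("net/http") {
		fmt.Println("import:", p)
	}

	// All dependencies of net/http (including net/http itself),
	// in a deterministic topological order.
	for p := range stdlib.Dependencies("net/http") {
		fmt.Println("dep:", p)
	}
}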
diff --git a/vendor/golang.org/x/tools/internal/stdlib/manifest.go b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
index cdaac9ab..2b418796 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/manifest.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/manifest.go
@@ -1,4 +1,4 @@
-// Copyright 2024 The Go Authors. All rights reserved.
+// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -268,6 +268,8 @@ var PackageSymbols = map[string][]Symbol{
{"ErrTooLarge", Var, 0},
{"Fields", Func, 0},
{"FieldsFunc", Func, 0},
+ {"FieldsFuncSeq", Func, 24},
+ {"FieldsSeq", Func, 24},
{"HasPrefix", Func, 0},
{"HasSuffix", Func, 0},
{"Index", Func, 0},
@@ -280,6 +282,7 @@ var PackageSymbols = map[string][]Symbol{
{"LastIndexAny", Func, 0},
{"LastIndexByte", Func, 5},
{"LastIndexFunc", Func, 0},
+ {"Lines", Func, 24},
{"Map", Func, 0},
{"MinRead", Const, 0},
{"NewBuffer", Func, 0},
@@ -293,7 +296,9 @@ var PackageSymbols = map[string][]Symbol{
{"Split", Func, 0},
{"SplitAfter", Func, 0},
{"SplitAfterN", Func, 0},
+ {"SplitAfterSeq", Func, 24},
{"SplitN", Func, 0},
+ {"SplitSeq", Func, 24},
{"Title", Func, 0},
{"ToLower", Func, 0},
{"ToLowerSpecial", Func, 0},
@@ -535,6 +540,7 @@ var PackageSymbols = map[string][]Symbol{
{"NewCTR", Func, 0},
{"NewGCM", Func, 2},
{"NewGCMWithNonceSize", Func, 5},
+ {"NewGCMWithRandomNonce", Func, 24},
{"NewGCMWithTagSize", Func, 11},
{"NewOFB", Func, 0},
{"Stream", Type, 0},
@@ -673,6 +679,14 @@ var PackageSymbols = map[string][]Symbol{
{"Unmarshal", Func, 0},
{"UnmarshalCompressed", Func, 15},
},
+ "crypto/fips140": {
+ {"Enabled", Func, 24},
+ },
+ "crypto/hkdf": {
+ {"Expand", Func, 24},
+ {"Extract", Func, 24},
+ {"Key", Func, 24},
+ },
"crypto/hmac": {
{"Equal", Func, 1},
{"New", Func, 0},
@@ -683,11 +697,43 @@ var PackageSymbols = map[string][]Symbol{
{"Size", Const, 0},
{"Sum", Func, 2},
},
+ "crypto/mlkem": {
+ {"(*DecapsulationKey1024).Bytes", Method, 24},
+ {"(*DecapsulationKey1024).Decapsulate", Method, 24},
+ {"(*DecapsulationKey1024).EncapsulationKey", Method, 24},
+ {"(*DecapsulationKey768).Bytes", Method, 24},
+ {"(*DecapsulationKey768).Decapsulate", Method, 24},
+ {"(*DecapsulationKey768).EncapsulationKey", Method, 24},
+ {"(*EncapsulationKey1024).Bytes", Method, 24},
+ {"(*EncapsulationKey1024).Encapsulate", Method, 24},
+ {"(*EncapsulationKey768).Bytes", Method, 24},
+ {"(*EncapsulationKey768).Encapsulate", Method, 24},
+ {"CiphertextSize1024", Const, 24},
+ {"CiphertextSize768", Const, 24},
+ {"DecapsulationKey1024", Type, 24},
+ {"DecapsulationKey768", Type, 24},
+ {"EncapsulationKey1024", Type, 24},
+ {"EncapsulationKey768", Type, 24},
+ {"EncapsulationKeySize1024", Const, 24},
+ {"EncapsulationKeySize768", Const, 24},
+ {"GenerateKey1024", Func, 24},
+ {"GenerateKey768", Func, 24},
+ {"NewDecapsulationKey1024", Func, 24},
+ {"NewDecapsulationKey768", Func, 24},
+ {"NewEncapsulationKey1024", Func, 24},
+ {"NewEncapsulationKey768", Func, 24},
+ {"SeedSize", Const, 24},
+ {"SharedKeySize", Const, 24},
+ },
+ "crypto/pbkdf2": {
+ {"Key", Func, 24},
+ },
"crypto/rand": {
{"Int", Func, 0},
{"Prime", Func, 0},
{"Read", Func, 0},
{"Reader", Var, 0},
+ {"Text", Func, 24},
},
"crypto/rc4": {
{"(*Cipher).Reset", Method, 0},
@@ -766,6 +812,39 @@ var PackageSymbols = map[string][]Symbol{
{"Sum224", Func, 2},
{"Sum256", Func, 2},
},
+ "crypto/sha3": {
+ {"(*SHA3).AppendBinary", Method, 24},
+ {"(*SHA3).BlockSize", Method, 24},
+ {"(*SHA3).MarshalBinary", Method, 24},
+ {"(*SHA3).Reset", Method, 24},
+ {"(*SHA3).Size", Method, 24},
+ {"(*SHA3).Sum", Method, 24},
+ {"(*SHA3).UnmarshalBinary", Method, 24},
+ {"(*SHA3).Write", Method, 24},
+ {"(*SHAKE).AppendBinary", Method, 24},
+ {"(*SHAKE).BlockSize", Method, 24},
+ {"(*SHAKE).MarshalBinary", Method, 24},
+ {"(*SHAKE).Read", Method, 24},
+ {"(*SHAKE).Reset", Method, 24},
+ {"(*SHAKE).UnmarshalBinary", Method, 24},
+ {"(*SHAKE).Write", Method, 24},
+ {"New224", Func, 24},
+ {"New256", Func, 24},
+ {"New384", Func, 24},
+ {"New512", Func, 24},
+ {"NewCSHAKE128", Func, 24},
+ {"NewCSHAKE256", Func, 24},
+ {"NewSHAKE128", Func, 24},
+ {"NewSHAKE256", Func, 24},
+ {"SHA3", Type, 24},
+ {"SHAKE", Type, 24},
+ {"Sum224", Func, 24},
+ {"Sum256", Func, 24},
+ {"Sum384", Func, 24},
+ {"Sum512", Func, 24},
+ {"SumSHAKE128", Func, 24},
+ {"SumSHAKE256", Func, 24},
+ },
"crypto/sha512": {
{"BlockSize", Const, 0},
{"New", Func, 0},
@@ -788,6 +867,7 @@ var PackageSymbols = map[string][]Symbol{
{"ConstantTimeEq", Func, 0},
{"ConstantTimeLessOrEq", Func, 2},
{"ConstantTimeSelect", Func, 0},
+ {"WithDataIndependentTiming", Func, 24},
{"XORBytes", Func, 20},
},
"crypto/tls": {
@@ -864,6 +944,7 @@ var PackageSymbols = map[string][]Symbol{
{"ClientHelloInfo", Type, 4},
{"ClientHelloInfo.CipherSuites", Field, 4},
{"ClientHelloInfo.Conn", Field, 8},
+ {"ClientHelloInfo.Extensions", Field, 24},
{"ClientHelloInfo.ServerName", Field, 4},
{"ClientHelloInfo.SignatureSchemes", Field, 8},
{"ClientHelloInfo.SupportedCurves", Field, 4},
@@ -881,6 +962,7 @@ var PackageSymbols = map[string][]Symbol{
{"Config.CurvePreferences", Field, 3},
{"Config.DynamicRecordSizingDisabled", Field, 7},
{"Config.EncryptedClientHelloConfigList", Field, 23},
+ {"Config.EncryptedClientHelloKeys", Field, 24},
{"Config.EncryptedClientHelloRejectionVerify", Field, 23},
{"Config.GetCertificate", Field, 4},
{"Config.GetClientCertificate", Field, 8},
@@ -934,6 +1016,10 @@ var PackageSymbols = map[string][]Symbol{
{"ECHRejectionError", Type, 23},
{"ECHRejectionError.RetryConfigList", Field, 23},
{"Ed25519", Const, 13},
+ {"EncryptedClientHelloKey", Type, 24},
+ {"EncryptedClientHelloKey.Config", Field, 24},
+ {"EncryptedClientHelloKey.PrivateKey", Field, 24},
+ {"EncryptedClientHelloKey.SendAsRetry", Field, 24},
{"InsecureCipherSuites", Func, 14},
{"Listen", Func, 0},
{"LoadX509KeyPair", Func, 0},
@@ -1032,6 +1118,7 @@ var PackageSymbols = map[string][]Symbol{
{"VersionTLS12", Const, 2},
{"VersionTLS13", Const, 12},
{"X25519", Const, 8},
+ {"X25519MLKEM768", Const, 24},
{"X509KeyPair", Func, 0},
},
"crypto/x509": {
@@ -1056,6 +1143,8 @@ var PackageSymbols = map[string][]Symbol{
{"(ConstraintViolationError).Error", Method, 0},
{"(HostnameError).Error", Method, 0},
{"(InsecureAlgorithmError).Error", Method, 6},
+ {"(OID).AppendBinary", Method, 24},
+ {"(OID).AppendText", Method, 24},
{"(OID).Equal", Method, 22},
{"(OID).EqualASN1OID", Method, 22},
{"(OID).MarshalBinary", Method, 23},
@@ -1084,6 +1173,10 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.Extensions", Field, 2},
{"Certificate.ExtraExtensions", Field, 2},
{"Certificate.IPAddresses", Field, 1},
+ {"Certificate.InhibitAnyPolicy", Field, 24},
+ {"Certificate.InhibitAnyPolicyZero", Field, 24},
+ {"Certificate.InhibitPolicyMapping", Field, 24},
+ {"Certificate.InhibitPolicyMappingZero", Field, 24},
{"Certificate.IsCA", Field, 0},
{"Certificate.Issuer", Field, 0},
{"Certificate.IssuingCertificateURL", Field, 2},
@@ -1100,6 +1193,7 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.PermittedURIDomains", Field, 10},
{"Certificate.Policies", Field, 22},
{"Certificate.PolicyIdentifiers", Field, 0},
+ {"Certificate.PolicyMappings", Field, 24},
{"Certificate.PublicKey", Field, 0},
{"Certificate.PublicKeyAlgorithm", Field, 0},
{"Certificate.Raw", Field, 0},
@@ -1107,6 +1201,8 @@ var PackageSymbols = map[string][]Symbol{
{"Certificate.RawSubject", Field, 0},
{"Certificate.RawSubjectPublicKeyInfo", Field, 0},
{"Certificate.RawTBSCertificate", Field, 0},
+ {"Certificate.RequireExplicitPolicy", Field, 24},
+ {"Certificate.RequireExplicitPolicyZero", Field, 24},
{"Certificate.SerialNumber", Field, 0},
{"Certificate.Signature", Field, 0},
{"Certificate.SignatureAlgorithm", Field, 0},
@@ -1198,6 +1294,7 @@ var PackageSymbols = map[string][]Symbol{
{"NameConstraintsWithoutSANs", Const, 10},
{"NameMismatch", Const, 8},
{"NewCertPool", Func, 0},
+ {"NoValidChains", Const, 24},
{"NotAuthorizedToSign", Const, 0},
{"OID", Type, 22},
{"OIDFromInts", Func, 22},
@@ -1219,6 +1316,9 @@ var PackageSymbols = map[string][]Symbol{
{"ParsePKCS8PrivateKey", Func, 0},
{"ParsePKIXPublicKey", Func, 0},
{"ParseRevocationList", Func, 19},
+ {"PolicyMapping", Type, 24},
+ {"PolicyMapping.IssuerDomainPolicy", Field, 24},
+ {"PolicyMapping.SubjectDomainPolicy", Field, 24},
{"PublicKeyAlgorithm", Type, 0},
{"PureEd25519", Const, 13},
{"RSA", Const, 0},
@@ -1265,6 +1365,7 @@ var PackageSymbols = map[string][]Symbol{
{"UnknownPublicKeyAlgorithm", Const, 0},
{"UnknownSignatureAlgorithm", Const, 0},
{"VerifyOptions", Type, 0},
+ {"VerifyOptions.CertificatePolicies", Field, 24},
{"VerifyOptions.CurrentTime", Field, 0},
{"VerifyOptions.DNSName", Field, 0},
{"VerifyOptions.Intermediates", Field, 0},
@@ -1975,6 +2076,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*File).DynString", Method, 1},
{"(*File).DynValue", Method, 21},
{"(*File).DynamicSymbols", Method, 4},
+ {"(*File).DynamicVersionNeeds", Method, 24},
+ {"(*File).DynamicVersions", Method, 24},
{"(*File).ImportedLibraries", Method, 0},
{"(*File).ImportedSymbols", Method, 0},
{"(*File).Section", Method, 0},
@@ -2048,6 +2151,8 @@ var PackageSymbols = map[string][]Symbol{
{"(Type).String", Method, 0},
{"(Version).GoString", Method, 0},
{"(Version).String", Method, 0},
+ {"(VersionIndex).Index", Method, 24},
+ {"(VersionIndex).IsHidden", Method, 24},
{"ARM_MAGIC_TRAMP_NUMBER", Const, 0},
{"COMPRESS_HIOS", Const, 6},
{"COMPRESS_HIPROC", Const, 6},
@@ -2240,6 +2345,19 @@ var PackageSymbols = map[string][]Symbol{
{"DynFlag", Type, 0},
{"DynFlag1", Type, 21},
{"DynTag", Type, 0},
+ {"DynamicVersion", Type, 24},
+ {"DynamicVersion.Deps", Field, 24},
+ {"DynamicVersion.Flags", Field, 24},
+ {"DynamicVersion.Index", Field, 24},
+ {"DynamicVersion.Name", Field, 24},
+ {"DynamicVersionDep", Type, 24},
+ {"DynamicVersionDep.Dep", Field, 24},
+ {"DynamicVersionDep.Flags", Field, 24},
+ {"DynamicVersionDep.Index", Field, 24},
+ {"DynamicVersionFlag", Type, 24},
+ {"DynamicVersionNeed", Type, 24},
+ {"DynamicVersionNeed.Name", Field, 24},
+ {"DynamicVersionNeed.Needs", Field, 24},
{"EI_ABIVERSION", Const, 0},
{"EI_CLASS", Const, 0},
{"EI_DATA", Const, 0},
@@ -3718,6 +3836,7 @@ var PackageSymbols = map[string][]Symbol{
{"SymType", Type, 0},
{"SymVis", Type, 0},
{"Symbol", Type, 0},
+ {"Symbol.HasVersion", Field, 24},
{"Symbol.Info", Field, 0},
{"Symbol.Library", Field, 13},
{"Symbol.Name", Field, 0},
@@ -3726,8 +3845,13 @@ var PackageSymbols = map[string][]Symbol{
{"Symbol.Size", Field, 0},
{"Symbol.Value", Field, 0},
{"Symbol.Version", Field, 13},
+ {"Symbol.VersionIndex", Field, 24},
{"Type", Type, 0},
+ {"VER_FLG_BASE", Const, 24},
+ {"VER_FLG_INFO", Const, 24},
+ {"VER_FLG_WEAK", Const, 24},
{"Version", Type, 0},
+ {"VersionIndex", Type, 24},
},
"debug/gosym": {
{"(*DecodingError).Error", Method, 0},
@@ -4453,8 +4577,10 @@ var PackageSymbols = map[string][]Symbol{
{"FS", Type, 16},
},
"encoding": {
+ {"BinaryAppender", Type, 24},
{"BinaryMarshaler", Type, 2},
{"BinaryUnmarshaler", Type, 2},
+ {"TextAppender", Type, 24},
{"TextMarshaler", Type, 2},
{"TextUnmarshaler", Type, 2},
},
@@ -5984,13 +6110,16 @@ var PackageSymbols = map[string][]Symbol{
{"(*Interface).Complete", Method, 5},
{"(*Interface).Embedded", Method, 5},
{"(*Interface).EmbeddedType", Method, 11},
+ {"(*Interface).EmbeddedTypes", Method, 24},
{"(*Interface).Empty", Method, 5},
{"(*Interface).ExplicitMethod", Method, 5},
+ {"(*Interface).ExplicitMethods", Method, 24},
{"(*Interface).IsComparable", Method, 18},
{"(*Interface).IsImplicit", Method, 18},
{"(*Interface).IsMethodSet", Method, 18},
{"(*Interface).MarkImplicit", Method, 18},
{"(*Interface).Method", Method, 5},
+ {"(*Interface).Methods", Method, 24},
{"(*Interface).NumEmbeddeds", Method, 5},
{"(*Interface).NumExplicitMethods", Method, 5},
{"(*Interface).NumMethods", Method, 5},
@@ -6011,9 +6140,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*MethodSet).At", Method, 5},
{"(*MethodSet).Len", Method, 5},
{"(*MethodSet).Lookup", Method, 5},
+ {"(*MethodSet).Methods", Method, 24},
{"(*MethodSet).String", Method, 5},
{"(*Named).AddMethod", Method, 5},
{"(*Named).Method", Method, 5},
+ {"(*Named).Methods", Method, 24},
{"(*Named).NumMethods", Method, 5},
{"(*Named).Obj", Method, 5},
{"(*Named).Origin", Method, 18},
@@ -6054,6 +6185,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Pointer).String", Method, 5},
{"(*Pointer).Underlying", Method, 5},
{"(*Scope).Child", Method, 5},
+ {"(*Scope).Children", Method, 24},
{"(*Scope).Contains", Method, 5},
{"(*Scope).End", Method, 5},
{"(*Scope).Innermost", Method, 5},
@@ -6089,6 +6221,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*StdSizes).Offsetsof", Method, 5},
{"(*StdSizes).Sizeof", Method, 5},
{"(*Struct).Field", Method, 5},
+ {"(*Struct).Fields", Method, 24},
{"(*Struct).NumFields", Method, 5},
{"(*Struct).String", Method, 5},
{"(*Struct).Tag", Method, 5},
@@ -6100,8 +6233,10 @@ var PackageSymbols = map[string][]Symbol{
{"(*Tuple).Len", Method, 5},
{"(*Tuple).String", Method, 5},
{"(*Tuple).Underlying", Method, 5},
+ {"(*Tuple).Variables", Method, 24},
{"(*TypeList).At", Method, 18},
{"(*TypeList).Len", Method, 18},
+ {"(*TypeList).Types", Method, 24},
{"(*TypeName).Exported", Method, 5},
{"(*TypeName).Id", Method, 5},
{"(*TypeName).IsAlias", Method, 9},
@@ -6119,9 +6254,11 @@ var PackageSymbols = map[string][]Symbol{
{"(*TypeParam).Underlying", Method, 18},
{"(*TypeParamList).At", Method, 18},
{"(*TypeParamList).Len", Method, 18},
+ {"(*TypeParamList).TypeParams", Method, 24},
{"(*Union).Len", Method, 18},
{"(*Union).String", Method, 18},
{"(*Union).Term", Method, 18},
+ {"(*Union).Terms", Method, 24},
{"(*Union).Underlying", Method, 18},
{"(*Var).Anonymous", Method, 5},
{"(*Var).Embedded", Method, 11},
@@ -6392,10 +6529,12 @@ var PackageSymbols = map[string][]Symbol{
{"(*Hash).WriteByte", Method, 14},
{"(*Hash).WriteString", Method, 14},
{"Bytes", Func, 19},
+ {"Comparable", Func, 24},
{"Hash", Type, 14},
{"MakeSeed", Func, 14},
{"Seed", Type, 14},
{"String", Func, 19},
+ {"WriteComparable", Func, 24},
},
"html": {
{"EscapeString", Func, 0},
@@ -7082,6 +7221,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*JSONHandler).WithGroup", Method, 21},
{"(*Level).UnmarshalJSON", Method, 21},
{"(*Level).UnmarshalText", Method, 21},
+ {"(*LevelVar).AppendText", Method, 24},
{"(*LevelVar).Level", Method, 21},
{"(*LevelVar).MarshalText", Method, 21},
{"(*LevelVar).Set", Method, 21},
@@ -7110,6 +7250,7 @@ var PackageSymbols = map[string][]Symbol{
{"(Attr).Equal", Method, 21},
{"(Attr).String", Method, 21},
{"(Kind).String", Method, 21},
+ {"(Level).AppendText", Method, 24},
{"(Level).Level", Method, 21},
{"(Level).MarshalJSON", Method, 21},
{"(Level).MarshalText", Method, 21},
@@ -7140,6 +7281,7 @@ var PackageSymbols = map[string][]Symbol{
{"Debug", Func, 21},
{"DebugContext", Func, 21},
{"Default", Func, 21},
+ {"DiscardHandler", Var, 24},
{"Duration", Func, 21},
{"DurationValue", Func, 21},
{"Error", Func, 21},
@@ -7375,6 +7517,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Float).Acc", Method, 5},
{"(*Float).Add", Method, 5},
{"(*Float).Append", Method, 5},
+ {"(*Float).AppendText", Method, 24},
{"(*Float).Cmp", Method, 5},
{"(*Float).Copy", Method, 5},
{"(*Float).Float32", Method, 5},
@@ -7421,6 +7564,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Int).And", Method, 0},
{"(*Int).AndNot", Method, 0},
{"(*Int).Append", Method, 6},
+ {"(*Int).AppendText", Method, 24},
{"(*Int).Binomial", Method, 0},
{"(*Int).Bit", Method, 0},
{"(*Int).BitLen", Method, 0},
@@ -7477,6 +7621,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Int).Xor", Method, 0},
{"(*Rat).Abs", Method, 0},
{"(*Rat).Add", Method, 0},
+ {"(*Rat).AppendText", Method, 24},
{"(*Rat).Cmp", Method, 0},
{"(*Rat).Denom", Method, 0},
{"(*Rat).Float32", Method, 4},
@@ -7659,11 +7804,13 @@ var PackageSymbols = map[string][]Symbol{
{"Zipf", Type, 0},
},
"math/rand/v2": {
+ {"(*ChaCha8).AppendBinary", Method, 24},
{"(*ChaCha8).MarshalBinary", Method, 22},
{"(*ChaCha8).Read", Method, 23},
{"(*ChaCha8).Seed", Method, 22},
{"(*ChaCha8).Uint64", Method, 22},
{"(*ChaCha8).UnmarshalBinary", Method, 22},
+ {"(*PCG).AppendBinary", Method, 24},
{"(*PCG).MarshalBinary", Method, 22},
{"(*PCG).Seed", Method, 22},
{"(*PCG).Uint64", Method, 22},
@@ -7931,6 +8078,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*UnixListener).SyscallConn", Method, 10},
{"(Flags).String", Method, 0},
{"(HardwareAddr).String", Method, 0},
+ {"(IP).AppendText", Method, 24},
{"(IP).DefaultMask", Method, 0},
{"(IP).Equal", Method, 0},
{"(IP).IsGlobalUnicast", Method, 0},
@@ -8131,6 +8279,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*MaxBytesError).Error", Method, 19},
{"(*ProtocolError).Error", Method, 0},
{"(*ProtocolError).Is", Method, 21},
+ {"(*Protocols).SetHTTP1", Method, 24},
+ {"(*Protocols).SetHTTP2", Method, 24},
+ {"(*Protocols).SetUnencryptedHTTP2", Method, 24},
{"(*Request).AddCookie", Method, 0},
{"(*Request).BasicAuth", Method, 4},
{"(*Request).Clone", Method, 13},
@@ -8190,6 +8341,10 @@ var PackageSymbols = map[string][]Symbol{
{"(Header).Values", Method, 14},
{"(Header).Write", Method, 0},
{"(Header).WriteSubset", Method, 0},
+ {"(Protocols).HTTP1", Method, 24},
+ {"(Protocols).HTTP2", Method, 24},
+ {"(Protocols).String", Method, 24},
+ {"(Protocols).UnencryptedHTTP2", Method, 24},
{"AllowQuerySemicolons", Func, 17},
{"CanonicalHeaderKey", Func, 0},
{"Client", Type, 0},
@@ -8252,6 +8407,18 @@ var PackageSymbols = map[string][]Symbol{
{"FileSystem", Type, 0},
{"Flusher", Type, 0},
{"Get", Func, 0},
+ {"HTTP2Config", Type, 24},
+ {"HTTP2Config.CountError", Field, 24},
+ {"HTTP2Config.MaxConcurrentStreams", Field, 24},
+ {"HTTP2Config.MaxDecoderHeaderTableSize", Field, 24},
+ {"HTTP2Config.MaxEncoderHeaderTableSize", Field, 24},
+ {"HTTP2Config.MaxReadFrameSize", Field, 24},
+ {"HTTP2Config.MaxReceiveBufferPerConnection", Field, 24},
+ {"HTTP2Config.MaxReceiveBufferPerStream", Field, 24},
+ {"HTTP2Config.PermitProhibitedCipherSuites", Field, 24},
+ {"HTTP2Config.PingTimeout", Field, 24},
+ {"HTTP2Config.SendPingTimeout", Field, 24},
+ {"HTTP2Config.WriteByteTimeout", Field, 24},
{"Handle", Func, 0},
{"HandleFunc", Func, 0},
{"Handler", Type, 0},
@@ -8292,6 +8459,7 @@ var PackageSymbols = map[string][]Symbol{
{"PostForm", Func, 0},
{"ProtocolError", Type, 0},
{"ProtocolError.ErrorString", Field, 0},
+ {"Protocols", Type, 24},
{"ProxyFromEnvironment", Func, 0},
{"ProxyURL", Func, 0},
{"PushOptions", Type, 8},
@@ -8361,9 +8529,11 @@ var PackageSymbols = map[string][]Symbol{
{"Server.ConnState", Field, 3},
{"Server.DisableGeneralOptionsHandler", Field, 20},
{"Server.ErrorLog", Field, 3},
+ {"Server.HTTP2", Field, 24},
{"Server.Handler", Field, 0},
{"Server.IdleTimeout", Field, 8},
{"Server.MaxHeaderBytes", Field, 0},
+ {"Server.Protocols", Field, 24},
{"Server.ReadHeaderTimeout", Field, 8},
{"Server.ReadTimeout", Field, 0},
{"Server.TLSConfig", Field, 0},
@@ -8453,12 +8623,14 @@ var PackageSymbols = map[string][]Symbol{
{"Transport.ExpectContinueTimeout", Field, 6},
{"Transport.ForceAttemptHTTP2", Field, 13},
{"Transport.GetProxyConnectHeader", Field, 16},
+ {"Transport.HTTP2", Field, 24},
{"Transport.IdleConnTimeout", Field, 7},
{"Transport.MaxConnsPerHost", Field, 11},
{"Transport.MaxIdleConns", Field, 7},
{"Transport.MaxIdleConnsPerHost", Field, 0},
{"Transport.MaxResponseHeaderBytes", Field, 7},
{"Transport.OnProxyConnectResponse", Field, 20},
+ {"Transport.Protocols", Field, 24},
{"Transport.Proxy", Field, 0},
{"Transport.ProxyConnectHeader", Field, 8},
{"Transport.ReadBufferSize", Field, 13},
@@ -8646,6 +8818,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*AddrPort).UnmarshalText", Method, 18},
{"(*Prefix).UnmarshalBinary", Method, 18},
{"(*Prefix).UnmarshalText", Method, 18},
+ {"(Addr).AppendBinary", Method, 24},
+ {"(Addr).AppendText", Method, 24},
{"(Addr).AppendTo", Method, 18},
{"(Addr).As16", Method, 18},
{"(Addr).As4", Method, 18},
@@ -8676,6 +8850,8 @@ var PackageSymbols = map[string][]Symbol{
{"(Addr).WithZone", Method, 18},
{"(Addr).Zone", Method, 18},
{"(AddrPort).Addr", Method, 18},
+ {"(AddrPort).AppendBinary", Method, 24},
+ {"(AddrPort).AppendText", Method, 24},
{"(AddrPort).AppendTo", Method, 18},
{"(AddrPort).Compare", Method, 22},
{"(AddrPort).IsValid", Method, 18},
@@ -8684,6 +8860,8 @@ var PackageSymbols = map[string][]Symbol{
{"(AddrPort).Port", Method, 18},
{"(AddrPort).String", Method, 18},
{"(Prefix).Addr", Method, 18},
+ {"(Prefix).AppendBinary", Method, 24},
+ {"(Prefix).AppendText", Method, 24},
{"(Prefix).AppendTo", Method, 18},
{"(Prefix).Bits", Method, 18},
{"(Prefix).Contains", Method, 18},
@@ -8868,6 +9046,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*Error).Temporary", Method, 6},
{"(*Error).Timeout", Method, 6},
{"(*Error).Unwrap", Method, 13},
+ {"(*URL).AppendBinary", Method, 24},
{"(*URL).EscapedFragment", Method, 15},
{"(*URL).EscapedPath", Method, 5},
{"(*URL).Hostname", Method, 8},
@@ -8967,6 +9146,17 @@ var PackageSymbols = map[string][]Symbol{
{"(*ProcessState).SysUsage", Method, 0},
{"(*ProcessState).SystemTime", Method, 0},
{"(*ProcessState).UserTime", Method, 0},
+ {"(*Root).Close", Method, 24},
+ {"(*Root).Create", Method, 24},
+ {"(*Root).FS", Method, 24},
+ {"(*Root).Lstat", Method, 24},
+ {"(*Root).Mkdir", Method, 24},
+ {"(*Root).Name", Method, 24},
+ {"(*Root).Open", Method, 24},
+ {"(*Root).OpenFile", Method, 24},
+ {"(*Root).OpenRoot", Method, 24},
+ {"(*Root).Remove", Method, 24},
+ {"(*Root).Stat", Method, 24},
{"(*SyscallError).Error", Method, 0},
{"(*SyscallError).Timeout", Method, 10},
{"(*SyscallError).Unwrap", Method, 13},
@@ -9060,6 +9250,8 @@ var PackageSymbols = map[string][]Symbol{
{"O_WRONLY", Const, 0},
{"Open", Func, 0},
{"OpenFile", Func, 0},
+ {"OpenInRoot", Func, 24},
+ {"OpenRoot", Func, 24},
{"PathError", Type, 0},
{"PathError.Err", Field, 0},
{"PathError.Op", Field, 0},
@@ -9081,6 +9273,7 @@ var PackageSymbols = map[string][]Symbol{
{"Remove", Func, 0},
{"RemoveAll", Func, 0},
{"Rename", Func, 0},
+ {"Root", Type, 24},
{"SEEK_CUR", Const, 0},
{"SEEK_END", Const, 0},
{"SEEK_SET", Const, 0},
@@ -9422,6 +9615,7 @@ var PackageSymbols = map[string][]Symbol{
{"Zero", Func, 0},
},
"regexp": {
+ {"(*Regexp).AppendText", Method, 24},
{"(*Regexp).Copy", Method, 6},
{"(*Regexp).Expand", Method, 0},
{"(*Regexp).ExpandString", Method, 0},
@@ -9602,6 +9796,8 @@ var PackageSymbols = map[string][]Symbol{
{"(*StackRecord).Stack", Method, 0},
{"(*TypeAssertionError).Error", Method, 0},
{"(*TypeAssertionError).RuntimeError", Method, 0},
+ {"(Cleanup).Stop", Method, 24},
+ {"AddCleanup", Func, 24},
{"BlockProfile", Func, 1},
{"BlockProfileRecord", Type, 1},
{"BlockProfileRecord.Count", Field, 1},
@@ -9612,6 +9808,7 @@ var PackageSymbols = map[string][]Symbol{
{"Caller", Func, 0},
{"Callers", Func, 0},
{"CallersFrames", Func, 7},
+ {"Cleanup", Type, 24},
{"Compiler", Const, 0},
{"Error", Type, 0},
{"Frame", Type, 7},
@@ -9974,6 +10171,8 @@ var PackageSymbols = map[string][]Symbol{
{"EqualFold", Func, 0},
{"Fields", Func, 0},
{"FieldsFunc", Func, 0},
+ {"FieldsFuncSeq", Func, 24},
+ {"FieldsSeq", Func, 24},
{"HasPrefix", Func, 0},
{"HasSuffix", Func, 0},
{"Index", Func, 0},
@@ -9986,6 +10185,7 @@ var PackageSymbols = map[string][]Symbol{
{"LastIndexAny", Func, 0},
{"LastIndexByte", Func, 5},
{"LastIndexFunc", Func, 0},
+ {"Lines", Func, 24},
{"Map", Func, 0},
{"NewReader", Func, 0},
{"NewReplacer", Func, 0},
@@ -9997,7 +10197,9 @@ var PackageSymbols = map[string][]Symbol{
{"Split", Func, 0},
{"SplitAfter", Func, 0},
{"SplitAfterN", Func, 0},
+ {"SplitAfterSeq", Func, 24},
{"SplitN", Func, 0},
+ {"SplitSeq", Func, 24},
{"Title", Func, 0},
{"ToLower", Func, 0},
{"ToLowerSpecial", Func, 0},
@@ -16413,7 +16615,9 @@ var PackageSymbols = map[string][]Symbol{
{"ValueOf", Func, 0},
},
"testing": {
+ {"(*B).Chdir", Method, 24},
{"(*B).Cleanup", Method, 14},
+ {"(*B).Context", Method, 24},
{"(*B).Elapsed", Method, 20},
{"(*B).Error", Method, 0},
{"(*B).Errorf", Method, 0},
@@ -16425,6 +16629,7 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).Helper", Method, 9},
{"(*B).Log", Method, 0},
{"(*B).Logf", Method, 0},
+ {"(*B).Loop", Method, 24},
{"(*B).Name", Method, 8},
{"(*B).ReportAllocs", Method, 1},
{"(*B).ReportMetric", Method, 13},
@@ -16442,7 +16647,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*B).StopTimer", Method, 0},
{"(*B).TempDir", Method, 15},
{"(*F).Add", Method, 18},
+ {"(*F).Chdir", Method, 24},
{"(*F).Cleanup", Method, 18},
+ {"(*F).Context", Method, 24},
{"(*F).Error", Method, 18},
{"(*F).Errorf", Method, 18},
{"(*F).Fail", Method, 18},
@@ -16463,7 +16670,9 @@ var PackageSymbols = map[string][]Symbol{
{"(*F).TempDir", Method, 18},
{"(*M).Run", Method, 4},
{"(*PB).Next", Method, 3},
+ {"(*T).Chdir", Method, 24},
{"(*T).Cleanup", Method, 14},
+ {"(*T).Context", Method, 24},
{"(*T).Deadline", Method, 15},
{"(*T).Error", Method, 0},
{"(*T).Errorf", Method, 0},
@@ -16954,7 +17163,9 @@ var PackageSymbols = map[string][]Symbol{
{"(Time).Add", Method, 0},
{"(Time).AddDate", Method, 0},
{"(Time).After", Method, 0},
+ {"(Time).AppendBinary", Method, 24},
{"(Time).AppendFormat", Method, 5},
+ {"(Time).AppendText", Method, 24},
{"(Time).Before", Method, 0},
{"(Time).Clock", Method, 0},
{"(Time).Compare", Method, 20},
@@ -17428,4 +17639,9 @@ var PackageSymbols = map[string][]Symbol{
{"String", Func, 0},
{"StringData", Func, 0},
},
+ "weak": {
+ {"(Pointer).Value", Method, 24},
+ {"Make", Func, 24},
+ {"Pointer", Type, 24},
+ },
}
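
A sketch of how a consumer might read the new go1.24 entries added above (illustrative only: the package is internal, and the Name/Kind/Version field names are assumed from the Symbol type in stdlib.go, which is not shown in this diff):

package main

import (
	"fmt"

	"golang.org/x/tools/internal/stdlib"
)

func main() {
	// Print the exported functions of crypto/mlkem together with the
	// Go release that first included them.
	for _, sym := range stdlib.PackageSymbols["crypto/mlkem"] {
		if sym.Kind == stdlib.Func {
			fmt.Printf("%s added in go1.%d\n", sym.Name, sym.Version)
		}
	}
}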
diff --git a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
index 98904017..3d96d3bf 100644
--- a/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
+++ b/vendor/golang.org/x/tools/internal/stdlib/stdlib.go
@@ -6,7 +6,7 @@
// Package stdlib provides a table of all exported symbols in the
// standard library, along with the version at which they first
-// appeared.
+// appeared. It also provides the import graph of std packages.
package stdlib
import (
diff --git a/vendor/golang.org/x/tools/internal/typeparams/common.go b/vendor/golang.org/x/tools/internal/typeparams/common.go
index 0b84acc5..cdae2b8e 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/common.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/common.go
@@ -66,75 +66,3 @@ func IsTypeParam(t types.Type) bool {
_, ok := types.Unalias(t).(*types.TypeParam)
return ok
}
-
-// GenericAssignableTo is a generalization of types.AssignableTo that
-// implements the following rule for uninstantiated generic types:
-//
-// If V and T are generic named types, then V is considered assignable to T if,
-// for every possible instantiation of V[A_1, ..., A_N], the instantiation
-// T[A_1, ..., A_N] is valid and V[A_1, ..., A_N] implements T[A_1, ..., A_N].
-//
-// If T has structural constraints, they must be satisfied by V.
-//
-// For example, consider the following type declarations:
-//
-// type Interface[T any] interface {
-// Accept(T)
-// }
-//
-// type Container[T any] struct {
-// Element T
-// }
-//
-// func (c Container[T]) Accept(t T) { c.Element = t }
-//
-// In this case, GenericAssignableTo reports that instantiations of Container
-// are assignable to the corresponding instantiation of Interface.
-func GenericAssignableTo(ctxt *types.Context, V, T types.Type) bool {
- V = types.Unalias(V)
- T = types.Unalias(T)
-
- // If V and T are not both named, or do not have matching non-empty type
- // parameter lists, fall back on types.AssignableTo.
-
- VN, Vnamed := V.(*types.Named)
- TN, Tnamed := T.(*types.Named)
- if !Vnamed || !Tnamed {
- return types.AssignableTo(V, T)
- }
-
- vtparams := VN.TypeParams()
- ttparams := TN.TypeParams()
- if vtparams.Len() == 0 || vtparams.Len() != ttparams.Len() || VN.TypeArgs().Len() != 0 || TN.TypeArgs().Len() != 0 {
- return types.AssignableTo(V, T)
- }
-
- // V and T have the same (non-zero) number of type params. Instantiate both
- // with the type parameters of V. This must always succeed for V, and will
- // succeed for T if and only if the type set of each type parameter of V is a
- // subset of the type set of the corresponding type parameter of T, meaning
- // that every instantiation of V corresponds to a valid instantiation of T.
-
- // Minor optimization: ensure we share a context across the two
- // instantiations below.
- if ctxt == nil {
- ctxt = types.NewContext()
- }
-
- var targs []types.Type
- for i := 0; i < vtparams.Len(); i++ {
- targs = append(targs, vtparams.At(i))
- }
-
- vinst, err := types.Instantiate(ctxt, V, targs, true)
- if err != nil {
- panic("type parameters should satisfy their own constraints")
- }
-
- tinst, err := types.Instantiate(ctxt, T, targs, true)
- if err != nil {
- return false
- }
-
- return types.AssignableTo(vinst, tinst)
-}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/coretype.go b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
index 6e83c6fb..27a2b179 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/coretype.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/coretype.go
@@ -109,8 +109,13 @@ func CoreType(T types.Type) types.Type {
//
// NormalTerms makes no guarantees about the order of terms, except that it
// is deterministic.
-func NormalTerms(typ types.Type) ([]*types.Term, error) {
- switch typ := typ.Underlying().(type) {
+func NormalTerms(T types.Type) ([]*types.Term, error) {
+ // typeSetOf(T) == typeSetOf(Unalias(T))
+ typ := types.Unalias(T)
+ if named, ok := typ.(*types.Named); ok {
+ typ = named.Underlying()
+ }
+ switch typ := typ.(type) {
case *types.TypeParam:
return StructuralTerms(typ)
case *types.Union:
@@ -118,7 +123,7 @@ func NormalTerms(typ types.Type) ([]*types.Term, error) {
case *types.Interface:
return InterfaceTermSet(typ)
default:
- return []*types.Term{types.NewTerm(false, typ)}, nil
+ return []*types.Term{types.NewTerm(false, T)}, nil
}
}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/free.go b/vendor/golang.org/x/tools/internal/typeparams/free.go
index 0ade5c29..709d2fc1 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/free.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/free.go
@@ -70,7 +70,7 @@ func (w *Free) Has(typ types.Type) (res bool) {
case *types.Tuple:
n := t.Len()
- for i := 0; i < n; i++ {
+ for i := range n {
if w.Has(t.At(i).Type()) {
return true
}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/normalize.go b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
index 93c80fdc..f49802b8 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/normalize.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/normalize.go
@@ -120,7 +120,7 @@ type termSet struct {
terms termlist
}
-func indentf(depth int, format string, args ...interface{}) {
+func indentf(depth int, format string, args ...any) {
fmt.Fprintf(os.Stderr, strings.Repeat(".", depth)+format+"\n", args...)
}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/termlist.go b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
index cbd12f80..9bc29143 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/termlist.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/termlist.go
@@ -1,3 +1,6 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+// Source: ../../cmd/compile/internal/types2/termlist.go
+
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
@@ -7,8 +10,8 @@
package typeparams
import (
- "bytes"
"go/types"
+ "strings"
)
// A termlist represents the type set represented by the union
@@ -22,15 +25,18 @@ type termlist []*term
// It is in normal form.
var allTermlist = termlist{new(term)}
+// termSep is the separator used between individual terms.
+const termSep = " | "
+
// String prints the termlist exactly (without normalization).
func (xl termlist) String() string {
if len(xl) == 0 {
return "∅"
}
- var buf bytes.Buffer
+ var buf strings.Builder
for i, x := range xl {
if i > 0 {
- buf.WriteString(" | ")
+ buf.WriteString(termSep)
}
buf.WriteString(x.String())
}
diff --git a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
index 7350bb70..fa758cdc 100644
--- a/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
+++ b/vendor/golang.org/x/tools/internal/typeparams/typeterm.go
@@ -1,3 +1,6 @@
+// Code generated by "go test -run=Generate -write=all"; DO NOT EDIT.
+// Source: ../../cmd/compile/internal/types2/typeterm.go
+
// Copyright 2021 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
new file mode 100644
index 00000000..649c82b6
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/classify_call.go
@@ -0,0 +1,135 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/ast"
+ "go/types"
+ _ "unsafe"
+)
+
+// CallKind describes the function position of an [*ast.CallExpr].
+type CallKind int
+
+const (
+ CallStatic CallKind = iota // static call to known function
+ CallInterface // dynamic call through an interface method
+ CallDynamic // dynamic call of a func value
+ CallBuiltin // call to a builtin function
+ CallConversion // a conversion (not a call)
+)
+
+var callKindNames = []string{
+ "CallStatic",
+ "CallInterface",
+ "CallDynamic",
+ "CallBuiltin",
+ "CallConversion",
+}
+
+func (k CallKind) String() string {
+ if i := int(k); i >= 0 && i < len(callKindNames) {
+ return callKindNames[i]
+ }
+ return fmt.Sprintf("typeutil.CallKind(%d)", k)
+}
+
+// ClassifyCall classifies the function position of a call expression ([*ast.CallExpr]).
+// It distinguishes among true function calls, calls to builtins, and type conversions,
+// and further classifies function calls as static calls (where the function is known),
+// dynamic interface calls, and other dynamic calls.
+//
+// For the declarations:
+//
+// func f() {}
+// func g[T any]() {}
+// var v func()
+// var s []func()
+// type I interface { M() }
+// var i I
+//
+// ClassifyCall returns the following:
+//
+// f() CallStatic
+// g[int]() CallStatic
+// i.M() CallInterface
+// min(1, 2) CallBuiltin
+// v() CallDynamic
+// s[0]() CallDynamic
+// int(x) CallConversion
+// []byte("") CallConversion
+func ClassifyCall(info *types.Info, call *ast.CallExpr) CallKind {
+ if info.Types == nil {
+ panic("ClassifyCall: info.Types is nil")
+ }
+ if info.Types[call.Fun].IsType() {
+ return CallConversion
+ }
+ obj := info.Uses[UsedIdent(info, call.Fun)]
+ // Classify the call by the type of the object, if any.
+ switch obj := obj.(type) {
+ case *types.Builtin:
+ return CallBuiltin
+ case *types.Func:
+ if interfaceMethod(obj) {
+ return CallInterface
+ }
+ return CallStatic
+ default:
+ return CallDynamic
+ }
+}
+
+// UsedIdent returns the identifier such that info.Uses[UsedIdent(info, e)]
+// is the [types.Object] used by e, if any.
+//
+// If e is one of various forms of reference:
+//
+// f, c, v, T lexical reference
+// pkg.X qualified identifier
+// f[T] or pkg.F[K,V] instantiations of the above kinds
+// expr.f field or method value selector
+// T.f method expression selector
+//
+// UsedIdent returns the identifier whose associated value in [types.Info.Uses]
+// is the object to which it refers.
+//
+// For the declarations:
+//
+// func F[T any] {...}
+// type I interface { M() }
+// var (
+// x int
+// s struct { f int }
+// a []int
+// i I
+// )
+//
+// UsedIdent returns the following:
+//
+// Expr UsedIdent
+// x x
+// s.f f
+// F[int] F
+// i.M M
+// I.M M
+// min min
+// int int
+// 1 nil
+// a[0] nil
+// []byte nil
+//
+// Note: if e is an instantiated function or method, UsedIdent returns
+// the corresponding generic function or method on the generic type.
+func UsedIdent(info *types.Info, e ast.Expr) *ast.Ident {
+ return usedIdent(info, e)
+}
+
+//go:linkname usedIdent golang.org/x/tools/go/types/typeutil.usedIdent
+func usedIdent(info *types.Info, e ast.Expr) *ast.Ident
+
+//go:linkname interfaceMethod golang.org/x/tools/go/types/typeutil.interfaceMethod
+func interfaceMethod(f *types.Func) bool
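
A sketch of driving ClassifyCall end to end, reusing NewTypesInfo from the typesinternal changes later in this diff (illustrative only: the package is internal; the analyzed source deliberately has no imports so no Importer is required, and go/types/typeutil is blank-imported so that the linknamed helpers above are linked into the binary):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	_ "golang.org/x/tools/go/types/typeutil" // target of the //go:linkname directives above
	"golang.org/x/tools/internal/typesinternal"
)

const src = `package p

type I interface{ M() }

func f() {}

var v func()

func g(i I) {
	f()           // static call
	i.M()         // interface call
	v()           // dynamic call
	_ = len("xy") // builtin
	_ = int64(1)  // conversion
}
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	info := typesinternal.NewTypesInfo()
	if _, err := (&types.Config{}).Check("p", fset, []*ast.File{file}, info); err != nil {
		panic(err)
	}
	ast.Inspect(file, func(n ast.Node) bool {
		if call, ok := n.(*ast.CallExpr); ok {
			fmt.Printf("%-12s %s\n", types.ExprString(call.Fun), typesinternal.ClassifyCall(info, call))
		}
		return true
	})
}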
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
index 131caab2..235a6def 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/errorcode.go
@@ -966,7 +966,7 @@ const (
// var _ = string(x)
InvalidConversion
- // InvalidUntypedConversion occurs when an there is no valid implicit
+ // InvalidUntypedConversion occurs when there is no valid implicit
// conversion from an untyped value satisfying the type constraints of the
// context in which it is used.
//
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
new file mode 100644
index 00000000..b64f714e
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/qualifier.go
@@ -0,0 +1,46 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "go/ast"
+ "go/types"
+ "strconv"
+)
+
+// FileQualifier returns a [types.Qualifier] function that qualifies
+// imported symbols appropriately based on the import environment of a given
+// file.
+// If the same package is imported multiple times, the last appearance is
+// recorded.
+func FileQualifier(f *ast.File, pkg *types.Package) types.Qualifier {
+ // Construct mapping of import paths to their defined names.
+ // It is only necessary to look at renaming imports.
+ imports := make(map[string]string)
+ for _, imp := range f.Imports {
+ if imp.Name != nil && imp.Name.Name != "_" {
+ path, _ := strconv.Unquote(imp.Path.Value)
+ imports[path] = imp.Name.Name
+ }
+ }
+
+ // Define qualifier to replace full package paths with names of the imports.
+ return func(p *types.Package) string {
+ if p == nil || p == pkg {
+ return ""
+ }
+
+ if name, ok := imports[p.Path()]; ok {
+ if name == "." {
+ return ""
+ } else {
+ return name
+ }
+ }
+
+ // If there is no local renaming, fall back to the package name.
+ return p.Name()
+ }
+}
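
A self-contained sketch of the qualifier's behaviour (illustrative only: the package is internal, and the import paths and package names below are made up for the demonstration):

package main

import (
	"fmt"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

const src = `package p

import (
	goast "go/ast"
	. "strings"
)
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, parser.ImportsOnly)
	if err != nil {
		panic(err)
	}
	pkg := types.NewPackage("example.com/p", "p")
	qual := typesinternal.FileQualifier(file, pkg)

	fmt.Printf("%q\n", qual(types.NewPackage("go/ast", "ast")))      // "goast": renamed in the file
	fmt.Printf("%q\n", qual(types.NewPackage("strings", "strings"))) // "": dot import
	fmt.Printf("%q\n", qual(pkg))                                    // "": the current package
	fmt.Printf("%q\n", qual(types.NewPackage("go/token", "token")))  // "token": fallback to the package name
}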
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/recv.go b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
index ba6f4f4e..8352ea76 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/recv.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/recv.go
@@ -11,6 +11,9 @@ import (
// ReceiverNamed returns the named type (if any) associated with the
// type of recv, which may be of the form N or *N, or aliases thereof.
// It also reports whether a Pointer was present.
+//
+// The named result may be nil if recv is from a method on an
+// anonymous interface or struct type, or in ill-typed code.
func ReceiverNamed(recv *types.Var) (isPtr bool, named *types.Named) {
t := recv.Type()
if ptr, ok := types.Unalias(t).(*types.Pointer); ok {
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/types.go b/vendor/golang.org/x/tools/internal/typesinternal/types.go
index df3ea521..cc244689 100644
--- a/vendor/golang.org/x/tools/internal/typesinternal/types.go
+++ b/vendor/golang.org/x/tools/internal/typesinternal/types.go
@@ -7,6 +7,7 @@
package typesinternal
import (
+ "go/ast"
"go/token"
"go/types"
"reflect"
@@ -32,12 +33,14 @@ func SetUsesCgo(conf *types.Config) bool {
return true
}
-// ReadGo116ErrorData extracts additional information from types.Error values
+// ErrorCodeStartEnd extracts additional information from types.Error values
// generated by Go version 1.16 and later: the error code, start position, and
// end position. If all positions are valid, start <= err.Pos <= end.
//
// If the data could not be read, the final result parameter will be false.
-func ReadGo116ErrorData(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
+//
+// TODO(adonovan): eliminate start/end when proposal #71803 is accepted.
+func ErrorCodeStartEnd(err types.Error) (code ErrorCode, start, end token.Pos, ok bool) {
var data [3]int
// By coincidence all of these fields are ints, which simplifies things.
v := reflect.ValueOf(err)
@@ -82,6 +85,7 @@ func NameRelativeTo(pkg *types.Package) types.Qualifier {
type NamedOrAlias interface {
types.Type
Obj() *types.TypeName
+ // TODO(hxjiang): add method TypeArgs() *types.TypeList after stop supporting go1.22.
}
// TypeParams is a light shim around t.TypeParams().
@@ -119,3 +123,22 @@ func Origin(t NamedOrAlias) NamedOrAlias {
}
return t
}
+
+// IsPackageLevel reports whether obj is a package-level symbol.
+func IsPackageLevel(obj types.Object) bool {
+ return obj.Pkg() != nil && obj.Parent() == obj.Pkg().Scope()
+}
+
+// NewTypesInfo returns a *types.Info with all maps populated.
+func NewTypesInfo() *types.Info {
+ return &types.Info{
+ Types: map[ast.Expr]types.TypeAndValue{},
+ Instances: map[*ast.Ident]types.Instance{},
+ Defs: map[*ast.Ident]types.Object{},
+ Uses: map[*ast.Ident]types.Object{},
+ Implicits: map[ast.Node]types.Object{},
+ Selections: map[*ast.SelectorExpr]*types.Selection{},
+ Scopes: map[ast.Node]*types.Scope{},
+ FileVersions: map[*ast.File]string{},
+ }
+}
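
A sketch of the renamed ErrorCodeStartEnd in use (illustrative only: the package is internal, the erroneous source is made up, and the reflection-based extraction only succeeds on types.Error values from Go 1.16 and later, as the doc comment above notes):

package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
	"go/types"

	"golang.org/x/tools/internal/typesinternal"
)

const src = `package p

var x int = "not an int"
`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{
		Error: func(err error) {
			if terr, ok := err.(types.Error); ok {
				if code, start, end, ok := typesinternal.ErrorCodeStartEnd(terr); ok {
					fmt.Printf("code=%d %v-%v: %s\n",
						code, fset.Position(start), fset.Position(end), terr.Msg)
				}
			}
		},
	}
	conf.Check("p", fset, []*ast.File{file}, nil) // type errors arrive via conf.Error
}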
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/varkind.go b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
new file mode 100644
index 00000000..e5da0495
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/varkind.go
@@ -0,0 +1,40 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+// TODO(adonovan): when CL 645115 lands, define the go1.25 version of
+// this API that actually does something.
+
+import "go/types"
+
+type VarKind uint8
+
+const (
+ _ VarKind = iota // (not meaningful)
+ PackageVar // a package-level variable
+ LocalVar // a local variable
+ RecvVar // a method receiver variable
+ ParamVar // a function parameter variable
+ ResultVar // a function result variable
+ FieldVar // a struct field
+)
+
+func (kind VarKind) String() string {
+ return [...]string{
+ 0: "VarKind(0)",
+ PackageVar: "PackageVar",
+ LocalVar: "LocalVar",
+ RecvVar: "RecvVar",
+ ParamVar: "ParamVar",
+ ResultVar: "ResultVar",
+ FieldVar: "FieldVar",
+ }[kind]
+}
+
+// GetVarKind returns an invalid VarKind.
+func GetVarKind(v *types.Var) VarKind { return 0 }
+
+// SetVarKind has no effect.
+func SetVarKind(v *types.Var, kind VarKind) {}
diff --git a/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
new file mode 100644
index 00000000..d272949c
--- /dev/null
+++ b/vendor/golang.org/x/tools/internal/typesinternal/zerovalue.go
@@ -0,0 +1,392 @@
+// Copyright 2024 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typesinternal
+
+import (
+ "fmt"
+ "go/ast"
+ "go/token"
+ "go/types"
+ "strings"
+)
+
+// ZeroString returns the string representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroString may return a partially correct
+// string representation. The caller should use the returned isValid boolean
+// to determine the validity of the expression.
+//
+// When assigning to a wider type (such as 'any'), it's the caller's
+// responsibility to handle any necessary type conversions.
+//
+// This string can be used on the right-hand side of an assignment where the
+// left-hand side has that explicit type.
+// References to named types are qualified by an appropriate (optional)
+// qualifier function.
+// Exception: This does not apply to tuples. Their string representation is
+// informational only and cannot be used in an assignment.
+//
+// See [ZeroExpr] for a variant that returns an [ast.Expr].
+func ZeroString(t types.Type, qual types.Qualifier) (_ string, isValid bool) {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return "false", true
+ case t.Info()&types.IsNumeric != 0:
+ return "0", true
+ case t.Info()&types.IsString != 0:
+ return `""`, true
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return "nil", true
+ case t.Kind() == types.Invalid:
+ return "invalid", false
+ default:
+ panic(fmt.Sprintf("ZeroString for unexpected type %v", t))
+ }
+
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return "nil", true
+
+ case *types.Interface:
+ if !t.IsMethodSet() {
+ return "invalid", false
+ }
+ return "nil", true
+
+ case *types.Named:
+ switch under := t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return types.TypeString(t, qual) + "{}", true
+ default:
+ return ZeroString(under, qual)
+ }
+
+ case *types.Alias:
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return types.TypeString(t, qual) + "{}", true
+ default:
+			// A type parameter can have an alias, but an alias type's
+			// underlying type can never be a type parameter.
+			// Use types.Unalias to preserve the type parameter information,
+			// instead of calling Underlying(), which would go straight through
+			// to the type parameter's underlying type, always an interface.
+ return ZeroString(types.Unalias(t), qual)
+ }
+
+ case *types.Array, *types.Struct:
+ return types.TypeString(t, qual) + "{}", true
+
+ case *types.TypeParam:
+ // Assumes func new is not shadowed.
+ return "*new(" + types.TypeString(t, qual) + ")", true
+
+ case *types.Tuple:
+ // Tuples are not normal values.
+		// We currently format them as "(t[0], ..., t[n])"; this could change.
+ isValid := true
+ components := make([]string, t.Len())
+ for i := 0; i < t.Len(); i++ {
+ comp, ok := ZeroString(t.At(i).Type(), qual)
+
+ components[i] = comp
+ isValid = isValid && ok
+ }
+ return "(" + strings.Join(components, ", ") + ")", isValid
+
+ case *types.Union:
+ // Variables of these types cannot be created, so it makes
+ // no sense to ask for their zero value.
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ default:
+ panic(t) // unreachable.
+ }
+}
+
+// ZeroExpr returns the ast.Expr representation of the zero value for any type t.
+// The boolean result indicates whether the type is or contains an invalid type
+// or a non-basic (constraint) interface type.
+//
+// Even for invalid input types, ZeroExpr may return a partially correct ast.Expr
+// representation. The caller should use the returned isValid boolean to determine
+// the validity of the expression.
+//
+// This function is designed for types suitable for variables and should not
+// be used with Tuple or Union types. References to named types are qualified
+// by an appropriate (optional) qualifier function.
+//
+// See [ZeroString] for a variant that returns a string.
+func ZeroExpr(t types.Type, qual types.Qualifier) (_ ast.Expr, isValid bool) {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch {
+ case t.Info()&types.IsBoolean != 0:
+ return &ast.Ident{Name: "false"}, true
+ case t.Info()&types.IsNumeric != 0:
+ return &ast.BasicLit{Kind: token.INT, Value: "0"}, true
+ case t.Info()&types.IsString != 0:
+ return &ast.BasicLit{Kind: token.STRING, Value: `""`}, true
+ case t.Kind() == types.UnsafePointer:
+ fallthrough
+ case t.Kind() == types.UntypedNil:
+ return ast.NewIdent("nil"), true
+ case t.Kind() == types.Invalid:
+ return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+ default:
+ panic(fmt.Sprintf("ZeroExpr for unexpected type %v", t))
+ }
+
+ case *types.Pointer, *types.Slice, *types.Chan, *types.Map, *types.Signature:
+ return ast.NewIdent("nil"), true
+
+ case *types.Interface:
+ if !t.IsMethodSet() {
+ return &ast.BasicLit{Kind: token.STRING, Value: `"invalid"`}, false
+ }
+ return ast.NewIdent("nil"), true
+
+ case *types.Named:
+ switch under := t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+ default:
+ return ZeroExpr(under, qual)
+ }
+
+ case *types.Alias:
+ switch t.Underlying().(type) {
+ case *types.Struct, *types.Array:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+ default:
+ return ZeroExpr(types.Unalias(t), qual)
+ }
+
+ case *types.Array, *types.Struct:
+ return &ast.CompositeLit{
+ Type: TypeExpr(t, qual),
+ }, true
+
+ case *types.TypeParam:
+ return &ast.StarExpr{ // *new(T)
+ X: &ast.CallExpr{
+ // Assumes func new is not shadowed.
+ Fun: ast.NewIdent("new"),
+ Args: []ast.Expr{
+ ast.NewIdent(t.Obj().Name()),
+ },
+ },
+ }, true
+
+ case *types.Tuple:
+		// Unlike ZeroString, there is no ast.Expr that can express a tuple
+		// as "(t[0], ..., t[n])".
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ case *types.Union:
+ // Variables of these types cannot be created, so it makes
+ // no sense to ask for their zero value.
+ panic(fmt.Sprintf("invalid type for a variable: %v", t))
+
+ default:
+ panic(t) // unreachable.
+ }
+}
+
+// IsZeroExpr uses simple syntactic heuristics to report whether expr
+// is an obvious zero value, such as 0, "", nil, or false.
+// It cannot do better without type information.
+func IsZeroExpr(expr ast.Expr) bool {
+ switch e := expr.(type) {
+ case *ast.BasicLit:
+ return e.Value == "0" || e.Value == `""`
+ case *ast.Ident:
+ return e.Name == "nil" || e.Name == "false"
+ default:
+ return false
+ }
+}
+
+// TypeExpr returns syntax for the specified type. References to named types
+// are qualified by an appropriate (optional) qualifier function.
+// It may panic for types such as Tuple or Union.
+func TypeExpr(t types.Type, qual types.Qualifier) ast.Expr {
+ switch t := t.(type) {
+ case *types.Basic:
+ switch t.Kind() {
+ case types.UnsafePointer:
+ return &ast.SelectorExpr{X: ast.NewIdent(qual(types.NewPackage("unsafe", "unsafe"))), Sel: ast.NewIdent("Pointer")}
+ default:
+ return ast.NewIdent(t.Name())
+ }
+
+ case *types.Pointer:
+ return &ast.UnaryExpr{
+ Op: token.MUL,
+ X: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Array:
+ return &ast.ArrayType{
+ Len: &ast.BasicLit{
+ Kind: token.INT,
+ Value: fmt.Sprintf("%d", t.Len()),
+ },
+ Elt: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Slice:
+ return &ast.ArrayType{
+ Elt: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Map:
+ return &ast.MapType{
+ Key: TypeExpr(t.Key(), qual),
+ Value: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Chan:
+ dir := ast.ChanDir(t.Dir())
+ if t.Dir() == types.SendRecv {
+ dir = ast.SEND | ast.RECV
+ }
+ return &ast.ChanType{
+ Dir: dir,
+ Value: TypeExpr(t.Elem(), qual),
+ }
+
+ case *types.Signature:
+ var params []*ast.Field
+ for i := 0; i < t.Params().Len(); i++ {
+ params = append(params, &ast.Field{
+ Type: TypeExpr(t.Params().At(i).Type(), qual),
+ Names: []*ast.Ident{
+ {
+ Name: t.Params().At(i).Name(),
+ },
+ },
+ })
+ }
+ if t.Variadic() {
+ last := params[len(params)-1]
+ last.Type = &ast.Ellipsis{Elt: last.Type.(*ast.ArrayType).Elt}
+ }
+ var returns []*ast.Field
+ for i := 0; i < t.Results().Len(); i++ {
+ returns = append(returns, &ast.Field{
+ Type: TypeExpr(t.Results().At(i).Type(), qual),
+ })
+ }
+ return &ast.FuncType{
+ Params: &ast.FieldList{
+ List: params,
+ },
+ Results: &ast.FieldList{
+ List: returns,
+ },
+ }
+
+ case *types.TypeParam:
+ pkgName := qual(t.Obj().Pkg())
+ if pkgName == "" || t.Obj().Pkg() == nil {
+ return ast.NewIdent(t.Obj().Name())
+ }
+ return &ast.SelectorExpr{
+ X: ast.NewIdent(pkgName),
+ Sel: ast.NewIdent(t.Obj().Name()),
+ }
+
+ // types.TypeParam also implements interface NamedOrAlias. To differentiate,
+	// the case for TypeParam needs to come before the case for NamedOrAlias.
+ // TODO(hxjiang): remove this comment once TypeArgs() is added to interface
+ // NamedOrAlias.
+ case NamedOrAlias:
+ var expr ast.Expr = ast.NewIdent(t.Obj().Name())
+ if pkgName := qual(t.Obj().Pkg()); pkgName != "." && pkgName != "" {
+ expr = &ast.SelectorExpr{
+ X: ast.NewIdent(pkgName),
+ Sel: expr.(*ast.Ident),
+ }
+ }
+
+ // TODO(hxjiang): call t.TypeArgs after adding method TypeArgs() to
+ // typesinternal.NamedOrAlias.
+ if hasTypeArgs, ok := t.(interface{ TypeArgs() *types.TypeList }); ok {
+ if typeArgs := hasTypeArgs.TypeArgs(); typeArgs != nil && typeArgs.Len() > 0 {
+ var indices []ast.Expr
+ for i := range typeArgs.Len() {
+ indices = append(indices, TypeExpr(typeArgs.At(i), qual))
+ }
+ expr = &ast.IndexListExpr{
+ X: expr,
+ Indices: indices,
+ }
+ }
+ }
+
+ return expr
+
+ case *types.Struct:
+ return ast.NewIdent(t.String())
+
+ case *types.Interface:
+ return ast.NewIdent(t.String())
+
+ case *types.Union:
+ if t.Len() == 0 {
+ panic("Union type should have at least one term")
+ }
+		// As in go/ast, the returned expression puts the last term in the
+		// Y field at the topmost level of the BinaryExpr.
+		// For the union "float32 | float64 | int64", the structure looks
+		// similar to:
+ // {
+ // X: {
+ // X: float32,
+ // Op: |
+ // Y: float64,
+ // }
+ // Op: |,
+ // Y: int64,
+ // }
+ var union ast.Expr
+ for i := range t.Len() {
+ term := t.Term(i)
+ termExpr := TypeExpr(term.Type(), qual)
+ if term.Tilde() {
+ termExpr = &ast.UnaryExpr{
+ Op: token.TILDE,
+ X: termExpr,
+ }
+ }
+ if i == 0 {
+ union = termExpr
+ } else {
+ union = &ast.BinaryExpr{
+ X: union,
+ Op: token.OR,
+ Y: termExpr,
+ }
+ }
+ }
+ return union
+
+ case *types.Tuple:
+ panic("invalid input type types.Tuple")
+
+ default:
+ panic("unreachable")
+ }
+}
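// Editor's sketch (not part of the vendored patch): ZeroString, ZeroExpr and
// TypeExpr live in an internal package, so the standalone program below
// re-implements a reduced subset of the documented ZeroString behaviour
// (basic types; pointers/slices/maps/channels/functions/interfaces; struct
// and array literals) to show the kind of text callers can expect. The real
// helper additionally reports validity and handles named types, aliases,
// type parameters and tuples, which this sketch deliberately omits.
package main

import (
	"fmt"
	"go/types"
)

// zeroString is a simplified stand-in for typesinternal.ZeroString.
func zeroString(t types.Type) string {
	switch u := t.Underlying().(type) {
	case *types.Basic:
		switch {
		case u.Info()&types.IsBoolean != 0:
			return "false"
		case u.Info()&types.IsNumeric != 0:
			return "0"
		case u.Info()&types.IsString != 0:
			return `""`
		}
	case *types.Pointer, *types.Slice, *types.Map, *types.Chan, *types.Signature, *types.Interface:
		return "nil"
	case *types.Struct, *types.Array:
		// For composite types the zero value is an empty literal, T{}.
		return types.TypeString(t, nil) + "{}"
	}
	return "<unhandled in this sketch>"
}

func main() {
	fmt.Println(zeroString(types.Typ[types.Int]))                      // 0
	fmt.Println(zeroString(types.Typ[types.Bool]))                     // false
	fmt.Println(zeroString(types.NewPointer(types.Typ[types.String]))) // nil
	fmt.Println(zeroString(types.NewStruct(nil, nil)))                 // struct{}{}
}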
diff --git a/vendor/golang.org/x/tools/internal/versions/constraint.go b/vendor/golang.org/x/tools/internal/versions/constraint.go
deleted file mode 100644
index 179063d4..00000000
--- a/vendor/golang.org/x/tools/internal/versions/constraint.go
+++ /dev/null
@@ -1,13 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package versions
-
-import "go/build/constraint"
-
-// ConstraintGoVersion is constraint.GoVersion (if built with go1.21+).
-// Otherwise nil.
-//
-// Deprecate once x/tools is after go1.21.
-var ConstraintGoVersion func(x constraint.Expr) string
diff --git a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go b/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
deleted file mode 100644
index 38011407..00000000
--- a/vendor/golang.org/x/tools/internal/versions/constraint_go121.go
+++ /dev/null
@@ -1,14 +0,0 @@
-// Copyright 2024 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-//go:build go1.21
-// +build go1.21
-
-package versions
-
-import "go/build/constraint"
-
-func init() {
- ConstraintGoVersion = constraint.GoVersion
-}
diff --git a/vendor/modules.txt b/vendor/modules.txt
index 5fa8a653..e604349b 100644
--- a/vendor/modules.txt
+++ b/vendor/modules.txt
@@ -380,8 +380,8 @@ github.com/google/gnostic-models/extensions
github.com/google/gnostic-models/jsonschema
github.com/google/gnostic-models/openapiv2
github.com/google/gnostic-models/openapiv3
-# github.com/google/go-cmp v0.6.0
-## explicit; go 1.13
+# github.com/google/go-cmp v0.7.0
+## explicit; go 1.21
github.com/google/go-cmp/cmp
github.com/google/go-cmp/cmp/internal/diff
github.com/google/go-cmp/cmp/internal/flags
@@ -452,12 +452,13 @@ github.com/josharian/intern
# github.com/json-iterator/go v1.1.12
## explicit; go 1.12
github.com/json-iterator/go
-# github.com/klauspost/compress v1.17.11
-## explicit; go 1.21
+# github.com/klauspost/compress v1.18.0
+## explicit; go 1.22
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
github.com/klauspost/compress/internal/cpuinfo
+github.com/klauspost/compress/internal/le
github.com/klauspost/compress/internal/snapref
github.com/klauspost/compress/zstd
github.com/klauspost/compress/zstd/internal/xxhash
@@ -478,8 +479,8 @@ github.com/mitchellh/go-wordwrap
# github.com/mitchellh/hashstructure/v2 v2.0.2
## explicit; go 1.14
github.com/mitchellh/hashstructure/v2
-# github.com/moby/buildkit v0.20.2
-## explicit; go 1.22.0
+# github.com/moby/buildkit v0.21.0-rc1
+## explicit; go 1.23.0
github.com/moby/buildkit/api/services/control
github.com/moby/buildkit/api/types
github.com/moby/buildkit/client
@@ -555,6 +556,7 @@ github.com/moby/buildkit/util/stack
github.com/moby/buildkit/util/system
github.com/moby/buildkit/util/testutil
github.com/moby/buildkit/util/testutil/dockerd
+github.com/moby/buildkit/util/testutil/dockerd/client
github.com/moby/buildkit/util/testutil/integration
github.com/moby/buildkit/util/testutil/workers
github.com/moby/buildkit/util/tracing
@@ -586,7 +588,7 @@ github.com/moby/sys/sequential
# github.com/moby/sys/signal v0.7.1
## explicit; go 1.17
github.com/moby/sys/signal
-# github.com/moby/sys/user v0.3.0
+# github.com/moby/sys/user v0.4.0
## explicit; go 1.17
github.com/moby/sys/user
# github.com/moby/sys/userns v0.1.0
@@ -614,7 +616,7 @@ github.com/mxk/go-flowrate/flowrate
# github.com/opencontainers/go-digest v1.0.0
## explicit; go 1.13
github.com/opencontainers/go-digest
-# github.com/opencontainers/image-spec v1.1.0
+# github.com/opencontainers/image-spec v1.1.1
## explicit; go 1.18
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
@@ -697,10 +699,10 @@ github.com/stretchr/testify/assert/yaml
github.com/stretchr/testify/require
# github.com/theupdateframework/notary v0.7.0
## explicit; go 1.12
-# github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205
+# github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323
## explicit; go 1.21
github.com/tonistiigi/dchapes-mode
-# github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a
+# github.com/tonistiigi/fsutil v0.0.0-20250318190121-d73a4b3b8a7e
## explicit; go 1.21
github.com/tonistiigi/fsutil
github.com/tonistiigi/fsutil/copy
@@ -844,8 +846,8 @@ go.opentelemetry.io/proto/otlp/common/v1
go.opentelemetry.io/proto/otlp/metrics/v1
go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
-# golang.org/x/crypto v0.31.0
-## explicit; go 1.20
+# golang.org/x/crypto v0.37.0
+## explicit; go 1.23.0
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
golang.org/x/crypto/chacha20
@@ -856,24 +858,25 @@ golang.org/x/crypto/nacl/sign
golang.org/x/crypto/ssh
golang.org/x/crypto/ssh/agent
golang.org/x/crypto/ssh/internal/bcrypt_pbkdf
-# golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f
-## explicit; go 1.22.0
+# golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0
+## explicit; go 1.23.0
golang.org/x/exp/constraints
golang.org/x/exp/maps
golang.org/x/exp/slices
-# golang.org/x/mod v0.22.0
-## explicit; go 1.22.0
+# golang.org/x/mod v0.24.0
+## explicit; go 1.23.0
golang.org/x/mod/internal/lazyregexp
golang.org/x/mod/module
golang.org/x/mod/semver
-# golang.org/x/net v0.33.0
-## explicit; go 1.18
+# golang.org/x/net v0.39.0
+## explicit; go 1.23.0
golang.org/x/net/html
golang.org/x/net/html/atom
golang.org/x/net/http/httpguts
golang.org/x/net/http2
golang.org/x/net/http2/hpack
golang.org/x/net/idna
+golang.org/x/net/internal/httpcommon
golang.org/x/net/internal/socks
golang.org/x/net/internal/timeseries
golang.org/x/net/proxy
@@ -883,22 +886,22 @@ golang.org/x/net/websocket
## explicit; go 1.18
golang.org/x/oauth2
golang.org/x/oauth2/internal
-# golang.org/x/sync v0.10.0
-## explicit; go 1.18
+# golang.org/x/sync v0.13.0
+## explicit; go 1.23.0
golang.org/x/sync/errgroup
golang.org/x/sync/semaphore
-# golang.org/x/sys v0.29.0
-## explicit; go 1.18
+# golang.org/x/sys v0.32.0
+## explicit; go 1.23.0
golang.org/x/sys/cpu
golang.org/x/sys/plan9
golang.org/x/sys/unix
golang.org/x/sys/windows
golang.org/x/sys/windows/registry
-# golang.org/x/term v0.27.0
-## explicit; go 1.18
+# golang.org/x/term v0.31.0
+## explicit; go 1.23.0
golang.org/x/term
-# golang.org/x/text v0.21.0
-## explicit; go 1.18
+# golang.org/x/text v0.24.0
+## explicit; go 1.23.0
golang.org/x/text/cases
golang.org/x/text/internal
golang.org/x/text/internal/language
@@ -910,11 +913,11 @@ golang.org/x/text/transform
golang.org/x/text/unicode/bidi
golang.org/x/text/unicode/norm
golang.org/x/text/width
-# golang.org/x/time v0.6.0
-## explicit; go 1.18
+# golang.org/x/time v0.11.0
+## explicit; go 1.23.0
golang.org/x/time/rate
-# golang.org/x/tools v0.27.0
-## explicit; go 1.22.0
+# golang.org/x/tools v0.32.0
+## explicit; go 1.23.0
golang.org/x/tools/cmd/stringer
golang.org/x/tools/go/gcexportdata
golang.org/x/tools/go/packages