vendor: github.com/moby/buildkit v0.21.0-rc1

Signed-off-by: Jonathan A. Sternberg <jonathan.sternberg@docker.com>
This commit is contained in:
Jonathan A. Sternberg 2025-04-09 10:28:03 -05:00
parent a34cdff84e
commit 8fb1157b5f
No known key found for this signature in database
GPG Key ID: 6603D4B96394F6B1
221 changed files with 6530 additions and 3986 deletions

34
go.mod
View File

@ -29,12 +29,12 @@ require (
github.com/hashicorp/hcl/v2 v2.23.0 github.com/hashicorp/hcl/v2 v2.23.0
github.com/in-toto/in-toto-golang v0.5.0 github.com/in-toto/in-toto-golang v0.5.0
github.com/mitchellh/hashstructure/v2 v2.0.2 github.com/mitchellh/hashstructure/v2 v2.0.2
github.com/moby/buildkit v0.20.2 github.com/moby/buildkit v0.21.0-rc1
github.com/moby/sys/mountinfo v0.7.2 github.com/moby/sys/mountinfo v0.7.2
github.com/moby/sys/signal v0.7.1 github.com/moby/sys/signal v0.7.1
github.com/morikuni/aec v1.0.0 github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0 github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.1.0 github.com/opencontainers/image-spec v1.1.1
github.com/pelletier/go-toml v1.9.5 github.com/pelletier/go-toml v1.9.5
github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c github.com/pkg/browser v0.0.0-20240102092130-5ac0b6a4141c
github.com/pkg/errors v0.9.1 github.com/pkg/errors v0.9.1
@ -44,7 +44,7 @@ require (
github.com/spf13/cobra v1.9.1 github.com/spf13/cobra v1.9.1
github.com/spf13/pflag v1.0.6 github.com/spf13/pflag v1.0.6
github.com/stretchr/testify v1.10.0 github.com/stretchr/testify v1.10.0
github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a github.com/tonistiigi/fsutil v0.0.0-20250318190121-d73a4b3b8a7e
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4
github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250408171107-3dd17559e117 github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250408171107-3dd17559e117
github.com/zclconf/go-cty v1.16.0 github.com/zclconf/go-cty v1.16.0
@ -53,11 +53,11 @@ require (
go.opentelemetry.io/otel/metric v1.31.0 go.opentelemetry.io/otel/metric v1.31.0
go.opentelemetry.io/otel/sdk v1.31.0 go.opentelemetry.io/otel/sdk v1.31.0
go.opentelemetry.io/otel/trace v1.31.0 go.opentelemetry.io/otel/trace v1.31.0
golang.org/x/mod v0.22.0 golang.org/x/mod v0.24.0
golang.org/x/sync v0.10.0 golang.org/x/sync v0.13.0
golang.org/x/sys v0.29.0 golang.org/x/sys v0.32.0
golang.org/x/term v0.27.0 golang.org/x/term v0.31.0
golang.org/x/text v0.21.0 golang.org/x/text v0.24.0
google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38 google.golang.org/genproto/googleapis/rpc v0.0.0-20241021214115-324edc3d5d38
google.golang.org/grpc v1.69.4 google.golang.org/grpc v1.69.4
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1 google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.5.1
@ -111,7 +111,7 @@ require (
github.com/gogo/protobuf v1.3.2 // indirect github.com/gogo/protobuf v1.3.2 // indirect
github.com/golang/protobuf v1.5.4 // indirect github.com/golang/protobuf v1.5.4 // indirect
github.com/google/gnostic-models v0.6.8 // indirect github.com/google/gnostic-models v0.6.8 // indirect
github.com/google/go-cmp v0.6.0 // indirect github.com/google/go-cmp v0.7.0 // indirect
github.com/google/gofuzz v1.2.0 // indirect github.com/google/gofuzz v1.2.0 // indirect
github.com/gorilla/mux v1.8.1 // indirect github.com/gorilla/mux v1.8.1 // indirect
github.com/gorilla/websocket v1.5.0 // indirect github.com/gorilla/websocket v1.5.0 // indirect
@ -122,7 +122,7 @@ require (
github.com/inconshreveable/mousetrap v1.1.0 // indirect github.com/inconshreveable/mousetrap v1.1.0 // indirect
github.com/josharian/intern v1.0.0 // indirect github.com/josharian/intern v1.0.0 // indirect
github.com/json-iterator/go v1.1.12 // indirect github.com/json-iterator/go v1.1.12 // indirect
github.com/klauspost/compress v1.17.11 // indirect github.com/klauspost/compress v1.18.0 // indirect
github.com/mailru/easyjson v0.7.7 // indirect github.com/mailru/easyjson v0.7.7 // indirect
github.com/mattn/go-runewidth v0.0.15 // indirect github.com/mattn/go-runewidth v0.0.15 // indirect
github.com/mattn/go-shellwords v1.0.12 // indirect github.com/mattn/go-shellwords v1.0.12 // indirect
@ -132,7 +132,7 @@ require (
github.com/moby/patternmatcher v0.6.0 // indirect github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.4.0 // indirect github.com/moby/spdystream v0.4.0 // indirect
github.com/moby/sys/sequential v0.6.0 // indirect github.com/moby/sys/sequential v0.6.0 // indirect
github.com/moby/sys/user v0.3.0 // indirect github.com/moby/sys/user v0.4.0 // indirect
github.com/moby/sys/userns v0.1.0 // indirect github.com/moby/sys/userns v0.1.0 // indirect
github.com/moby/term v0.5.2 // indirect github.com/moby/term v0.5.2 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@ -149,7 +149,7 @@ require (
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect github.com/shibumi/go-pathspec v1.3.0 // indirect
github.com/theupdateframework/notary v0.7.0 // indirect github.com/theupdateframework/notary v0.7.0 // indirect
github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205 // indirect github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 // indirect
github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect github.com/tonistiigi/units v0.0.0-20180711220420-6950e57a87ea // indirect
github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect github.com/tonistiigi/vt100 v0.0.0-20240514184818-90bafcd6abab // indirect
github.com/x448/float16 v0.8.4 // indirect github.com/x448/float16 v0.8.4 // indirect
@ -167,12 +167,12 @@ require (
go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.31.0 // indirect
go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect go.opentelemetry.io/otel/sdk/metric v1.31.0 // indirect
go.opentelemetry.io/proto/otlp v1.3.1 // indirect go.opentelemetry.io/proto/otlp v1.3.1 // indirect
golang.org/x/crypto v0.31.0 // indirect golang.org/x/crypto v0.37.0 // indirect
golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f // indirect golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 // indirect
golang.org/x/net v0.33.0 // indirect golang.org/x/net v0.39.0 // indirect
golang.org/x/oauth2 v0.23.0 // indirect golang.org/x/oauth2 v0.23.0 // indirect
golang.org/x/time v0.6.0 // indirect golang.org/x/time v0.11.0 // indirect
golang.org/x/tools v0.27.0 // indirect golang.org/x/tools v0.32.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 // indirect google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v2 v2.4.0 // indirect

68
go.sum
View File

@ -193,8 +193,8 @@ github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYu
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU= github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0=
github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
@ -245,8 +245,8 @@ github.com/juju/loggo v0.0.0-20190526231331-6e530bcce5d8/go.mod h1:vgyd7OREkbtVE
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8= github.com/kisielk/errcheck v1.5.0/go.mod h1:pFxgyoBC7bSaBwPgfKdkLd5X25qrDl4LWUI2bnpBCr8=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/klauspost/compress v1.17.11 h1:In6xLpyWOi1+C7tXUUWv2ot1QvBjxevKAaI6IXrJmUc= github.com/klauspost/compress v1.18.0 h1:c/Cqfb0r+Yi+JtIEq73FWXVkRonBlf0CRNYc8Zttxdo=
github.com/klauspost/compress v1.17.11/go.mod h1:pMDklpSncoRMuLFrf1W9Ss9KT+0rH90U12bZKk7uwG0= github.com/klauspost/compress v1.18.0/go.mod h1:2Pp+KzxcywXVXMr50+X0Q/Lsb43OQHYWRCY2AiWywWQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
@ -279,8 +279,8 @@ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZX
github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4= github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE= github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y= github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
github.com/moby/buildkit v0.20.2 h1:qIeR47eQ1tzI1rwz0on3Xx2enRw/1CKjFhoONVcTlMA= github.com/moby/buildkit v0.21.0-rc1 h1:QWTyHpHUtsyUMH0CH7QStisI/FmS9njRC1FK4vVYeaE=
github.com/moby/buildkit v0.20.2/go.mod h1:DhaF82FjwOElTftl0JUAJpH/SUIUx4UvcFncLeOtlDI= github.com/moby/buildkit v0.21.0-rc1/go.mod h1:coiVDxJmP1PD+79HAnTJvBMetLTdCws8gpWiYX2vcH8=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0= github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo= github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg= github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
@ -295,8 +295,8 @@ github.com/moby/sys/sequential v0.6.0 h1:qrx7XFUd/5DxtqcoH1h438hF5TmOvzC/lspjy7z
github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko= github.com/moby/sys/sequential v0.6.0/go.mod h1:uyv8EUTrca5PnDsdMGXhZe6CCe8U/UiTWd+lL+7b/Ko=
github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0= github.com/moby/sys/signal v0.7.1 h1:PrQxdvxcGijdo6UXXo/lU/TvHUWyPhj7UOpSo8tuvk0=
github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8= github.com/moby/sys/signal v0.7.1/go.mod h1:Se1VGehYokAkrSQwL4tDzHvETwUZlnY7S5XtQ50mQp8=
github.com/moby/sys/user v0.3.0 h1:9ni5DlcW5an3SvRSx4MouotOygvzaXbaSrc/wGDFWPo= github.com/moby/sys/user v0.4.0 h1:jhcMKit7SA80hivmFJcbB1vqmw//wU61Zdui2eQXuMs=
github.com/moby/sys/user v0.3.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs= github.com/moby/sys/user v0.4.0/go.mod h1:bG+tYYYJgaMtRKgEmuueC0hJEAZWwtIbZTB+85uoHjs=
github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g= github.com/moby/sys/userns v0.1.0 h1:tVLXkFOxVu9A64/yh59slHVv9ahO9UIev4JZusOLG/g=
github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28= github.com/moby/sys/userns v0.1.0/go.mod h1:IHUYgu/kao6N8YZlp9Cf444ySSvCmDlmzUcYfDHOl28=
github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ= github.com/moby/term v0.5.2 h1:6qk3FJAFDs6i/q3W/pQ97SX192qKfZgGjCQqfCJkgzQ=
@ -329,8 +329,8 @@ github.com/opencontainers/go-digest v0.0.0-20170106003457-a6d0ee40d420/go.mod h1
github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U= github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8Oi/yOhh5U=
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM= github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0= github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.1.0 h1:8SG7/vwALn54lVB/0yZ/MMwhFrPYtpEHQb2IpWsCzug= github.com/opencontainers/image-spec v1.1.1 h1:y0fUlFfIZhPF1W537XOLg0/fcx6zcHCJwooC2xJA040=
github.com/opencontainers/image-spec v1.1.0/go.mod h1:W4s4sFTMaBeK1BQLXbG4AdM2szdn85PY75RI83NrTrM= github.com/opencontainers/image-spec v1.1.1/go.mod h1:qpqAh3Dmcf36wStyyWU+kCeDgrGnAve2nCC8+7h8Q0M=
github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk= github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE7dzrbT927iTk=
github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0= github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8= github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
@ -415,10 +415,10 @@ github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOf
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c= github.com/theupdateframework/notary v0.7.0 h1:QyagRZ7wlSpjT5N2qQAh/pN+DVqgekv4DzbAiAiEL3c=
github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw= github.com/theupdateframework/notary v0.7.0/go.mod h1:c9DRxcmhHmVLDay4/2fUYdISnHqbFDGRSlXPO0AhYWw=
github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205 h1:eUk79E1w8yMtXeHSzjKorxuC8qJOnyXQnLaJehxpJaI= github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323 h1:r0p7fK56l8WPequOaR3i9LBqfPtEdXIQbUTzT55iqT4=
github.com/tonistiigi/dchapes-mode v0.0.0-20241001053921-ca0759fec205/go.mod h1:3Iuxbr0P7D3zUzBMAZB+ois3h/et0shEz0qApgHYGpY= github.com/tonistiigi/dchapes-mode v0.0.0-20250318174251-73d941a28323/go.mod h1:3Iuxbr0P7D3zUzBMAZB+ois3h/et0shEz0qApgHYGpY=
github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a h1:EfGw4G0x/8qXWgtcZ6KVaPS+wpWOQMaypczzP8ojkMY= github.com/tonistiigi/fsutil v0.0.0-20250318190121-d73a4b3b8a7e h1:AiXT0JHwQA52AEOVMsxRytSI9mdJSie5gUp6OQ1R8fU=
github.com/tonistiigi/fsutil v0.0.0-20250113203817-b14e27f4135a/go.mod h1:Dl/9oEjK7IqnjAm21Okx/XIxUCFJzvh+XdVHUlBwXTw= github.com/tonistiigi/fsutil v0.0.0-20250318190121-d73a4b3b8a7e/go.mod h1:BKdcez7BiVtBvIcef90ZPc6ebqIWr4JWD7+EvLm6J98=
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8= github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4 h1:7I5c2Ig/5FgqkYOh/N87NzoyI9U15qUPXhDD8uCupv8=
github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE= github.com/tonistiigi/go-csvvalue v0.0.0-20240710180619-ddb21b71c0b4/go.mod h1:278M4p8WsNh3n4a1eqiFcV2FGk7wE5fwUpUom9mK9lE=
github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250408171107-3dd17559e117 h1:XFwyh2JZwR5aiKLXHX2C1n0v5F11dCJpyGL1W/Cpl3U= github.com/tonistiigi/jaeger-ui-rest v0.0.0-20250408171107-3dd17559e117 h1:XFwyh2JZwR5aiKLXHX2C1n0v5F11dCJpyGL1W/Cpl3U=
@ -490,14 +490,14 @@ golang.org/x/crypto v0.0.0-20200302210943-78000ba7a073/go.mod h1:LzIPMQfyMNhhGPh
golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200422194213-44a606286825/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I= golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.31.0 h1:ihbySMvVjLAeSH1IbfcRTkD/iNscyz8rGzjF/E5hV6U= golang.org/x/crypto v0.37.0 h1:kJNSjF/Xp7kU0iB2Z+9viTPMW4EqqsrywMXLJOOsXSE=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk= golang.org/x/crypto v0.37.0/go.mod h1:vg+k43peMZ0pUMhYmVAWysMK35e6ioLh3wB8ZCAfbVc=
golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f h1:XdNn9LlyWAhLVp6P/i8QYBW+hlyhrhei9uErw2B5GJo= golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0 h1:R84qjqJb5nVJMxqWYb3np9L5ZsaDtB+a39EqjV0JSUM=
golang.org/x/exp v0.0.0-20241108190413-2d47ceb2692f/go.mod h1:D5SMRVC3C2/4+F/DB1wZsLRnSNimn2Sp/NPsCrsv8ak= golang.org/x/exp v0.0.0-20250408133849-7e4ce0ab07d0/go.mod h1:S9Xr4PYopiDyqSyp5NjCrhFrqg6A5zA2E/iPHPhqnS8=
golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.22.0 h1:D4nJWe9zXqHOmWqj4VMOJhvzj7bEZg4wEYa759z1pH4= golang.org/x/mod v0.24.0 h1:ZfthKaKaT4NrhGVZHO1/WDTwGES4De8KtWO0SIbNJMU=
golang.org/x/mod v0.22.0/go.mod h1:6SkKJ3Xj0I0BrPOZoBy3bdMptDDU9oJrpohJ3eWZ1fY= golang.org/x/mod v0.24.0/go.mod h1:IXM97Txy2VM4PJ3gI61r1YEk/gAj6zAHN3AdZt6S9Ww=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@ -505,8 +505,8 @@ golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLL
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.33.0 h1:74SYHlV8BIgHIFC/LrYkOGIwL19eTYXQ5wc6TBuO36I= golang.org/x/net v0.39.0 h1:ZCu7HMWDxpXpaiKdhzIfaltL9Lp31x/3fCP11bc6/fY=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4= golang.org/x/net v0.39.0/go.mod h1:X7NRbYVEA+ewNkCNyJ513WmMdQ3BineSwVtN2zD/d+E=
golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs= golang.org/x/oauth2 v0.23.0 h1:PbgcYx2W7i4LvjJWEbf0ngHV6qJYr86PkAV3bXdLEbs=
golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI= golang.org/x/oauth2 v0.23.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -515,8 +515,8 @@ golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ= golang.org/x/sync v0.13.0 h1:AauUjRAJ9OSnvULf/ARrrVywoJDy0YS2AwQ98I37610=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk= golang.org/x/sync v0.13.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@ -529,24 +529,24 @@ golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.0.0-20220715151400-c0bba94af5f8/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg= golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU= golang.org/x/sys v0.32.0 h1:s77OFDvIQeibCmezSnk/q6iAfkdiQaJi4VzroCFrN20=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA= golang.org/x/sys v0.32.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw= golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.27.0 h1:WP60Sv1nlK1T6SupCHbXzSaN0b9wUmsPoRS9b61A23Q= golang.org/x/term v0.31.0 h1:erwDkOK1Msy6offm1mOgvspSkslFnIGsFnxOKoufg3o=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM= golang.org/x/term v0.31.0/go.mod h1:R4BeIy7D95HzImkxGkTW1UQTtP54tio2RyHz7PwK0aw=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo= golang.org/x/text v0.24.0 h1:dd5Bzh4yt5KYA8f9CJHCP4FB4D51c2c6JvN37xJJkJ0=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ= golang.org/x/text v0.24.0/go.mod h1:L8rBsPeo2pSS+xqN0d5u2ikmjtmoJbDBT1b7nHvFCdU=
golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U= golang.org/x/time v0.11.0 h1:/bpjEDfN9tkoN/ryeYHnv5hcMlc8ncjMcM4XBk5NWV0=
golang.org/x/time v0.6.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/time v0.11.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE=
golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA= golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.27.0 h1:qEKojBykQkQ4EynWy4S8Weg69NumxKdn40Fce3uc/8o= golang.org/x/tools v0.32.0 h1:Q7N1vhpkQv7ybVzLFtTjvQya2ewbwNDZzUgfXGqtMWU=
golang.org/x/tools v0.27.0/go.mod h1:sUi0ZgbwW9ZPAq26Ekut+weQPR5eIM6GQLQ1Yjm1H0Q= golang.org/x/tools v0.32.0/go.mod h1:ZxrU41P/wAbZD8EDa6dDCa6XfpkhJ7HFMjHJXfBDu8s=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=

View File

@ -19,6 +19,7 @@ const (
tbFunc // func(T) bool tbFunc // func(T) bool
ttbFunc // func(T, T) bool ttbFunc // func(T, T) bool
ttiFunc // func(T, T) int
trbFunc // func(T, R) bool trbFunc // func(T, R) bool
tibFunc // func(T, I) bool tibFunc // func(T, I) bool
trFunc // func(T) R trFunc // func(T) R
@ -28,11 +29,13 @@ const (
Transformer = trFunc // func(T) R Transformer = trFunc // func(T) R
ValueFilter = ttbFunc // func(T, T) bool ValueFilter = ttbFunc // func(T, T) bool
Less = ttbFunc // func(T, T) bool Less = ttbFunc // func(T, T) bool
Compare = ttiFunc // func(T, T) int
ValuePredicate = tbFunc // func(T) bool ValuePredicate = tbFunc // func(T) bool
KeyValuePredicate = trbFunc // func(T, R) bool KeyValuePredicate = trbFunc // func(T, R) bool
) )
var boolType = reflect.TypeOf(true) var boolType = reflect.TypeOf(true)
var intType = reflect.TypeOf(0)
// IsType reports whether the reflect.Type is of the specified function type. // IsType reports whether the reflect.Type is of the specified function type.
func IsType(t reflect.Type, ft funcType) bool { func IsType(t reflect.Type, ft funcType) bool {
@ -49,6 +52,10 @@ func IsType(t reflect.Type, ft funcType) bool {
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType { if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == boolType {
return true return true
} }
case ttiFunc: // func(T, T) int
if ni == 2 && no == 1 && t.In(0) == t.In(1) && t.Out(0) == intType {
return true
}
case trbFunc: // func(T, R) bool case trbFunc: // func(T, R) bool
if ni == 2 && no == 1 && t.Out(0) == boolType { if ni == 2 && no == 1 && t.Out(0) == boolType {
return true return true

View File

@ -232,7 +232,15 @@ func (validator) apply(s *state, vx, vy reflect.Value) {
if t := s.curPath.Index(-2).Type(); t.Name() != "" { if t := s.curPath.Index(-2).Type(); t.Name() != "" {
// Named type with unexported fields. // Named type with unexported fields.
name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType name = fmt.Sprintf("%q.%v", t.PkgPath(), t.Name()) // e.g., "path/to/package".MyType
if _, ok := reflect.New(t).Interface().(error); ok { isProtoMessage := func(t reflect.Type) bool {
m, ok := reflect.PointerTo(t).MethodByName("ProtoReflect")
return ok && m.Type.NumIn() == 1 && m.Type.NumOut() == 1 &&
m.Type.Out(0).PkgPath() == "google.golang.org/protobuf/reflect/protoreflect" &&
m.Type.Out(0).Name() == "Message"
}
if isProtoMessage(t) {
help = `consider using "google.golang.org/protobuf/testing/protocmp".Transform to compare proto.Message types`
} else if _, ok := reflect.New(t).Interface().(error); ok {
help = "consider using cmpopts.EquateErrors to compare error values" help = "consider using cmpopts.EquateErrors to compare error values"
} else if t.Comparable() { } else if t.Comparable() {
help = "consider using cmpopts.EquateComparable to compare comparable Go types" help = "consider using cmpopts.EquateComparable to compare comparable Go types"

View File

@ -14,8 +14,34 @@ This package provides various compression algorithms.
[![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml) [![Go](https://github.com/klauspost/compress/actions/workflows/go.yml/badge.svg)](https://github.com/klauspost/compress/actions/workflows/go.yml)
[![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge) [![Sourcegraph Badge](https://sourcegraph.com/github.com/klauspost/compress/-/badge.svg)](https://sourcegraph.com/github.com/klauspost/compress?badge)
# package usage
Use `go get github.com/klauspost/compress@latest` to add it to your project.
This package will support the current Go version and 2 versions back.
* Use the `nounsafe` tag to disable all use of the "unsafe" package.
* Use the `noasm` tag to disable all assembly across packages.
Use the links above for more information on each.
# changelog # changelog
* Feb 19th, 2025 - [1.18.0](https://github.com/klauspost/compress/releases/tag/v1.18.0)
* Add unsafe little endian loaders https://github.com/klauspost/compress/pull/1036
* fix: check `r.err != nil` but return a nil value error `err` by @alingse in https://github.com/klauspost/compress/pull/1028
* flate: Simplify L4-6 loading https://github.com/klauspost/compress/pull/1043
* flate: Simplify matchlen (remove asm) https://github.com/klauspost/compress/pull/1045
* s2: Improve small block compression speed w/o asm https://github.com/klauspost/compress/pull/1048
* flate: Fix matchlen L5+L6 https://github.com/klauspost/compress/pull/1049
* flate: Cleanup & reduce casts https://github.com/klauspost/compress/pull/1050
* Oct 11th, 2024 - [1.17.11](https://github.com/klauspost/compress/releases/tag/v1.17.11)
* zstd: Fix extra CRC written with multiple Close calls https://github.com/klauspost/compress/pull/1017
* s2: Don't use stack for index tables https://github.com/klauspost/compress/pull/1014
* gzhttp: No content-type on no body response code by @juliens in https://github.com/klauspost/compress/pull/1011
* gzhttp: Do not set the content-type when response has no body by @kevinpollet in https://github.com/klauspost/compress/pull/1013
* Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10) * Sep 23rd, 2024 - [1.17.10](https://github.com/klauspost/compress/releases/tag/v1.17.10)
* gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978 * gzhttp: Add TransportAlwaysDecompress option. https://github.com/klauspost/compress/pull/978
* gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002 * gzhttp: Add supported decompress request body by @mirecl in https://github.com/klauspost/compress/pull/1002
@ -65,9 +91,9 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876 * zstd: Fix rare *CORRUPTION* output in "best" mode. See https://github.com/klauspost/compress/pull/876
* Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1) * Oct 14th, 2023 - [v1.17.1](https://github.com/klauspost/compress/releases/tag/v1.17.1)
* s2: Fix S2 "best" dictionary wrong encoding by @klauspost in https://github.com/klauspost/compress/pull/871 * s2: Fix S2 "best" dictionary wrong encoding https://github.com/klauspost/compress/pull/871
* flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869 * flate: Reduce allocations in decompressor and minor code improvements by @fakefloordiv in https://github.com/klauspost/compress/pull/869
* s2: Fix EstimateBlockSize on 6&7 length input by @klauspost in https://github.com/klauspost/compress/pull/867 * s2: Fix EstimateBlockSize on 6&7 length input https://github.com/klauspost/compress/pull/867
* Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0) * Sept 19th, 2023 - [v1.17.0](https://github.com/klauspost/compress/releases/tag/v1.17.0)
* Add experimental dictionary builder https://github.com/klauspost/compress/pull/853 * Add experimental dictionary builder https://github.com/klauspost/compress/pull/853
@ -124,7 +150,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
<summary>See changes to v1.15.x</summary> <summary>See changes to v1.15.x</summary>
* Jan 21st, 2023 (v1.15.15) * Jan 21st, 2023 (v1.15.15)
* deflate: Improve level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/739 * deflate: Improve level 7-9 https://github.com/klauspost/compress/pull/739
* zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728 * zstd: Add delta encoding support by @greatroar in https://github.com/klauspost/compress/pull/728
* zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745 * zstd: Various speed improvements by @greatroar https://github.com/klauspost/compress/pull/741 https://github.com/klauspost/compress/pull/734 https://github.com/klauspost/compress/pull/736 https://github.com/klauspost/compress/pull/744 https://github.com/klauspost/compress/pull/743 https://github.com/klauspost/compress/pull/745
* gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740 * gzhttp: Add SuffixETag() and DropETag() options to prevent ETag collisions on compressed responses by @willbicks in https://github.com/klauspost/compress/pull/740
@ -167,7 +193,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645 * zstd: Fix decoder crash on amd64 (no BMI) on invalid input https://github.com/klauspost/compress/pull/645
* zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644 * zstd: Disable decoder extended memory copies (amd64) due to possible crashes https://github.com/klauspost/compress/pull/644
* zstd: Allow single segments up to "max decoded size" by @klauspost in https://github.com/klauspost/compress/pull/643 * zstd: Allow single segments up to "max decoded size" https://github.com/klauspost/compress/pull/643
* July 13, 2022 (v1.15.8) * July 13, 2022 (v1.15.8)
@ -209,7 +235,7 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599 * zstd: Speed up when WithDecoderLowmem(false) https://github.com/klauspost/compress/pull/599
* zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593 * zstd: faster next state update in BMI2 version of decode by @WojciechMula in https://github.com/klauspost/compress/pull/593
* huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586 * huff0: Do not check max size when reading table. https://github.com/klauspost/compress/pull/586
* flate: Inplace hashing for level 7-9 by @klauspost in https://github.com/klauspost/compress/pull/590 * flate: Inplace hashing for level 7-9 https://github.com/klauspost/compress/pull/590
* May 11, 2022 (v1.15.4) * May 11, 2022 (v1.15.4)
@ -236,12 +262,12 @@ https://github.com/klauspost/compress/pull/919 https://github.com/klauspost/comp
* zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523) * zstd: Add stricter block size checks in [#523](https://github.com/klauspost/compress/pull/523)
* Mar 3, 2022 (v1.15.0) * Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498) * zstd: Refactor decoder [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505) * zstd: Add stream encoding without goroutines [#505](https://github.com/klauspost/compress/pull/505)
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507) * huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509) * flate: Inline literal emission [#509](https://github.com/klauspost/compress/pull/509)
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400) * gzhttp: Add zstd to transport [#400](https://github.com/klauspost/compress/pull/400)
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510) * gzhttp: Make content-type optional [#510](https://github.com/klauspost/compress/pull/510)
Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines. Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.
@ -258,7 +284,7 @@ While the release has been extensively tested, it is recommended to testing when
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503) * flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502) * zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)
* zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501 * zip: don't read data descriptor early by @saracen in [#501](https://github.com/klauspost/compress/pull/501) #501
* huff0: Use static decompression buffer up to 30% faster by @klauspost in [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500) * huff0: Use static decompression buffer up to 30% faster [#499](https://github.com/klauspost/compress/pull/499) [#500](https://github.com/klauspost/compress/pull/500)
* Feb 17, 2022 (v1.14.3) * Feb 17, 2022 (v1.14.3)
* flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478) * flate: Improve fastest levels compression speed ~10% more throughput. [#482](https://github.com/klauspost/compress/pull/482) [#489](https://github.com/klauspost/compress/pull/489) [#490](https://github.com/klauspost/compress/pull/490) [#491](https://github.com/klauspost/compress/pull/491) [#494](https://github.com/klauspost/compress/pull/494) [#478](https://github.com/klauspost/compress/pull/478)
@ -565,12 +591,14 @@ While the release has been extensively tested, it is recommended to testing when
The packages are drop-in replacements for standard libraries. Simply replace the import path to use them: The packages are drop-in replacements for standard libraries. Simply replace the import path to use them:
| old import | new import | Documentation Typical speed is about 2x of the standard library packages.
|--------------------|-----------------------------------------|--------------------|
| `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) | old import | new import | Documentation |
| `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |------------------|---------------------------------------|-------------------------------------------------------------------------|
| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) | `compress/gzip` | `github.com/klauspost/compress/gzip` | [gzip](https://pkg.go.dev/github.com/klauspost/compress/gzip?tab=doc) |
| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) | `compress/zlib` | `github.com/klauspost/compress/zlib` | [zlib](https://pkg.go.dev/github.com/klauspost/compress/zlib?tab=doc) |
| `archive/zip` | `github.com/klauspost/compress/zip` | [zip](https://pkg.go.dev/github.com/klauspost/compress/zip?tab=doc) |
| `compress/flate` | `github.com/klauspost/compress/flate` | [flate](https://pkg.go.dev/github.com/klauspost/compress/flate?tab=doc) |
* Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib). * Optimized [deflate](https://godoc.org/github.com/klauspost/compress/flate) packages which can be used as a dropin replacement for [gzip](https://godoc.org/github.com/klauspost/compress/gzip), [zip](https://godoc.org/github.com/klauspost/compress/zip) and [zlib](https://godoc.org/github.com/klauspost/compress/zlib).
@ -625,84 +653,6 @@ This will only use up to 4KB in memory when the writer is idle.
Compression is almost always worse than the fastest compression level Compression is almost always worse than the fastest compression level
and each write will allocate (a little) memory. and each write will allocate (a little) memory.
# Performance Update 2018
It has been a while since we have been looking at the speed of this package compared to the standard library, so I thought I would re-do my tests and give some overall recommendations based on the current state. All benchmarks have been performed with Go 1.10 on my Desktop Intel(R) Core(TM) i7-2600 CPU @3.40GHz. Since I last ran the tests, I have gotten more RAM, which means tests with big files are no longer limited by my SSD.
The raw results are in my [updated spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing). Due to cgo changes and upstream updates i could not get the cgo version of gzip to compile. Instead I included the [zstd](https://github.com/datadog/zstd) cgo implementation. If I get cgo gzip to work again, I might replace the results in the sheet.
The columns to take note of are: *MB/s* - the throughput. *Reduction* - the data size reduction in percent of the original. *Rel Speed* relative speed compared to the standard library at the same level. *Smaller* - how many percent smaller is the compressed output compared to stdlib. Negative means the output was bigger. *Loss* means the loss (or gain) in compression as a percentage difference of the input.
The `gzstd` (standard library gzip) and `gzkp` (this package gzip) only uses one CPU core. [`pgzip`](https://github.com/klauspost/pgzip), [`bgzf`](https://github.com/biogo/hts/tree/master/bgzf) uses all 4 cores. [`zstd`](https://github.com/DataDog/zstd) uses one core, and is a beast (but not Go, yet).
## Overall differences.
There appears to be a roughly 5-10% speed advantage over the standard library when comparing at similar compression levels.
The biggest difference you will see is the result of [re-balancing](https://blog.klauspost.com/rebalancing-deflate-compression-levels/) the compression levels. I wanted by library to give a smoother transition between the compression levels than the standard library.
This package attempts to provide a more smooth transition, where "1" is taking a lot of shortcuts, "5" is the reasonable trade-off and "9" is the "give me the best compression", and the values in between gives something reasonable in between. The standard library has big differences in levels 1-4, but levels 5-9 having no significant gains - often spending a lot more time than can be justified by the achieved compression.
There are links to all the test data in the [spreadsheet](https://docs.google.com/spreadsheets/d/1nuNE2nPfuINCZJRMt6wFWhKpToF95I47XjSsc-1rbPQ/edit?usp=sharing) in the top left field on each tab.
## Web Content
This test set aims to emulate typical use in a web server. The test-set is 4GB data in 53k files, and is a mixture of (mostly) HTML, JS, CSS.
Since level 1 and 9 are close to being the same code, they are quite close. But looking at the levels in-between the differences are quite big.
Looking at level 6, this package is 88% faster, but will output about 6% more data. For a web server, this means you can serve 88% more data, but have to pay for 6% more bandwidth. You can draw your own conclusions on what would be the most expensive for your case.
## Object files
This test is for typical data files stored on a server. In this case it is a collection of Go precompiled objects. They are very compressible.
The picture is similar to the web content, but with small differences since this is very compressible. Levels 2-3 offer good speed, but is sacrificing quite a bit of compression.
The standard library seems suboptimal on level 3 and 4 - offering both worse compression and speed than level 6 & 7 of this package respectively.
## Highly Compressible File
This is a JSON file with very high redundancy. The reduction starts at 95% on level 1, so in real life terms we are dealing with something like a highly redundant stream of data, etc.
It is definitely visible that we are dealing with specialized content here, so the results are very scattered. This package does not do very well at levels 1-4, but picks up significantly at level 5 and levels 7 and 8 offering great speed for the achieved compression.
So if you know you content is extremely compressible you might want to go slightly higher than the defaults. The standard library has a huge gap between levels 3 and 4 in terms of speed (2.75x slowdown), so it offers little "middle ground".
## Medium-High Compressible
This is a pretty common test corpus: [enwik9](http://mattmahoney.net/dc/textdata.html). It contains the first 10^9 bytes of the English Wikipedia dump on Mar. 3, 2006. This is a very good test of typical text based compression and more data heavy streams.
We see a similar picture here as in "Web Content". On equal levels some compression is sacrificed for more speed. Level 5 seems to be the best trade-off between speed and size, beating stdlib level 3 in both.
## Medium Compressible
I will combine two test sets, one [10GB file set](http://mattmahoney.net/dc/10gb.html) and a VM disk image (~8GB). Both contain different data types and represent a typical backup scenario.
The most notable thing is how quickly the standard library drops to very low compression speeds around level 5-6 without any big gains in compression. Since this type of data is fairly common, this does not seem like good behavior.
## Un-compressible Content
This is mainly a test of how good the algorithms are at detecting un-compressible input. The standard library only offers this feature with very conservative settings at level 1. Obviously there is no reason for the algorithms to try to compress input that cannot be compressed. The only downside is that it might skip some compressible data on false detections.
## Huffman only compression
This compression library adds a special compression level, named `HuffmanOnly`, which allows near linear time compression. This is done by completely disabling matching of previous data, and only reduce the number of bits to represent each character.
This means that often used characters, like 'e' and ' ' (space) in text use the fewest bits to represent, and rare characters like '¤' takes more bits to represent. For more information see [wikipedia](https://en.wikipedia.org/wiki/Huffman_coding) or this nice [video](https://youtu.be/ZdooBTdW5bM).
Since this type of compression has much less variance, the compression speed is mostly unaffected by the input data, and is usually more than *180MB/s* for a single core.
The downside is that the compression ratio is usually considerably worse than even the fastest conventional compression. The compression ratio can never be better than 8:1 (12.5%).
The linear time compression can be used as a "better than nothing" mode, where you cannot risk the encoder to slow down on some content. For comparison, the size of the "Twain" text is *233460 bytes* (+29% vs. level 1) and encode speed is 144MB/s (4.5x level 1). So in this case you trade a 30% size increase for a 4 times speedup.
For more information see my blog post on [Fast Linear Time Compression](http://blog.klauspost.com/constant-time-gzipzip-compression/).
This is implemented on Go 1.7 as "Huffman Only" mode, though not exposed for gzip.
# Other packages # Other packages

View File

@ -6,10 +6,11 @@
package huff0 package huff0
import ( import (
"encoding/binary"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"github.com/klauspost/compress/internal/le"
) )
// bitReader reads a bitstream in reverse. // bitReader reads a bitstream in reverse.
@ -46,7 +47,7 @@ func (b *bitReaderBytes) init(in []byte) error {
return nil return nil
} }
// peekBitsFast requires that at least one bit is requested every time. // peekByteFast requires that at least one byte is requested every time.
// There are no checks if the buffer is filled. // There are no checks if the buffer is filled.
func (b *bitReaderBytes) peekByteFast() uint8 { func (b *bitReaderBytes) peekByteFast() uint8 {
got := uint8(b.value >> 56) got := uint8(b.value >> 56)
@ -66,8 +67,7 @@ func (b *bitReaderBytes) fillFast() {
} }
// 2 bounds checks. // 2 bounds checks.
v := b.in[b.off-4 : b.off] low := le.Load32(b.in, b.off-4)
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << (b.bitsRead - 32) b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32 b.bitsRead -= 32
b.off -= 4 b.off -= 4
@ -76,7 +76,7 @@ func (b *bitReaderBytes) fillFast() {
// fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read. // fillFastStart() assumes the bitReaderBytes is empty and there is at least 8 bytes to read.
func (b *bitReaderBytes) fillFastStart() { func (b *bitReaderBytes) fillFastStart() {
// Do single re-slice to avoid bounds checks. // Do single re-slice to avoid bounds checks.
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:]) b.value = le.Load64(b.in, b.off-8)
b.bitsRead = 0 b.bitsRead = 0
b.off -= 8 b.off -= 8
} }
@ -86,9 +86,8 @@ func (b *bitReaderBytes) fill() {
if b.bitsRead < 32 { if b.bitsRead < 32 {
return return
} }
if b.off > 4 { if b.off >= 4 {
v := b.in[b.off-4 : b.off] low := le.Load32(b.in, b.off-4)
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << (b.bitsRead - 32) b.value |= uint64(low) << (b.bitsRead - 32)
b.bitsRead -= 32 b.bitsRead -= 32
b.off -= 4 b.off -= 4
@ -175,9 +174,7 @@ func (b *bitReaderShifted) fillFast() {
return return
} }
// 2 bounds checks. low := le.Load32(b.in, b.off-4)
v := b.in[b.off-4 : b.off]
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32 b.bitsRead -= 32
b.off -= 4 b.off -= 4
@ -185,8 +182,7 @@ func (b *bitReaderShifted) fillFast() {
// fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read. // fillFastStart() assumes the bitReaderShifted is empty and there is at least 8 bytes to read.
func (b *bitReaderShifted) fillFastStart() { func (b *bitReaderShifted) fillFastStart() {
// Do single re-slice to avoid bounds checks. b.value = le.Load64(b.in, b.off-8)
b.value = binary.LittleEndian.Uint64(b.in[b.off-8:])
b.bitsRead = 0 b.bitsRead = 0
b.off -= 8 b.off -= 8
} }
@ -197,8 +193,7 @@ func (b *bitReaderShifted) fill() {
return return
} }
if b.off > 4 { if b.off > 4 {
v := b.in[b.off-4 : b.off] low := le.Load32(b.in, b.off-4)
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value |= uint64(low) << ((b.bitsRead - 32) & 63) b.value |= uint64(low) << ((b.bitsRead - 32) & 63)
b.bitsRead -= 32 b.bitsRead -= 32
b.off -= 4 b.off -= 4

View File

@ -0,0 +1,5 @@
package le
// Indexer is the type constraint for offsets accepted by the Load
// helpers in this package: any built-in signed or unsigned integer
// type, so callers never need to convert their index to int first.
type Indexer interface {
	int | int8 | int16 | int32 | int64 | uint | uint8 | uint16 | uint32 | uint64
}

View File

@ -0,0 +1,42 @@
//go:build !(amd64 || arm64 || ppc64le || riscv64) || nounsafe || purego || appengine
package le
import (
"encoding/binary"
)
// Load8 will load from b at index i.
// It panics if i is out of range for b.
func Load8[I Indexer](b []byte, i I) byte {
	return b[i]
}

// Load16 will load from b at index i, interpreting the two bytes at
// b[i:] in little-endian order. It panics if fewer than 2 bytes remain.
func Load16[I Indexer](b []byte, i I) uint16 {
	return binary.LittleEndian.Uint16(b[i:])
}

// Load32 will load from b at index i, interpreting the four bytes at
// b[i:] in little-endian order. It panics if fewer than 4 bytes remain.
func Load32[I Indexer](b []byte, i I) uint32 {
	return binary.LittleEndian.Uint32(b[i:])
}

// Load64 will load from b at index i, interpreting the eight bytes at
// b[i:] in little-endian order. It panics if fewer than 8 bytes remain.
func Load64[I Indexer](b []byte, i I) uint64 {
	return binary.LittleEndian.Uint64(b[i:])
}
// Store16 will store v at the start of b in little-endian byte order.
// It panics if b is shorter than 2 bytes.
func Store16(b []byte, v uint16) {
	binary.LittleEndian.PutUint16(b, v)
}

// Store32 will store v at the start of b in little-endian byte order.
// It panics if b is shorter than 4 bytes.
func Store32(b []byte, v uint32) {
	binary.LittleEndian.PutUint32(b, v)
}

// Store64 will store v at the start of b in little-endian byte order.
// It panics if b is shorter than 8 bytes.
func Store64(b []byte, v uint64) {
	binary.LittleEndian.PutUint64(b, v)
}

View File

@ -0,0 +1,55 @@
// We enable 64 bit LE platforms:
//go:build (amd64 || arm64 || ppc64le || riscv64) && !nounsafe && !purego && !appengine
package le
import (
"unsafe"
)
// Load8 will load from b at index i.
// No bounds check is performed; the caller must guarantee i is in range.
func Load8[I Indexer](b []byte, i I) byte {
	// Equivalent to b[i], expressed via unsafe.Add so no bounds check
	// is emitted.
	return *(*byte)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

// Load16 will load from b at index i.
// No bounds check is performed; the caller must guarantee at least
// 2 bytes are available at b[i:].
func Load16[I Indexer](b []byte, i I) uint16 {
	// Equivalent to binary.LittleEndian.Uint16(b[i:]) on the
	// little-endian platforms selected by the build tag above.
	return *(*uint16)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

// Load32 will load from b at index i.
// No bounds check is performed; the caller must guarantee at least
// 4 bytes are available at b[i:].
func Load32[I Indexer](b []byte, i I) uint32 {
	// Equivalent to binary.LittleEndian.Uint32(b[i:]) on the
	// little-endian platforms selected by the build tag above.
	return *(*uint32)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}

// Load64 will load from b at index i.
// No bounds check is performed; the caller must guarantee at least
// 8 bytes are available at b[i:].
func Load64[I Indexer](b []byte, i I) uint64 {
	// Equivalent to binary.LittleEndian.Uint64(b[i:]) on the
	// little-endian platforms selected by the build tag above.
	return *(*uint64)(unsafe.Add(unsafe.Pointer(unsafe.SliceData(b)), i))
}
// Store16 will store v at the start of b in little-endian byte order.
// No bounds check is performed; the caller must guarantee b holds at
// least 2 bytes.
func Store16(b []byte, v uint16) {
	// Equivalent to binary.LittleEndian.PutUint16(b, v) on the
	// little-endian platforms selected by the build tag above.
	*(*uint16)(unsafe.Pointer(unsafe.SliceData(b))) = v
}

// Store32 will store v at the start of b in little-endian byte order.
// No bounds check is performed; the caller must guarantee b holds at
// least 4 bytes.
func Store32(b []byte, v uint32) {
	// Equivalent to binary.LittleEndian.PutUint32(b, v) on the
	// little-endian platforms selected by the build tag above.
	*(*uint32)(unsafe.Pointer(unsafe.SliceData(b))) = v
}

// Store64 will store v at the start of b in little-endian byte order.
// No bounds check is performed; the caller must guarantee b holds at
// least 8 bytes.
func Store64(b []byte, v uint64) {
	// Equivalent to binary.LittleEndian.PutUint64(b, v) on the
	// little-endian platforms selected by the build tag above.
	*(*uint64)(unsafe.Pointer(unsafe.SliceData(b))) = v
}

View File

@ -1,4 +1,3 @@
module github.com/klauspost/compress module github.com/klauspost/compress
go 1.19 go 1.22

View File

@ -6,7 +6,7 @@ A high performance compression algorithm is implemented. For now focused on spee
This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content. This package provides [compression](#Compressor) to and [decompression](#Decompressor) of Zstandard content.
This package is pure Go and without use of "unsafe". This package is pure Go. Use `noasm` and `nounsafe` to disable relevant features.
The `zstd` package is provided as open source software using a Go standard license. The `zstd` package is provided as open source software using a Go standard license.

View File

@ -5,11 +5,12 @@
package zstd package zstd
import ( import (
"encoding/binary"
"errors" "errors"
"fmt" "fmt"
"io" "io"
"math/bits" "math/bits"
"github.com/klauspost/compress/internal/le"
) )
// bitReader reads a bitstream in reverse. // bitReader reads a bitstream in reverse.
@ -18,6 +19,7 @@ import (
type bitReader struct { type bitReader struct {
in []byte in []byte
value uint64 // Maybe use [16]byte, but shifting is awkward. value uint64 // Maybe use [16]byte, but shifting is awkward.
cursor int // offset where next read should end
bitsRead uint8 bitsRead uint8
} }
@ -32,6 +34,7 @@ func (b *bitReader) init(in []byte) error {
if v == 0 { if v == 0 {
return errors.New("corrupt stream, did not find end of stream") return errors.New("corrupt stream, did not find end of stream")
} }
b.cursor = len(in)
b.bitsRead = 64 b.bitsRead = 64
b.value = 0 b.value = 0
if len(in) >= 8 { if len(in) >= 8 {
@ -67,18 +70,15 @@ func (b *bitReader) fillFast() {
if b.bitsRead < 32 { if b.bitsRead < 32 {
return return
} }
v := b.in[len(b.in)-4:] b.cursor -= 4
b.in = b.in[:len(b.in)-4] b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32 b.bitsRead -= 32
} }
// fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read. // fillFastStart() assumes the bitreader is empty and there is at least 8 bytes to read.
func (b *bitReader) fillFastStart() { func (b *bitReader) fillFastStart() {
v := b.in[len(b.in)-8:] b.cursor -= 8
b.in = b.in[:len(b.in)-8] b.value = le.Load64(b.in, b.cursor)
b.value = binary.LittleEndian.Uint64(v)
b.bitsRead = 0 b.bitsRead = 0
} }
@ -87,25 +87,23 @@ func (b *bitReader) fill() {
if b.bitsRead < 32 { if b.bitsRead < 32 {
return return
} }
if len(b.in) >= 4 { if b.cursor >= 4 {
v := b.in[len(b.in)-4:] b.cursor -= 4
b.in = b.in[:len(b.in)-4] b.value = (b.value << 32) | uint64(le.Load32(b.in, b.cursor))
low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
b.value = (b.value << 32) | uint64(low)
b.bitsRead -= 32 b.bitsRead -= 32
return return
} }
b.bitsRead -= uint8(8 * len(b.in)) b.bitsRead -= uint8(8 * b.cursor)
for len(b.in) > 0 { for b.cursor > 0 {
b.value = (b.value << 8) | uint64(b.in[len(b.in)-1]) b.cursor -= 1
b.in = b.in[:len(b.in)-1] b.value = (b.value << 8) | uint64(b.in[b.cursor])
} }
} }
// finished returns true if all bits have been read from the bit stream. // finished returns true if all bits have been read from the bit stream.
func (b *bitReader) finished() bool { func (b *bitReader) finished() bool {
return len(b.in) == 0 && b.bitsRead >= 64 return b.cursor == 0 && b.bitsRead >= 64
} }
// overread returns true if more bits have been requested than is on the stream. // overread returns true if more bits have been requested than is on the stream.
@ -115,13 +113,14 @@ func (b *bitReader) overread() bool {
// remain returns the number of bits remaining. // remain returns the number of bits remaining.
func (b *bitReader) remain() uint { func (b *bitReader) remain() uint {
return 8*uint(len(b.in)) + 64 - uint(b.bitsRead) return 8*uint(b.cursor) + 64 - uint(b.bitsRead)
} }
// close the bitstream and returns an error if out-of-buffer reads occurred. // close the bitstream and returns an error if out-of-buffer reads occurred.
func (b *bitReader) close() error { func (b *bitReader) close() error {
// Release reference. // Release reference.
b.in = nil b.in = nil
b.cursor = 0
if !b.finished() { if !b.finished() {
return fmt.Errorf("%d extra bits on block, should be 0", b.remain()) return fmt.Errorf("%d extra bits on block, should be 0", b.remain())
} }

View File

@ -5,14 +5,10 @@
package zstd package zstd
import ( import (
"bytes"
"encoding/binary"
"errors" "errors"
"fmt" "fmt"
"hash/crc32" "hash/crc32"
"io" "io"
"os"
"path/filepath"
"sync" "sync"
"github.com/klauspost/compress/huff0" "github.com/klauspost/compress/huff0"
@ -648,21 +644,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
println("initializing sequences:", err) println("initializing sequences:", err)
return err return err
} }
// Extract blocks...
if false && hist.dict == nil {
fatalErr := func(err error) {
if err != nil {
panic(err)
}
}
fn := fmt.Sprintf("n-%d-lits-%d-prev-%d-%d-%d-win-%d.blk", hist.decoders.nSeqs, len(hist.decoders.literals), hist.recentOffsets[0], hist.recentOffsets[1], hist.recentOffsets[2], hist.windowSize)
var buf bytes.Buffer
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.litLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.matchLengths.fse))
fatalErr(binary.Write(&buf, binary.LittleEndian, hist.decoders.offsets.fse))
buf.Write(in)
os.WriteFile(filepath.Join("testdata", "seqs", fn), buf.Bytes(), os.ModePerm)
}
return nil return nil
} }

View File

@ -9,6 +9,7 @@ import (
"fmt" "fmt"
"math" "math"
"math/bits" "math/bits"
"slices"
"github.com/klauspost/compress/huff0" "github.com/klauspost/compress/huff0"
) )
@ -457,16 +458,7 @@ func fuzzFseEncoder(data []byte) int {
// All 0 // All 0
return 0 return 0
} }
maxCount := func(a []uint32) int { cnt := int(slices.Max(hist[:maxSym]))
var max uint32
for _, v := range a {
if v > max {
max = v
}
}
return int(max)
}
cnt := maxCount(hist[:maxSym])
if cnt == len(data) { if cnt == len(data) {
// RLE // RLE
return 0 return 0
@ -884,15 +876,6 @@ func (b *blockEnc) genCodes() {
} }
} }
} }
maxCount := func(a []uint32) int {
var max uint32
for _, v := range a {
if v > max {
max = v
}
}
return int(max)
}
if debugAsserts && mlMax > maxMatchLengthSymbol { if debugAsserts && mlMax > maxMatchLengthSymbol {
panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax)) panic(fmt.Errorf("mlMax > maxMatchLengthSymbol (%d)", mlMax))
} }
@ -903,7 +886,7 @@ func (b *blockEnc) genCodes() {
panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax)) panic(fmt.Errorf("llMax > maxLiteralLengthSymbol (%d)", llMax))
} }
b.coders.mlEnc.HistogramFinished(mlMax, maxCount(mlH[:mlMax+1])) b.coders.mlEnc.HistogramFinished(mlMax, int(slices.Max(mlH[:mlMax+1])))
b.coders.ofEnc.HistogramFinished(ofMax, maxCount(ofH[:ofMax+1])) b.coders.ofEnc.HistogramFinished(ofMax, int(slices.Max(ofH[:ofMax+1])))
b.coders.llEnc.HistogramFinished(llMax, maxCount(llH[:llMax+1])) b.coders.llEnc.HistogramFinished(llMax, int(slices.Max(llH[:llMax+1])))
} }

View File

@ -123,7 +123,7 @@ func NewReader(r io.Reader, opts ...DOption) (*Decoder, error) {
} }
// Read bytes from the decompressed stream into p. // Read bytes from the decompressed stream into p.
// Returns the number of bytes written and any error that occurred. // Returns the number of bytes read and any error that occurred.
// When the stream is done, io.EOF will be returned. // When the stream is done, io.EOF will be returned.
func (d *Decoder) Read(p []byte) (int, error) { func (d *Decoder) Read(p []byte) (int, error) {
var n int var n int
@ -323,6 +323,7 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
frame.bBuf = nil frame.bBuf = nil
if frame.history.decoders.br != nil { if frame.history.decoders.br != nil {
frame.history.decoders.br.in = nil frame.history.decoders.br.in = nil
frame.history.decoders.br.cursor = 0
} }
d.decoders <- block d.decoders <- block
}() }()

View File

@ -116,7 +116,7 @@ func (e *fastBase) matchlen(s, t int32, src []byte) int32 {
panic(err) panic(err)
} }
if t < 0 { if t < 0 {
err := fmt.Sprintf("s (%d) < 0", s) err := fmt.Sprintf("t (%d) < 0", t)
panic(err) panic(err)
} }
if s-t > e.maxMatchOff { if s-t > e.maxMatchOff {

View File

@ -7,20 +7,25 @@
package zstd package zstd
import ( import (
"encoding/binary"
"math/bits" "math/bits"
"github.com/klauspost/compress/internal/le"
) )
// matchLen returns the maximum common prefix length of a and b. // matchLen returns the maximum common prefix length of a and b.
// a must be the shortest of the two. // a must be the shortest of the two.
func matchLen(a, b []byte) (n int) { func matchLen(a, b []byte) (n int) {
for ; len(a) >= 8 && len(b) >= 8; a, b = a[8:], b[8:] { left := len(a)
diff := binary.LittleEndian.Uint64(a) ^ binary.LittleEndian.Uint64(b) for left >= 8 {
diff := le.Load64(a, n) ^ le.Load64(b, n)
if diff != 0 { if diff != 0 {
return n + bits.TrailingZeros64(diff)>>3 return n + bits.TrailingZeros64(diff)>>3
} }
n += 8 n += 8
left -= 8
} }
a = a[n:]
b = b[n:]
for i := range a { for i := range a {
if a[i] != b[i] { if a[i] != b[i] {

View File

@ -245,7 +245,7 @@ func (s *sequenceDecs) decodeSync(hist []byte) error {
return io.ErrUnexpectedEOF return io.ErrUnexpectedEOF
} }
var ll, mo, ml int var ll, mo, ml int
if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function: // inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState) // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

View File

@ -7,9 +7,9 @@
TEXT ·sequenceDecs_decode_amd64(SB), $8-32 TEXT ·sequenceDecs_decode_amd64(SB), $8-32
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ 24(CX), DX MOVQ 24(CX), DX
MOVBQZX 32(CX), BX MOVBQZX 40(CX), BX
MOVQ (CX), AX MOVQ (CX), AX
MOVQ 8(CX), SI MOVQ 32(CX), SI
ADDQ SI, AX ADDQ SI, AX
MOVQ AX, (SP) MOVQ AX, (SP)
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -299,8 +299,8 @@ sequenceDecs_decode_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX) MOVQ R13, 160(AX)
MOVQ br+8(FP), AX MOVQ br+8(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVB BL, 32(AX) MOVB BL, 40(AX)
MOVQ SI, 8(AX) MOVQ SI, 32(AX)
// Return success // Return success
MOVQ $0x00000000, ret+24(FP) MOVQ $0x00000000, ret+24(FP)
@ -335,9 +335,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32 TEXT ·sequenceDecs_decode_56_amd64(SB), $8-32
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ 24(CX), DX MOVQ 24(CX), DX
MOVBQZX 32(CX), BX MOVBQZX 40(CX), BX
MOVQ (CX), AX MOVQ (CX), AX
MOVQ 8(CX), SI MOVQ 32(CX), SI
ADDQ SI, AX ADDQ SI, AX
MOVQ AX, (SP) MOVQ AX, (SP)
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -598,8 +598,8 @@ sequenceDecs_decode_56_amd64_match_len_ofs_ok:
MOVQ R13, 160(AX) MOVQ R13, 160(AX)
MOVQ br+8(FP), AX MOVQ br+8(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVB BL, 32(AX) MOVB BL, 40(AX)
MOVQ SI, 8(AX) MOVQ SI, 32(AX)
// Return success // Return success
MOVQ $0x00000000, ret+24(FP) MOVQ $0x00000000, ret+24(FP)
@ -634,9 +634,9 @@ error_overread:
TEXT ·sequenceDecs_decode_bmi2(SB), $8-32 TEXT ·sequenceDecs_decode_bmi2(SB), $8-32
MOVQ br+8(FP), BX MOVQ br+8(FP), BX
MOVQ 24(BX), AX MOVQ 24(BX), AX
MOVBQZX 32(BX), DX MOVBQZX 40(BX), DX
MOVQ (BX), CX MOVQ (BX), CX
MOVQ 8(BX), BX MOVQ 32(BX), BX
ADDQ BX, CX ADDQ BX, CX
MOVQ CX, (SP) MOVQ CX, (SP)
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
@ -884,8 +884,8 @@ sequenceDecs_decode_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX) MOVQ R12, 160(CX)
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ AX, 24(CX) MOVQ AX, 24(CX)
MOVB DL, 32(CX) MOVB DL, 40(CX)
MOVQ BX, 8(CX) MOVQ BX, 32(CX)
// Return success // Return success
MOVQ $0x00000000, ret+24(FP) MOVQ $0x00000000, ret+24(FP)
@ -920,9 +920,9 @@ error_overread:
TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32 TEXT ·sequenceDecs_decode_56_bmi2(SB), $8-32
MOVQ br+8(FP), BX MOVQ br+8(FP), BX
MOVQ 24(BX), AX MOVQ 24(BX), AX
MOVBQZX 32(BX), DX MOVBQZX 40(BX), DX
MOVQ (BX), CX MOVQ (BX), CX
MOVQ 8(BX), BX MOVQ 32(BX), BX
ADDQ BX, CX ADDQ BX, CX
MOVQ CX, (SP) MOVQ CX, (SP)
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
@ -1141,8 +1141,8 @@ sequenceDecs_decode_56_bmi2_match_len_ofs_ok:
MOVQ R12, 160(CX) MOVQ R12, 160(CX)
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ AX, 24(CX) MOVQ AX, 24(CX)
MOVB DL, 32(CX) MOVB DL, 40(CX)
MOVQ BX, 8(CX) MOVQ BX, 32(CX)
// Return success // Return success
MOVQ $0x00000000, ret+24(FP) MOVQ $0x00000000, ret+24(FP)
@ -1787,9 +1787,9 @@ empty_seqs:
TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32 TEXT ·sequenceDecs_decodeSync_amd64(SB), $64-32
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ 24(CX), DX MOVQ 24(CX), DX
MOVBQZX 32(CX), BX MOVBQZX 40(CX), BX
MOVQ (CX), AX MOVQ (CX), AX
MOVQ 8(CX), SI MOVQ 32(CX), SI
ADDQ SI, AX ADDQ SI, AX
MOVQ AX, (SP) MOVQ AX, (SP)
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -2281,8 +2281,8 @@ handle_loop:
loop_finished: loop_finished:
MOVQ br+8(FP), AX MOVQ br+8(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVB BL, 32(AX) MOVB BL, 40(AX)
MOVQ SI, 8(AX) MOVQ SI, 32(AX)
// Update the context // Update the context
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -2349,9 +2349,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32 TEXT ·sequenceDecs_decodeSync_bmi2(SB), $64-32
MOVQ br+8(FP), BX MOVQ br+8(FP), BX
MOVQ 24(BX), AX MOVQ 24(BX), AX
MOVBQZX 32(BX), DX MOVBQZX 40(BX), DX
MOVQ (BX), CX MOVQ (BX), CX
MOVQ 8(BX), BX MOVQ 32(BX), BX
ADDQ BX, CX ADDQ BX, CX
MOVQ CX, (SP) MOVQ CX, (SP)
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
@ -2801,8 +2801,8 @@ handle_loop:
loop_finished: loop_finished:
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ AX, 24(CX) MOVQ AX, 24(CX)
MOVB DL, 32(CX) MOVB DL, 40(CX)
MOVQ BX, 8(CX) MOVQ BX, 32(CX)
// Update the context // Update the context
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -2869,9 +2869,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32 TEXT ·sequenceDecs_decodeSync_safe_amd64(SB), $64-32
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ 24(CX), DX MOVQ 24(CX), DX
MOVBQZX 32(CX), BX MOVBQZX 40(CX), BX
MOVQ (CX), AX MOVQ (CX), AX
MOVQ 8(CX), SI MOVQ 32(CX), SI
ADDQ SI, AX ADDQ SI, AX
MOVQ AX, (SP) MOVQ AX, (SP)
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -3465,8 +3465,8 @@ handle_loop:
loop_finished: loop_finished:
MOVQ br+8(FP), AX MOVQ br+8(FP), AX
MOVQ DX, 24(AX) MOVQ DX, 24(AX)
MOVB BL, 32(AX) MOVB BL, 40(AX)
MOVQ SI, 8(AX) MOVQ SI, 32(AX)
// Update the context // Update the context
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX
@ -3533,9 +3533,9 @@ error_not_enough_space:
TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32 TEXT ·sequenceDecs_decodeSync_safe_bmi2(SB), $64-32
MOVQ br+8(FP), BX MOVQ br+8(FP), BX
MOVQ 24(BX), AX MOVQ 24(BX), AX
MOVBQZX 32(BX), DX MOVBQZX 40(BX), DX
MOVQ (BX), CX MOVQ (BX), CX
MOVQ 8(BX), BX MOVQ 32(BX), BX
ADDQ BX, CX ADDQ BX, CX
MOVQ CX, (SP) MOVQ CX, (SP)
MOVQ ctx+16(FP), CX MOVQ ctx+16(FP), CX
@ -4087,8 +4087,8 @@ handle_loop:
loop_finished: loop_finished:
MOVQ br+8(FP), CX MOVQ br+8(FP), CX
MOVQ AX, 24(CX) MOVQ AX, 24(CX)
MOVB DL, 32(CX) MOVB DL, 40(CX)
MOVQ BX, 8(CX) MOVQ BX, 32(CX)
// Update the context // Update the context
MOVQ ctx+16(FP), AX MOVQ ctx+16(FP), AX

View File

@ -29,7 +29,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
} }
for i := range seqs { for i := range seqs {
var ll, mo, ml int var ll, mo, ml int
if len(br.in) > 4+((maxOffsetBits+16+16)>>3) { if br.cursor > 4+((maxOffsetBits+16+16)>>3) {
// inlined function: // inlined function:
// ll, mo, ml = s.nextFast(br, llState, mlState, ofState) // ll, mo, ml = s.nextFast(br, llState, mlState, ofState)

View File

@ -69,7 +69,6 @@ var llBitsTable = [maxLLCode + 1]byte{
func llCode(litLength uint32) uint8 { func llCode(litLength uint32) uint8 {
const llDeltaCode = 19 const llDeltaCode = 19
if litLength <= 63 { if litLength <= 63 {
// Compiler insists on bounds check (Go 1.12)
return llCodeTable[litLength&63] return llCodeTable[litLength&63]
} }
return uint8(highBit(litLength)) + llDeltaCode return uint8(highBit(litLength)) + llDeltaCode
@ -102,7 +101,6 @@ var mlBitsTable = [maxMLCode + 1]byte{
func mlCode(mlBase uint32) uint8 { func mlCode(mlBase uint32) uint8 {
const mlDeltaCode = 36 const mlDeltaCode = 36
if mlBase <= 127 { if mlBase <= 127 {
// Compiler insists on bounds check (Go 1.12)
return mlCodeTable[mlBase&127] return mlCodeTable[mlBase&127]
} }
return uint8(highBit(mlBase)) + mlDeltaCode return uint8(highBit(mlBase)) + mlDeltaCode

View File

@ -197,7 +197,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
n, r.err = w.Write(r.block.output) n, r.err = w.Write(r.block.output)
if r.err != nil { if r.err != nil {
return written, err return written, r.err
} }
written += int64(n) written += int64(n)
continue continue
@ -239,7 +239,7 @@ func (r *SnappyConverter) Convert(in io.Reader, w io.Writer) (int64, error) {
} }
n, r.err = w.Write(r.block.output) n, r.err = w.Write(r.block.output)
if r.err != nil { if r.err != nil {
return written, err return written, r.err
} }
written += int64(n) written += int64(n)
continue continue

View File

@ -5,10 +5,11 @@ package zstd
import ( import (
"bytes" "bytes"
"encoding/binary"
"errors" "errors"
"log" "log"
"math" "math"
"github.com/klauspost/compress/internal/le"
) )
// enable debug printing // enable debug printing
@ -110,11 +111,11 @@ func printf(format string, a ...interface{}) {
} }
func load3232(b []byte, i int32) uint32 { func load3232(b []byte, i int32) uint32 {
return binary.LittleEndian.Uint32(b[:len(b):len(b)][i:]) return le.Load32(b, i)
} }
func load6432(b []byte, i int32) uint64 { func load6432(b []byte, i int32) uint64 {
return binary.LittleEndian.Uint64(b[:len(b):len(b)][i:]) return le.Load64(b, i)
} }
type byter interface { type byter interface {

View File

@ -614,7 +614,7 @@ func Shlex(str string) RunOption {
}) })
} }
func Shlexf(str string, v ...interface{}) RunOption { func Shlexf(str string, v ...any) RunOption {
return runOptionFunc(func(ei *ExecInfo) { return runOptionFunc(func(ei *ExecInfo) {
ei.State = shlexf(str, true, v...)(ei.State) ei.State = shlexf(str, true, v...)(ei.State)
}) })

View File

@ -2,6 +2,7 @@ package llb
import ( import (
"io" "io"
"slices"
"sync" "sync"
cerrdefs "github.com/containerd/errdefs" cerrdefs "github.com/containerd/errdefs"
@ -84,7 +85,7 @@ func ReadFrom(r io.Reader) (*Definition, error) {
func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) { func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) {
c := *base c := *base
c.WorkerConstraints = append([]string{}, c.WorkerConstraints...) c.WorkerConstraints = slices.Clone(c.WorkerConstraints)
if p := override.Platform; p != nil { if p := override.Platform; p != nil {
c.Platform = p c.Platform = p
@ -105,7 +106,7 @@ func MarshalConstraints(base, override *Constraints) (*pb.Op, *pb.OpMetadata) {
OSVersion: c.Platform.OSVersion, OSVersion: c.Platform.OSVersion,
} }
if c.Platform.OSFeatures != nil { if c.Platform.OSFeatures != nil {
opPlatform.OSFeatures = append([]string{}, c.Platform.OSFeatures...) opPlatform.OSFeatures = slices.Clone(c.Platform.OSFeatures)
} }
return &pb.Op{ return &pb.Op{

View File

@ -35,7 +35,7 @@ var (
// AddEnvf is the same as [AddEnv] but allows for a format string. // AddEnvf is the same as [AddEnv] but allows for a format string.
// This is the equivalent of `[State.AddEnvf]` // This is the equivalent of `[State.AddEnvf]`
func AddEnvf(key, value string, v ...interface{}) StateOption { func AddEnvf(key, value string, v ...any) StateOption {
return addEnvf(key, value, true, v...) return addEnvf(key, value, true, v...)
} }
@ -46,12 +46,12 @@ func AddEnv(key, value string) StateOption {
return addEnvf(key, value, false) return addEnvf(key, value, false)
} }
func addEnvf(key, value string, replace bool, v ...interface{}) StateOption { func addEnvf(key, value string, replace bool, v ...any) StateOption {
if replace { if replace {
value = fmt.Sprintf(value, v...) value = fmt.Sprintf(value, v...)
} }
return func(s State) State { return func(s State) State {
return s.withValue(keyEnv, func(ctx context.Context, c *Constraints) (interface{}, error) { return s.withValue(keyEnv, func(ctx context.Context, c *Constraints) (any, error) {
env, err := getEnv(s)(ctx, c) env, err := getEnv(s)(ctx, c)
if err != nil { if err != nil {
return nil, err return nil, err
@ -69,16 +69,16 @@ func Dir(str string) StateOption {
} }
// Dirf is the same as [Dir] but allows for a format string. // Dirf is the same as [Dir] but allows for a format string.
func Dirf(str string, v ...interface{}) StateOption { func Dirf(str string, v ...any) StateOption {
return dirf(str, true, v...) return dirf(str, true, v...)
} }
func dirf(value string, replace bool, v ...interface{}) StateOption { func dirf(value string, replace bool, v ...any) StateOption {
if replace { if replace {
value = fmt.Sprintf(value, v...) value = fmt.Sprintf(value, v...)
} }
return func(s State) State { return func(s State) State {
return s.withValue(keyDir, func(ctx context.Context, c *Constraints) (interface{}, error) { return s.withValue(keyDir, func(ctx context.Context, c *Constraints) (any, error) {
if !path.IsAbs(value) { if !path.IsAbs(value) {
prev, err := getDir(s)(ctx, c) prev, err := getDir(s)(ctx, c)
if err != nil { if err != nil {
@ -213,7 +213,7 @@ func args(args ...string) StateOption {
} }
} }
func shlexf(str string, replace bool, v ...interface{}) StateOption { func shlexf(str string, replace bool, v ...any) StateOption {
if replace { if replace {
str = fmt.Sprintf(str, v...) str = fmt.Sprintf(str, v...)
} }
@ -248,7 +248,7 @@ func getPlatform(s State) func(context.Context, *Constraints) (*ocispecs.Platfor
func extraHost(host string, ip net.IP) StateOption { func extraHost(host string, ip net.IP) StateOption {
return func(s State) State { return func(s State) State {
return s.withValue(keyExtraHost, func(ctx context.Context, c *Constraints) (interface{}, error) { return s.withValue(keyExtraHost, func(ctx context.Context, c *Constraints) (any, error) {
v, err := getExtraHosts(s)(ctx, c) v, err := getExtraHosts(s)(ctx, c)
if err != nil { if err != nil {
return nil, err return nil, err
@ -278,7 +278,7 @@ type HostIP struct {
func ulimit(name UlimitName, soft int64, hard int64) StateOption { func ulimit(name UlimitName, soft int64, hard int64) StateOption {
return func(s State) State { return func(s State) State {
return s.withValue(keyUlimit, func(ctx context.Context, c *Constraints) (interface{}, error) { return s.withValue(keyUlimit, func(ctx context.Context, c *Constraints) (any, error) {
v, err := getUlimit(s)(ctx, c) v, err := getUlimit(s)(ctx, c)
if err != nil { if err != nil {
return nil, err return nil, err

View File

@ -360,13 +360,6 @@ func AuthTokenSecret(v string) GitOption {
}) })
} }
func AuthHeaderSecret(v string) GitOption {
return gitOptionFunc(func(gi *GitInfo) {
gi.AuthHeaderSecret = v
gi.addAuthCap = true
})
}
func KnownSSHHosts(key string) GitOption { func KnownSSHHosts(key string) GitOption {
key = strings.TrimSuffix(key, "\n") key = strings.TrimSuffix(key, "\n")
return gitOptionFunc(func(gi *GitInfo) { return gitOptionFunc(func(gi *GitInfo) {
@ -380,6 +373,29 @@ func MountSSHSock(sshID string) GitOption {
}) })
} }
// AuthOption can be used with either HTTP or Git sources.
type AuthOption interface {
GitOption
HTTPOption
}
// AuthHeaderSecret returns an AuthOption that defines the name of a
// secret to use for HTTP based authentication.
func AuthHeaderSecret(secretName string) AuthOption {
return struct {
GitOption
HTTPOption
}{
GitOption: gitOptionFunc(func(gi *GitInfo) {
gi.AuthHeaderSecret = secretName
gi.addAuthCap = true
}),
HTTPOption: httpOptionFunc(func(hi *HTTPInfo) {
hi.AuthHeaderSecret = secretName
}),
}
}
// Scratch returns a state that represents an empty filesystem. // Scratch returns a state that represents an empty filesystem.
func Scratch() State { func Scratch() State {
return NewState(nil) return NewState(nil)
@ -595,6 +611,14 @@ func HTTP(url string, opts ...HTTPOption) State {
attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID) attrs[pb.AttrHTTPGID] = strconv.Itoa(hi.GID)
addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID) addCap(&hi.Constraints, pb.CapSourceHTTPUIDGID)
} }
if hi.AuthHeaderSecret != "" {
attrs[pb.AttrHTTPAuthHeaderSecret] = hi.AuthHeaderSecret
addCap(&hi.Constraints, pb.CapSourceHTTPAuth)
}
if hi.Header != nil {
hi.Header.setAttrs(attrs)
addCap(&hi.Constraints, pb.CapSourceHTTPHeader)
}
addCap(&hi.Constraints, pb.CapSourceHTTP) addCap(&hi.Constraints, pb.CapSourceHTTP)
source := NewSource(url, attrs, hi.Constraints) source := NewSource(url, attrs, hi.Constraints)
@ -603,11 +627,13 @@ func HTTP(url string, opts ...HTTPOption) State {
type HTTPInfo struct { type HTTPInfo struct {
constraintsWrapper constraintsWrapper
Checksum digest.Digest Checksum digest.Digest
Filename string Filename string
Perm int Perm int
UID int UID int
GID int GID int
AuthHeaderSecret string
Header *HTTPHeader
} }
type HTTPOption interface { type HTTPOption interface {
@ -645,6 +671,33 @@ func Chown(uid, gid int) HTTPOption {
}) })
} }
// Header returns an [HTTPOption] that ensures additional request headers will
// be sent when retrieving the HTTP source.
func Header(header HTTPHeader) HTTPOption {
return httpOptionFunc(func(hi *HTTPInfo) {
hi.Header = &header
})
}
type HTTPHeader struct {
Accept string
UserAgent string
}
func (hh *HTTPHeader) setAttrs(attrs map[string]string) {
if hh.Accept != "" {
attrs[hh.attr("accept")] = hh.Accept
}
if hh.UserAgent != "" {
attrs[hh.attr("user-agent")] = hh.UserAgent
}
}
func (hh *HTTPHeader) attr(name string) string {
return pb.AttrHTTPHeaderPrefix + name
}
func platformSpecificSource(id string) bool { func platformSpecificSource(id string) bool {
return strings.HasPrefix(id, "docker-image://") || strings.HasPrefix(id, "oci-layout://") return strings.HasPrefix(id, "docker-image://") || strings.HasPrefix(id, "oci-layout://")
} }

View File

@ -6,6 +6,7 @@ import (
"fmt" "fmt"
"maps" "maps"
"net" "net"
"slices"
"strings" "strings"
"github.com/containerd/platforms" "github.com/containerd/platforms"
@ -59,8 +60,8 @@ func NewState(o Output) State {
type State struct { type State struct {
out Output out Output
prev *State prev *State
key interface{} key any
value func(context.Context, *Constraints) (interface{}, error) value func(context.Context, *Constraints) (any, error)
opts []ConstraintsOpt opts []ConstraintsOpt
async *asyncState async *asyncState
} }
@ -76,13 +77,13 @@ func (s State) ensurePlatform() State {
return s return s
} }
func (s State) WithValue(k, v interface{}) State { func (s State) WithValue(k, v any) State {
return s.withValue(k, func(context.Context, *Constraints) (interface{}, error) { return s.withValue(k, func(context.Context, *Constraints) (any, error) {
return v, nil return v, nil
}) })
} }
func (s State) withValue(k interface{}, v func(context.Context, *Constraints) (interface{}, error)) State { func (s State) withValue(k any, v func(context.Context, *Constraints) (any, error)) State {
return State{ return State{
out: s.Output(), out: s.Output(),
prev: &s, // doesn't need to be original pointer prev: &s, // doesn't need to be original pointer
@ -91,7 +92,7 @@ func (s State) withValue(k interface{}, v func(context.Context, *Constraints) (i
} }
} }
func (s State) Value(ctx context.Context, k interface{}, co ...ConstraintsOpt) (interface{}, error) { func (s State) Value(ctx context.Context, k any, co ...ConstraintsOpt) (any, error) {
c := &Constraints{} c := &Constraints{}
for _, f := range co { for _, f := range co {
f.SetConstraintsOption(c) f.SetConstraintsOption(c)
@ -99,12 +100,12 @@ func (s State) Value(ctx context.Context, k interface{}, co ...ConstraintsOpt) (
return s.getValue(k)(ctx, c) return s.getValue(k)(ctx, c)
} }
func (s State) getValue(k interface{}) func(context.Context, *Constraints) (interface{}, error) { func (s State) getValue(k any) func(context.Context, *Constraints) (any, error) {
if s.key == k { if s.key == k {
return s.value return s.value
} }
if s.async != nil { if s.async != nil {
return func(ctx context.Context, c *Constraints) (interface{}, error) { return func(ctx context.Context, c *Constraints) (any, error) {
target, err := s.async.Do(ctx, c) target, err := s.async.Do(ctx, c)
if err != nil { if err != nil {
return nil, err return nil, err
@ -271,7 +272,7 @@ func (s State) WithImageConfig(c []byte) (State, error) {
OSVersion: img.OSVersion, OSVersion: img.OSVersion,
} }
if img.OSFeatures != nil { if img.OSFeatures != nil {
plat.OSFeatures = append([]string{}, img.OSFeatures...) plat.OSFeatures = slices.Clone(img.OSFeatures)
} }
s = s.Platform(plat) s = s.Platform(plat)
} }
@ -321,7 +322,7 @@ func (s State) AddEnv(key, value string) State {
} }
// AddEnvf is the same as [State.AddEnv] but with a format string. // AddEnvf is the same as [State.AddEnv] but with a format string.
func (s State) AddEnvf(key, value string, v ...interface{}) State { func (s State) AddEnvf(key, value string, v ...any) State {
return AddEnvf(key, value, v...)(s) return AddEnvf(key, value, v...)(s)
} }
@ -332,7 +333,7 @@ func (s State) Dir(str string) State {
} }
// Dirf is the same as [State.Dir] but with a format string. // Dirf is the same as [State.Dir] but with a format string.
func (s State) Dirf(str string, v ...interface{}) State { func (s State) Dirf(str string, v ...any) State {
return Dirf(str, v...)(s) return Dirf(str, v...)(s)
} }
@ -608,7 +609,7 @@ func WithCustomName(name string) ConstraintsOpt {
}) })
} }
func WithCustomNamef(name string, a ...interface{}) ConstraintsOpt { func WithCustomNamef(name string, a ...any) ConstraintsOpt {
return WithCustomName(fmt.Sprintf(name, a...)) return WithCustomName(fmt.Sprintf(name, a...))
} }
@ -746,6 +747,6 @@ func Require(filters ...string) ConstraintsOpt {
}) })
} }
func nilValue(context.Context, *Constraints) (interface{}, error) { func nilValue(context.Context, *Constraints) (any, error) {
return nil, nil return nil, nil
} }

View File

@ -142,9 +142,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
} }
contentStores := map[string]content.Store{} contentStores := map[string]content.Store{}
for key, store := range cacheOpt.contentStores { maps.Copy(contentStores, cacheOpt.contentStores)
contentStores[key] = store
}
for key, store := range opt.OCIStores { for key, store := range opt.OCIStores {
key2 := "oci:" + key key2 := "oci:" + key
if _, ok := contentStores[key2]; ok { if _, ok := contentStores[key2]; ok {
@ -361,7 +359,7 @@ func (c *Client) solve(ctx context.Context, def *llb.Definition, runGateway runG
} }
for _, storePath := range storesToUpdate { for _, storePath := range storesToUpdate {
names := []ociindex.NameOrTag{ociindex.Tag("latest")} names := []ociindex.NameOrTag{ociindex.Tag("latest")}
if t, ok := res.ExporterResponse["image.name"]; ok { if t, ok := res.ExporterResponse[exptypes.ExporterImageNameKey]; ok {
inp := strings.Split(t, ",") inp := strings.Split(t, ",")
names = make([]ociindex.NameOrTag, len(inp)) names = make([]ociindex.NameOrTag, len(inp))
for i, n := range inp { for i, n := range inp {
@ -538,9 +536,7 @@ func parseCacheOptions(ctx context.Context, isGateway bool, opt SolveOpt) (*cach
func prepareMounts(opt *SolveOpt) (map[string]fsutil.FS, error) { func prepareMounts(opt *SolveOpt) (map[string]fsutil.FS, error) {
// merge local mounts and fallback local directories together // merge local mounts and fallback local directories together
mounts := make(map[string]fsutil.FS) mounts := make(map[string]fsutil.FS)
for k, mount := range opt.LocalMounts { maps.Copy(mounts, opt.LocalMounts)
mounts[k] = mount
}
for k, dir := range opt.LocalDirs { for k, dir := range opt.LocalDirs {
mount, err := fsutil.NewFS(dir) mount, err := fsutil.NewFS(dir)
if err != nil { if err != nil {

View File

@ -121,7 +121,7 @@ type OCIConfig struct {
// StargzSnapshotterConfig is configuration for stargz snapshotter. // StargzSnapshotterConfig is configuration for stargz snapshotter.
// We use a generic map[string]interface{} in order to remove the dependency // We use a generic map[string]interface{} in order to remove the dependency
// on stargz snapshotter's config pkg from our config. // on stargz snapshotter's config pkg from our config.
StargzSnapshotterConfig map[string]interface{} `toml:"stargzSnapshotter"` StargzSnapshotterConfig map[string]any `toml:"stargzSnapshotter"`
// ApparmorProfile is the name of the apparmor profile that should be used to constrain build containers. // ApparmorProfile is the name of the apparmor profile that should be used to constrain build containers.
// The profile should already be loaded (by a higher level system) before creating a worker. // The profile should already be loaded (by a higher level system) before creating a worker.
@ -160,9 +160,9 @@ type ContainerdConfig struct {
} }
type ContainerdRuntime struct { type ContainerdRuntime struct {
Name string `toml:"name"` Name string `toml:"name"`
Path string `toml:"path"` Path string `toml:"path"`
Options map[string]interface{} `toml:"options"` Options map[string]any `toml:"options"`
} }
type GCPolicy struct { type GCPolicy struct {

View File

@ -26,11 +26,17 @@ var (
// Value: bool <true|false> // Value: bool <true|false>
OptKeyUnpack ImageExporterOptKey = "unpack" OptKeyUnpack ImageExporterOptKey = "unpack"
// Fallback image name prefix if image name isn't provided. // Image name prefix to be used for tagging a dangling image.
// If used, image will be named as <value>@<digest> // If used, image will be named as <value>@<digest> in addition
// to any other specified names.
// Value: string // Value: string
OptKeyDanglingPrefix ImageExporterOptKey = "dangling-name-prefix" OptKeyDanglingPrefix ImageExporterOptKey = "dangling-name-prefix"
// Only use the dangling image name as a fallback if image name isn't provided.
// Ignored if dangling-name-prefix is not set.
// Value: bool <true|false>
OptKeyDanglingEmptyOnly ImageExporterOptKey = "danging-name-empty-only"
// Creates additional image name with format <name>@<digest> // Creates additional image name with format <name>@<digest>
// Value: bool <true|false> // Value: bool <true|false>
OptKeyNameCanonical ImageExporterOptKey = "name-canonical" OptKeyNameCanonical ImageExporterOptKey = "name-canonical"

View File

@ -32,7 +32,7 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) {
return ps, nil return ps, nil
} }
p := platforms.DefaultSpec() var p ocispecs.Platform
if imgConfig, ok := meta[ExporterImageConfigKey]; ok { if imgConfig, ok := meta[ExporterImageConfigKey]; ok {
var img ocispecs.Image var img ocispecs.Image
err := json.Unmarshal(imgConfig, &img) err := json.Unmarshal(imgConfig, &img)
@ -51,6 +51,8 @@ func ParsePlatforms(meta map[string][]byte) (Platforms, error) {
} else if img.OS != "" || img.Architecture != "" { } else if img.OS != "" || img.Architecture != "" {
return Platforms{}, errors.Errorf("invalid image config: os and architecture must be specified together") return Platforms{}, errors.Errorf("invalid image config: os and architecture must be specified together")
} }
} else {
p = platforms.DefaultSpec()
} }
p = platforms.Normalize(p) p = platforms.Normalize(p)
pk := platforms.FormatAll(p) pk := platforms.FormatAll(p)

View File

@ -9,6 +9,7 @@ import (
const ( const (
ExporterConfigDigestKey = "config.digest" ExporterConfigDigestKey = "config.digest"
ExporterImageNameKey = "image.name"
ExporterImageDigestKey = "containerimage.digest" ExporterImageDigestKey = "containerimage.digest"
ExporterImageConfigKey = "containerimage.config" ExporterImageConfigKey = "containerimage.config"
ExporterImageConfigDigestKey = "containerimage.config.digest" ExporterImageConfigDigestKey = "containerimage.config.digest"

View File

@ -148,14 +148,16 @@ func parseDirective(key string, dt []byte, anyFormat bool) (string, string, []Ra
} }
// use json directive, and search for { "key": "..." } // use json directive, and search for { "key": "..." }
jsonDirective := map[string]string{} jsonDirective := map[string]any{}
if err := json.Unmarshal(dt, &jsonDirective); err == nil { if err := json.Unmarshal(dt, &jsonDirective); err == nil {
if v, ok := jsonDirective[key]; ok { if vAny, ok := jsonDirective[key]; ok {
loc := []Range{{ if v, ok := vAny.(string); ok {
Start: Position{Line: line}, loc := []Range{{
End: Position{Line: line}, Start: Position{Line: line},
}} End: Position{Line: line},
return v, v, loc, true }}
return v, v, loc, true
}
} }
} }

View File

@ -281,7 +281,7 @@ func parseJSON(rest string) (*Node, map[string]bool, error) {
return nil, nil, errDockerfileNotJSONArray return nil, nil, errDockerfileNotJSONArray
} }
var myJSON []interface{} var myJSON []any
if err := json.Unmarshal([]byte(rest), &myJSON); err != nil { if err := json.Unmarshal([]byte(rest), &myJSON); err != nil {
return nil, nil, err return nil, nil, err
} }

View File

@ -220,7 +220,7 @@ func init() {
// based on the command and command arguments. A Node is created from the // based on the command and command arguments. A Node is created from the
// result of the dispatch. // result of the dispatch.
func newNodeFromLine(line string, d *directives, comments []string) (*Node, error) { func newNodeFromLine(line string, d *directives, comments []string) (*Node, error) {
cmd, flags, args, err := splitCommand(line) cmd, flags, args, err := splitCommand(line, d)
if err != nil { if err != nil {
return nil, err return nil, err
} }

View File

@ -7,7 +7,7 @@ import (
// splitCommand takes a single line of text and parses out the cmd and args, // splitCommand takes a single line of text and parses out the cmd and args,
// which are used for dispatching to more exact parsing functions. // which are used for dispatching to more exact parsing functions.
func splitCommand(line string) (string, []string, string, error) { func splitCommand(line string, d *directives) (string, []string, string, error) {
var args string var args string
var flags []string var flags []string
@ -16,7 +16,7 @@ func splitCommand(line string) (string, []string, string, error) {
if len(cmdline) == 2 { if len(cmdline) == 2 {
var err error var err error
args, flags, err = extractBuilderFlags(cmdline[1]) args, flags, err = extractBuilderFlags(cmdline[1], d)
if err != nil { if err != nil {
return "", nil, "", err return "", nil, "", err
} }
@ -25,7 +25,7 @@ func splitCommand(line string) (string, []string, string, error) {
return cmdline[0], flags, strings.TrimSpace(args), nil return cmdline[0], flags, strings.TrimSpace(args), nil
} }
func extractBuilderFlags(line string) (string, []string, error) { func extractBuilderFlags(line string, d *directives) (string, []string, error) {
// Parses the BuilderFlags and returns the remaining part of the line // Parses the BuilderFlags and returns the remaining part of the line
const ( const (
@ -87,7 +87,7 @@ func extractBuilderFlags(line string) (string, []string, error) {
phase = inQuote phase = inQuote
continue continue
} }
if ch == '\\' { if ch == d.escapeToken {
if pos+1 == len(line) { if pos+1 == len(line) {
continue // just skip \ at end continue // just skip \ at end
} }
@ -104,7 +104,7 @@ func extractBuilderFlags(line string) (string, []string, error) {
phase = inWord phase = inWord
continue continue
} }
if ch == '\\' { if ch == d.escapeToken {
if pos+1 == len(line) { if pos+1 == len(line) {
phase = inWord phase = inWord
continue // just skip \ at end continue // just skip \ at end

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"encoding/json" "encoding/json"
"fmt" "fmt"
"slices"
"github.com/containerd/platforms" "github.com/containerd/platforms"
"github.com/moby/buildkit/exporter/containerimage/exptypes" "github.com/moby/buildkit/exporter/containerimage/exptypes"
@ -54,23 +55,16 @@ func (bc *Client) Build(ctx context.Context, fn BuildFunc) (*ResultBuilder, erro
} }
} }
p := platforms.DefaultSpec() var p ocispecs.Platform
if tp != nil { if tp != nil {
p = *tp p = *tp
} else {
p = platforms.DefaultSpec()
} }
// in certain conditions we allow input platform to be extended from base image
if p.OS == "windows" && img.OS == p.OS {
if p.OSVersion == "" && img.OSVersion != "" {
p.OSVersion = img.OSVersion
}
if p.OSFeatures == nil && len(img.OSFeatures) > 0 {
p.OSFeatures = append([]string{}, img.OSFeatures...)
}
}
p = platforms.Normalize(p)
k := platforms.FormatAll(p) k := platforms.FormatAll(p)
p = extendWindowsPlatform(p, img.Platform)
p = platforms.Normalize(p)
if bc.MultiPlatformRequested { if bc.MultiPlatformRequested {
res.AddRef(k, ref) res.AddRef(k, ref)
@ -126,3 +120,16 @@ func (rb *ResultBuilder) EachPlatform(ctx context.Context, fn func(ctx context.C
} }
return eg.Wait() return eg.Wait()
} }
func extendWindowsPlatform(p, imgP ocispecs.Platform) ocispecs.Platform {
// in certain conditions we allow input platform to be extended from base image
if p.OS == "windows" && imgP.OS == p.OS {
if p.OSVersion == "" && imgP.OSVersion != "" {
p.OSVersion = imgP.OSVersion
}
if p.OSFeatures == nil && len(imgP.OSFeatures) > 0 {
p.OSFeatures = slices.Clone(imgP.OSFeatures)
}
}
return p
}

View File

@ -148,9 +148,11 @@ func (bc *Client) BuildOpts() client.BuildOpts {
func (bc *Client) init() error { func (bc *Client) init() error {
opts := bc.bopts.Opts opts := bc.bopts.Opts
defaultBuildPlatform := platforms.Normalize(platforms.DefaultSpec()) var defaultBuildPlatform ocispecs.Platform
if workers := bc.bopts.Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 { if workers := bc.bopts.Workers; len(workers) > 0 && len(workers[0].Platforms) > 0 {
defaultBuildPlatform = workers[0].Platforms[0] defaultBuildPlatform = workers[0].Platforms[0]
} else {
defaultBuildPlatform = platforms.Normalize(platforms.DefaultSpec())
} }
buildPlatforms := []ocispecs.Platform{defaultBuildPlatform} buildPlatforms := []ocispecs.Platform{defaultBuildPlatform}
targetPlatforms := []ocispecs.Platform{} targetPlatforms := []ocispecs.Platform{}
@ -459,9 +461,11 @@ func (bc *Client) NamedContext(name string, opt ContextOpt) (*NamedContext, erro
} }
name = strings.TrimSuffix(reference.FamiliarString(named), ":latest") name = strings.TrimSuffix(reference.FamiliarString(named), ":latest")
pp := platforms.DefaultSpec() var pp ocispecs.Platform
if opt.Platform != nil { if opt.Platform != nil {
pp = *opt.Platform pp = *opt.Platform
} else {
pp = platforms.DefaultSpec()
} }
pname := name + "::" + platforms.FormatAll(platforms.Normalize(pp)) pname := name + "::" + platforms.FormatAll(platforms.Normalize(pp))
nc, err := bc.namedContext(name, pname, opt) nc, err := bc.namedContext(name, pname, opt)

View File

@ -134,7 +134,7 @@ func (results *LintResults) ToResult(scb SourceInfoMap) (*client.Result, error)
if len(results.Warnings) > 0 || results.Error != nil { if len(results.Warnings) > 0 || results.Error != nil {
status = 1 status = 1
} }
res.AddMeta("result.statuscode", []byte(fmt.Sprintf("%d", status))) res.AddMeta("result.statuscode", fmt.Appendf(nil, "%d", status))
res.AddMeta("version", []byte(SubrequestLintDefinition.Version)) res.AddMeta("version", []byte(SubrequestLintDefinition.Version))
return res, nil return res, nil

View File

@ -45,7 +45,7 @@ type DockerAuthProviderConfig struct {
TLSConfigs map[string]*AuthTLSConfig TLSConfigs map[string]*AuthTLSConfig
// ExpireCachedAuth is a function that returns true auth config should be refreshed // ExpireCachedAuth is a function that returns true auth config should be refreshed
// instead of using a pre-cached result. // instead of using a pre-cached result.
// If nil then the cached result will expire after 10 minutes. // If nil then the cached result will expire after 4 minutes and 50 seconds.
// The function is called with the time the cached auth config was created // The function is called with the time the cached auth config was created
// and the server URL the auth config is for. // and the server URL the auth config is for.
ExpireCachedAuth func(created time.Time, serverURL string) bool ExpireCachedAuth func(created time.Time, serverURL string) bool
@ -59,7 +59,8 @@ type authConfigCacheEntry struct {
func NewDockerAuthProvider(cfg DockerAuthProviderConfig) session.Attachable { func NewDockerAuthProvider(cfg DockerAuthProviderConfig) session.Attachable {
if cfg.ExpireCachedAuth == nil { if cfg.ExpireCachedAuth == nil {
cfg.ExpireCachedAuth = func(created time.Time, _ string) bool { cfg.ExpireCachedAuth = func(created time.Time, _ string) bool {
return time.Since(created) > 10*time.Minute // Tokens for Google Artifact Registry via Workload Identity expire after 5 minutes.
return time.Since(created) > 4*time.Minute+50*time.Second
} }
} }
return &authProvider{ return &authProvider{

View File

@ -17,8 +17,8 @@ import (
type Stream interface { type Stream interface {
Context() context.Context Context() context.Context
SendMsg(m interface{}) error SendMsg(m any) error
RecvMsg(m interface{}) error RecvMsg(m any) error
} }
func newStreamWriter(stream grpc.ClientStream) io.WriteCloser { func newStreamWriter(stream grpc.ClientStream) io.WriteCloser {

View File

@ -32,8 +32,8 @@ func Dialer(api controlapi.ControlClient) session.Dialer {
type stream interface { type stream interface {
Context() context.Context Context() context.Context
SendMsg(m interface{}) error SendMsg(m any) error
RecvMsg(m interface{}) error RecvMsg(m any) error
} }
func streamToConn(stream stream) (net.Conn, <-chan struct{}) { func streamToConn(stream stream) (net.Conn, <-chan struct{}) {

View File

@ -9,8 +9,8 @@ import (
) )
type Stream interface { type Stream interface {
SendMsg(m interface{}) error SendMsg(m any) error
RecvMsg(m interface{}) error RecvMsg(m any) error
} }
func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error { func Copy(ctx context.Context, conn io.ReadWriteCloser, stream Stream, closeStream func() error) error {

View File

@ -63,23 +63,20 @@ type writer struct {
grpc.ServerStream grpc.ServerStream
} }
func (w *writer) Write(dt []byte) (int, error) { func (w *writer) Write(dt []byte) (n int, err error) {
// avoid sending too big messages on grpc stream
const maxChunkSize = 3 * 1024 * 1024 const maxChunkSize = 3 * 1024 * 1024
if len(dt) > maxChunkSize { for len(dt) > 0 {
n1, err := w.Write(dt[:maxChunkSize]) data := dt
if err != nil { if len(data) > maxChunkSize {
return n1, err data = data[:maxChunkSize]
} }
dt = dt[maxChunkSize:]
var n2 int msg := &upload.BytesMessage{Data: data}
if n2, err := w.Write(dt); err != nil { if err := w.SendMsg(msg); err != nil {
return n1 + n2, err return n, err
} }
return n1 + n2, nil n += len(data)
dt = dt[len(data):]
} }
if err := w.SendMsg(&upload.BytesMessage{Data: dt}); err != nil { return n, nil
return 0, err
}
return len(dt), nil
} }

View File

@ -12,7 +12,7 @@ func (e *OpError) Unwrap() error {
return e.error return e.error
} }
func WithOp(err error, anyOp interface{}, opDesc map[string]string) error { func WithOp(err error, anyOp any, opDesc map[string]string) error {
op, ok := anyOp.(*pb.Op) op, ok := anyOp.(*pb.Op)
if err == nil || !ok { if err == nil || !ok {
return err return err

View File

@ -98,10 +98,7 @@ func (s *Source) Print(w io.Writer) error {
func containsLine(rr []*pb.Range, l int) bool { func containsLine(rr []*pb.Range, l int) bool {
for _, r := range rr { for _, r := range rr {
e := r.End.Line e := max(r.End.Line, r.Start.Line)
if e < r.Start.Line {
e = r.Start.Line
}
if r.Start.Line <= int32(l) && e >= int32(l) { if r.Start.Line <= int32(l) && e >= int32(l) {
return true return true
} }
@ -112,10 +109,7 @@ func containsLine(rr []*pb.Range, l int) bool {
func getStartEndLine(rr []*pb.Range) (start int, end int, ok bool) { func getStartEndLine(rr []*pb.Range) (start int, end int, ok bool) {
first := true first := true
for _, r := range rr { for _, r := range rr {
e := r.End.Line e := max(r.End.Line, r.Start.Line)
if e < r.Start.Line {
e = r.Start.Line
}
if first || int(r.Start.Line) < start { if first || int(r.Start.Line) < start {
start = int(r.Start.Line) start = int(r.Start.Line)
} }

View File

@ -20,6 +20,8 @@ const AttrHTTPFilename = "http.filename"
const AttrHTTPPerm = "http.perm" const AttrHTTPPerm = "http.perm"
const AttrHTTPUID = "http.uid" const AttrHTTPUID = "http.uid"
const AttrHTTPGID = "http.gid" const AttrHTTPGID = "http.gid"
const AttrHTTPAuthHeaderSecret = "http.authheadersecret"
const AttrHTTPHeaderPrefix = "http.header."
const AttrImageResolveMode = "image.resolvemode" const AttrImageResolveMode = "image.resolvemode"
const AttrImageResolveModeDefault = "default" const AttrImageResolveModeDefault = "default"

View File

@ -31,9 +31,12 @@ const (
CapSourceGitSubdir apicaps.CapID = "source.git.subdir" CapSourceGitSubdir apicaps.CapID = "source.git.subdir"
CapSourceHTTP apicaps.CapID = "source.http" CapSourceHTTP apicaps.CapID = "source.http"
CapSourceHTTPAuth apicaps.CapID = "source.http.auth"
CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum" CapSourceHTTPChecksum apicaps.CapID = "source.http.checksum"
CapSourceHTTPPerm apicaps.CapID = "source.http.perm" CapSourceHTTPPerm apicaps.CapID = "source.http.perm"
CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid" // NOTE the historical typo
CapSourceHTTPUIDGID apicaps.CapID = "soruce.http.uidgid"
CapSourceHTTPHeader apicaps.CapID = "source.http.header"
CapSourceOCILayout apicaps.CapID = "source.ocilayout" CapSourceOCILayout apicaps.CapID = "source.ocilayout"
@ -230,7 +233,7 @@ func init() {
}) })
Caps.Init(apicaps.Cap{ Caps.Init(apicaps.Cap{
ID: CapSourceOCILayout, ID: CapSourceHTTPAuth,
Enabled: true, Enabled: true,
Status: apicaps.CapStatusExperimental, Status: apicaps.CapStatusExperimental,
}) })
@ -241,6 +244,18 @@ func init() {
Status: apicaps.CapStatusExperimental, Status: apicaps.CapStatusExperimental,
}) })
Caps.Init(apicaps.Cap{
ID: CapSourceHTTPHeader,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceOCILayout,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{ Caps.Init(apicaps.Cap{
ID: CapBuildOpLLBFileName, ID: CapBuildOpLLBFileName,
Enabled: true, Enabled: true,

View File

@ -1,6 +1,8 @@
package pb package pb
import ( import (
"slices"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1" ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
) )
@ -12,7 +14,7 @@ func (p *Platform) Spec() ocispecs.Platform {
OSVersion: p.OSVersion, OSVersion: p.OSVersion,
} }
if p.OSFeatures != nil { if p.OSFeatures != nil {
result.OSFeatures = append([]string{}, p.OSFeatures...) result.OSFeatures = slices.Clone(p.OSFeatures)
} }
return result return result
} }
@ -25,7 +27,7 @@ func PlatformFromSpec(p ocispecs.Platform) *Platform {
OSVersion: p.OSVersion, OSVersion: p.OSVersion,
} }
if p.OSFeatures != nil { if p.OSFeatures != nil {
result.OSFeatures = append([]string{}, p.OSFeatures...) result.OSFeatures = slices.Clone(p.OSFeatures)
} }
return result return result
} }

View File

@ -2,6 +2,7 @@ package contentutil
import ( import (
"net/url" "net/url"
"slices"
"strings" "strings"
"github.com/containerd/containerd/v2/core/content" "github.com/containerd/containerd/v2/core/content"
@ -24,11 +25,8 @@ func HasSource(info content.Info, refspec reference.Spec) (bool, error) {
return false, nil return false, nil
} }
for _, repo := range strings.Split(repoLabel, ",") { if slices.Contains(strings.Split(repoLabel, ","), target) {
// the target repo is not a candidate return true, nil
if repo == target {
return true, nil
}
} }
return false, nil return false, nil
} }

View File

@ -4,6 +4,7 @@ import (
"context" "context"
"io" "io"
"math/rand" "math/rand"
"slices"
"sort" "sort"
"sync" "sync"
"time" "time"
@ -211,7 +212,7 @@ func (c *call[T]) Err() error {
} }
} }
func (c *call[T]) Value(key interface{}) interface{} { func (c *call[T]) Value(key any) any {
if key == contextKey { if key == contextKey {
return c.progressState return c.progressState
} }
@ -353,7 +354,7 @@ func (ps *progressState) close(pw progress.Writer) {
for i, w := range ps.writers { for i, w := range ps.writers {
if w == rw { if w == rw {
w.Close() w.Close()
ps.writers = append(ps.writers[:i], ps.writers[i+1:]...) ps.writers = slices.Delete(ps.writers, i, i+1)
break break
} }
} }

View File

@ -6,6 +6,7 @@ import (
"io" "io"
"os" "os"
"os/exec" "os/exec"
"slices"
"strings" "strings"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -120,7 +121,7 @@ func NewGitCLI(opts ...Option) *GitCLI {
// with the given options applied on top. // with the given options applied on top.
func (cli *GitCLI) New(opts ...Option) *GitCLI { func (cli *GitCLI) New(opts ...Option) *GitCLI {
clone := *cli clone := *cli
clone.args = append([]string{}, cli.args...) clone.args = slices.Clone(cli.args)
for _, opt := range opts { for _, opt := range opts {
opt(&clone) opt(&clone)

View File

@ -10,7 +10,7 @@ import (
"google.golang.org/grpc" "google.golang.org/grpc"
) )
func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp interface{}, err error) { func UnaryServerInterceptor(ctx context.Context, req any, info *grpc.UnaryServerInfo, handler grpc.UnaryHandler) (resp any, err error) {
resp, err = handler(ctx, req) resp, err = handler(ctx, req)
oldErr := err oldErr := err
if err != nil { if err != nil {
@ -29,7 +29,7 @@ func UnaryServerInterceptor(ctx context.Context, req interface{}, info *grpc.Una
return resp, err return resp, err
} }
func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error { func StreamServerInterceptor(srv any, ss grpc.ServerStream, info *grpc.StreamServerInfo, handler grpc.StreamHandler) error {
err := ToGRPC(ss.Context(), handler(srv, ss)) err := ToGRPC(ss.Context(), handler(srv, ss))
if err != nil { if err != nil {
stack.Helper() stack.Helper()
@ -37,7 +37,7 @@ func StreamServerInterceptor(srv interface{}, ss grpc.ServerStream, info *grpc.S
return err return err
} }
func UnaryClientInterceptor(ctx context.Context, method string, req, reply interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error { func UnaryClientInterceptor(ctx context.Context, method string, req, reply any, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
err := FromGRPC(invoker(ctx, method, req, reply, cc, opts...)) err := FromGRPC(invoker(ctx, method, req, reply, cc, opts...))
if err != nil { if err != nil {
stack.Helper() stack.Helper()

View File

@ -16,7 +16,7 @@ type MultiWriter struct {
mu sync.Mutex mu sync.Mutex
items []*Progress items []*Progress
writers map[rawProgressWriter]struct{} writers map[rawProgressWriter]struct{}
meta map[string]interface{} meta map[string]any
} }
var _ rawProgressWriter = &MultiWriter{} var _ rawProgressWriter = &MultiWriter{}
@ -24,7 +24,7 @@ var _ rawProgressWriter = &MultiWriter{}
func NewMultiWriter(opts ...WriterOption) *MultiWriter { func NewMultiWriter(opts ...WriterOption) *MultiWriter {
mw := &MultiWriter{ mw := &MultiWriter{
writers: map[rawProgressWriter]struct{}{}, writers: map[rawProgressWriter]struct{}{},
meta: map[string]interface{}{}, meta: map[string]any{},
} }
for _, o := range opts { for _, o := range opts {
o(mw) o(mw)
@ -70,7 +70,7 @@ func (ps *MultiWriter) Delete(pw Writer) {
ps.mu.Unlock() ps.mu.Unlock()
} }
func (ps *MultiWriter) Write(id string, v interface{}) error { func (ps *MultiWriter) Write(id string, v any) error {
p := &Progress{ p := &Progress{
ID: id, ID: id,
Timestamp: time.Now(), Timestamp: time.Now(),
@ -83,7 +83,7 @@ func (ps *MultiWriter) Write(id string, v interface{}) error {
func (ps *MultiWriter) WriteRawProgress(p *Progress) error { func (ps *MultiWriter) WriteRawProgress(p *Progress) error {
meta := p.meta meta := p.meta
if len(ps.meta) > 0 { if len(ps.meta) > 0 {
meta = map[string]interface{}{} meta = map[string]any{}
maps.Copy(meta, p.meta) maps.Copy(meta, p.meta)
for k, v := range ps.meta { for k, v := range ps.meta {
if _, ok := meta[k]; !ok { if _, ok := meta[k]; !ok {

View File

@ -67,7 +67,7 @@ func WithProgress(ctx context.Context, pw Writer) context.Context {
return context.WithValue(ctx, contextKey, pw) return context.WithValue(ctx, contextKey, pw)
} }
func WithMetadata(key string, val interface{}) WriterOption { func WithMetadata(key string, val any) WriterOption {
return func(w Writer) { return func(w Writer) {
if pw, ok := w.(*progressWriter); ok { if pw, ok := w.(*progressWriter); ok {
pw.meta[key] = val pw.meta[key] = val
@ -84,7 +84,7 @@ type Controller interface {
} }
type Writer interface { type Writer interface {
Write(id string, value interface{}) error Write(id string, value any) error
Close() error Close() error
} }
@ -95,8 +95,8 @@ type Reader interface {
type Progress struct { type Progress struct {
ID string ID string
Timestamp time.Time Timestamp time.Time
Sys interface{} Sys any
meta map[string]interface{} meta map[string]any
} }
type Status struct { type Status struct {
@ -207,7 +207,7 @@ func pipe() (*progressReader, *progressWriter, func(error)) {
} }
func newWriter(pw *progressWriter) *progressWriter { func newWriter(pw *progressWriter) *progressWriter {
meta := make(map[string]interface{}) meta := make(map[string]any)
maps.Copy(meta, pw.meta) maps.Copy(meta, pw.meta)
pw = &progressWriter{ pw = &progressWriter{
reader: pw.reader, reader: pw.reader,
@ -220,10 +220,10 @@ func newWriter(pw *progressWriter) *progressWriter {
type progressWriter struct { type progressWriter struct {
done bool done bool
reader *progressReader reader *progressReader
meta map[string]interface{} meta map[string]any
} }
func (pw *progressWriter) Write(id string, v interface{}) error { func (pw *progressWriter) Write(id string, v any) error {
if pw.done { if pw.done {
return errors.Errorf("writing %s to closed progress writer", id) return errors.Errorf("writing %s to closed progress writer", id)
} }
@ -238,7 +238,7 @@ func (pw *progressWriter) Write(id string, v interface{}) error {
func (pw *progressWriter) WriteRawProgress(p *Progress) error { func (pw *progressWriter) WriteRawProgress(p *Progress) error {
meta := p.meta meta := p.meta
if len(pw.meta) > 0 { if len(pw.meta) > 0 {
meta = map[string]interface{}{} meta = map[string]any{}
maps.Copy(meta, p.meta) maps.Copy(meta, p.meta)
for k, v := range pw.meta { for k, v := range pw.meta {
if _, ok := meta[k]; !ok { if _, ok := meta[k]; !ok {
@ -267,14 +267,14 @@ func (pw *progressWriter) Close() error {
return nil return nil
} }
func (p *Progress) Meta(key string) (interface{}, bool) { func (p *Progress) Meta(key string) (any, bool) {
v, ok := p.meta[key] v, ok := p.meta[key]
return v, ok return v, ok
} }
type noOpWriter struct{} type noOpWriter struct{}
func (pw *noOpWriter) Write(_ string, _ interface{}) error { func (pw *noOpWriter) Write(_ string, _ any) error {
return nil return nil
} }

View File

@ -765,7 +765,7 @@ func (t *trace) update(s *client.SolveStatus, termWidth int) {
} else if sec < 100 { } else if sec < 100 {
prec = 2 prec = 2
} }
v.logs = append(v.logs, []byte(fmt.Sprintf("%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))) v.logs = append(v.logs, fmt.Appendf(nil, "%s %s", fmt.Sprintf("%.[2]*[1]f", sec, prec), dt))
} }
i++ i++
}) })
@ -787,7 +787,7 @@ func (t *trace) printErrorLogs(f io.Writer) {
} }
// printer keeps last logs buffer // printer keeps last logs buffer
if v.logsBuffer != nil { if v.logsBuffer != nil {
for i := 0; i < v.logsBuffer.Len(); i++ { for range v.logsBuffer.Len() {
if v.logsBuffer.Value != nil { if v.logsBuffer.Value != nil {
fmt.Fprintln(f, string(v.logsBuffer.Value.([]byte))) fmt.Fprintln(f, string(v.logsBuffer.Value.([]byte)))
} }
@ -1071,7 +1071,7 @@ func (disp *ttyDisplay) print(d displayInfo, width, height int, all bool) {
} }
// override previous content // override previous content
if diff := disp.lineCount - lineCount; diff > 0 { if diff := disp.lineCount - lineCount; diff > 0 {
for i := 0; i < diff; i++ { for range diff {
fmt.Fprintln(disp.c, strings.Repeat(" ", width)) fmt.Fprintln(disp.c, strings.Repeat(" ", width))
} }
fmt.Fprint(disp.c, aec.EmptyBuilder.Up(uint(diff)).Column(0).ANSI) fmt.Fprint(disp.c, aec.EmptyBuilder.Up(uint(diff)).Column(0).ANSI)

View File

@ -33,7 +33,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc {
} }
} }
if logger != nil { if logger != nil {
logger([]byte(fmt.Sprintf("error: %v\n", err.Error()))) logger(fmt.Appendf(nil, "error: %v\n", err.Error()))
} }
} else { } else {
return descs, nil return descs, nil
@ -43,7 +43,7 @@ func New(f images.HandlerFunc, logger func([]byte)) images.HandlerFunc {
return nil, err return nil, err
} }
if logger != nil { if logger != nil {
logger([]byte(fmt.Sprintf("retrying in %v\n", backoff))) logger(fmt.Appendf(nil, "retrying in %v\n", backoff))
} }
time.Sleep(backoff) time.Sleep(backoff)
backoff *= 2 backoff *= 2

View File

@ -2,7 +2,6 @@ package system
import ( import (
"path" "path"
"path/filepath"
"strings" "strings"
"github.com/pkg/errors" "github.com/pkg/errors"
@ -46,7 +45,7 @@ func NormalizePath(parent, newPath, inputOS string, keepSlash bool) (string, err
} }
var err error var err error
parent, err = CheckSystemDriveAndRemoveDriveLetter(parent, inputOS) parent, err = CheckSystemDriveAndRemoveDriveLetter(parent, inputOS, keepSlash)
if err != nil { if err != nil {
return "", errors.Wrap(err, "removing drive letter") return "", errors.Wrap(err, "removing drive letter")
} }
@ -61,7 +60,7 @@ func NormalizePath(parent, newPath, inputOS string, keepSlash bool) (string, err
newPath = parent newPath = parent
} }
newPath, err = CheckSystemDriveAndRemoveDriveLetter(newPath, inputOS) newPath, err = CheckSystemDriveAndRemoveDriveLetter(newPath, inputOS, keepSlash)
if err != nil { if err != nil {
return "", errors.Wrap(err, "removing drive letter") return "", errors.Wrap(err, "removing drive letter")
} }
@ -137,7 +136,7 @@ func IsAbs(pth, inputOS string) bool {
if inputOS == "" { if inputOS == "" {
inputOS = "linux" inputOS = "linux"
} }
cleanedPath, err := CheckSystemDriveAndRemoveDriveLetter(pth, inputOS) cleanedPath, err := CheckSystemDriveAndRemoveDriveLetter(pth, inputOS, false)
if err != nil { if err != nil {
return false return false
} }
@ -174,7 +173,7 @@ func IsAbs(pth, inputOS string) bool {
// There is no sane way to support this without adding a lot of complexity // There is no sane way to support this without adding a lot of complexity
// which I am not sure is worth it. // which I am not sure is worth it.
// \\.\C$\a --> Fail // \\.\C$\a --> Fail
func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string, error) { func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string, keepSlash bool) (string, error) {
if inputOS == "" { if inputOS == "" {
inputOS = "linux" inputOS = "linux"
} }
@ -193,9 +192,10 @@ func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string,
} }
parts := strings.SplitN(path, ":", 2) parts := strings.SplitN(path, ":", 2)
// Path does not have a drive letter. Just return it. // Path does not have a drive letter. Just return it.
if len(parts) < 2 { if len(parts) < 2 {
return ToSlash(filepath.Clean(path), inputOS), nil return ToSlash(cleanPath(path, inputOS, keepSlash), inputOS), nil
} }
// We expect all paths to be in C: // We expect all paths to be in C:
@ -220,5 +220,30 @@ func CheckSystemDriveAndRemoveDriveLetter(path string, inputOS string) (string,
// //
// We must return the second element of the split path, as is, without attempting to convert // We must return the second element of the split path, as is, without attempting to convert
// it to an absolute path. We have no knowledge of the CWD; that is treated elsewhere. // it to an absolute path. We have no knowledge of the CWD; that is treated elsewhere.
return ToSlash(filepath.Clean(parts[1]), inputOS), nil return ToSlash(cleanPath(parts[1], inputOS, keepSlash), inputOS), nil
}
// An adaptation of filepath.Clean to allow an option to
// retain the trailing slash, on either of the platforms.
// See https://github.com/moby/buildkit/issues/5249
func cleanPath(origPath, inputOS string, keepSlash bool) string {
// so as to handle cases like \\a\\b\\..\\c\\
// on Linux, when inputOS is Windows
origPath = ToSlash(origPath, inputOS)
if !keepSlash {
return path.Clean(origPath)
}
cleanedPath := path.Clean(origPath)
// Windows supports both \\ and / as path separator.
hasTrailingSlash := strings.HasSuffix(origPath, "/")
if inputOS == "windows" {
hasTrailingSlash = hasTrailingSlash || strings.HasSuffix(origPath, "\\")
}
if len(cleanedPath) > 1 && hasTrailingSlash {
return cleanedPath + "/"
}
return cleanedPath
} }

View File

@ -0,0 +1,17 @@
//go:build !windows
package system
import "path/filepath"
// IsAbsolutePath is just a wrapper that calls filepath.IsAbs.
// Has been added here just for symmetry with Windows.
func IsAbsolutePath(path string) bool {
return filepath.IsAbs(path)
}
// GetAbsolutePath does nothing on non-Windows, just returns
// the same path.
func GetAbsolutePath(path string) string {
return path
}

View File

@ -0,0 +1,29 @@
package system
import (
"path/filepath"
"strings"
)
// DefaultSystemVolumeName is the default system volume label on Windows
const DefaultSystemVolumeName = "C:"
// IsAbsolutePath prepends the default system volume label
// to the path that is presumed absolute, and then calls filepath.IsAbs
func IsAbsolutePath(path string) bool {
path = filepath.Clean(path)
if strings.HasPrefix(path, "\\") {
path = DefaultSystemVolumeName + path
}
return filepath.IsAbs(path)
}
// GetAbsolutePath returns an absolute path rooted
// to C:\\ on Windows.
func GetAbsolutePath(path string) string {
path = filepath.Clean(path)
if len(path) >= 2 && strings.EqualFold(path[:2], DefaultSystemVolumeName) {
return path
}
return DefaultSystemVolumeName + path
}

View File

@ -0,0 +1,248 @@
package client
import (
"context"
"crypto/tls"
"net"
"net/http"
"net/url"
"strings"
"time"
"github.com/pkg/errors"
)
// DummyHost is a hostname used for local communication.
//
// It acts as a valid formatted hostname for local connections (such as "unix://"
// or "npipe://") which do not require a hostname. It should never be resolved,
// but uses the special-purpose ".localhost" TLD (as defined in [RFC 2606, Section 2]
// and [RFC 6761, Section 6.3]).
//
// [RFC 7230, Section 5.4] defines that an empty header must be used for such
// cases:
//
// If the authority component is missing or undefined for the target URI,
// then a client MUST send a Host header field with an empty field-value.
//
// However, [Go stdlib] enforces the semantics of HTTP(S) over TCP, does not
// allow an empty header to be used, and requires req.URL.Scheme to be either
// "http" or "https".
//
// For further details, refer to:
//
// - https://github.com/docker/engine-api/issues/189
// - https://github.com/golang/go/issues/13624
// - https://github.com/golang/go/issues/61076
// - https://github.com/moby/moby/issues/45935
//
// [RFC 2606, Section 2]: https://www.rfc-editor.org/rfc/rfc2606.html#section-2
// [RFC 6761, Section 6.3]: https://www.rfc-editor.org/rfc/rfc6761#section-6.3
// [RFC 7230, Section 5.4]: https://datatracker.ietf.org/doc/html/rfc7230#section-5.4
// [Go stdlib]: https://github.com/golang/go/blob/6244b1946bc2101b01955468f1be502dbadd6807/src/net/http/transport.go#L558-L569
const DummyHost = "api.moby.localhost"
// DefaultVersion is the pinned version of the docker API we utilize.
const DefaultVersion = "1.47"
// Client is the API client that performs all operations
// against a docker server. Use [NewClientWithOpts] to construct one;
// the zero value is not usable.
type Client struct {
	// scheme sets the scheme for the client ("http" or "https").
	scheme string
	// host holds the server address to connect to, as originally provided
	// (e.g. "unix:///var/run/docker.sock").
	host string
	// proto holds the client protocol i.e. unix.
	proto string
	// addr holds the client address.
	addr string
	// basePath holds the path to prepend to the requests.
	basePath string
	// client used to send and receive http requests.
	client *http.Client
	// version of the server to talk to.
	version string
	// When the client transport is an *http.Transport (default) we need to do some extra things (like closing idle connections).
	// Store the original transport as the http.Client transport will be wrapped with tracing libs.
	baseTransport *http.Transport
}
// ErrRedirect is the error returned by checkRedirect when the request is non-GET.
var ErrRedirect = errors.New("unexpected redirect in response")
// CheckRedirect specifies the policy for dealing with redirect responses. It
// can be set on [http.Client.CheckRedirect] to prevent HTTP redirects for
// non-GET requests. It returns an [ErrRedirect] for non-GET request, otherwise
// returns a [http.ErrUseLastResponse], which is special-cased by http.Client
// to use the last response.
//
// Go 1.8 changed behavior for HTTP redirects (specifically 301, 307, and 308)
// in the client. The client (and by extension API client) can be made to send
// a request like "POST /containers//start" where what would normally be in the
// name section of the URL is empty. This triggers an HTTP 301 from the daemon.
//
// In go 1.8 this 301 is converted to a GET request, and ends up getting
// a 404 from the daemon. This behavior change manifests in the client in that
// before, the 301 was not followed and the client did not generate an error,
// but now results in a message like "Error response from daemon: page not found".
func CheckRedirect(_ *http.Request, via []*http.Request) error {
if via[0].Method == http.MethodGet {
return http.ErrUseLastResponse
}
return ErrRedirect
}
// NewClientWithOpts initializes a new API client with a default HTTPClient, and
// default API host and version. It also initializes the custom HTTP headers to
// add to each request.
//
// It takes an optional list of [Opt] functional arguments, which are applied in
// the order they're provided, which allows modifying the defaults when creating
// the client. For example, the following initializes a client that configures
// itself with values from environment variables ([FromEnv]), and has automatic
// API version negotiation enabled ([WithAPIVersionNegotiation]).
//
//	cli, err := client.NewClientWithOpts(
//		client.FromEnv,
//		client.WithAPIVersionNegotiation(),
//	)
func NewClientWithOpts(ops ...Opt) (*Client, error) {
	hostURL, err := ParseHostURL(DefaultDockerHost)
	if err != nil {
		return nil, err
	}
	client, err := defaultHTTPClient(hostURL)
	if err != nil {
		return nil, err
	}
	c := &Client{
		host:    DefaultDockerHost,
		version: DefaultVersion,
		client:  client,
		proto:   hostURL.Scheme,
		addr:    hostURL.Host,
	}
	// Apply caller-supplied options in order; later options may override
	// fields set by earlier ones.
	for _, op := range ops {
		if err := op(c); err != nil {
			return nil, err
		}
	}
	if tr, ok := c.client.Transport.(*http.Transport); ok {
		// Store the base transport before we wrap it in tracing libs below
		// This is used, as an example, to close idle connections when the client is closed
		c.baseTransport = tr
	}
	if c.scheme == "" {
		// TODO(stevvooe): This isn't really the right way to write clients in Go.
		// `NewClient` should probably only take an `*http.Client` and work from there.
		// Unfortunately, the model of having a host-ish/url-thingy as the connection
		// string has us confusing protocol and transport layers. We continue doing
		// this to avoid breaking existing clients but this should be addressed.
		if c.tlsConfig() != nil {
			c.scheme = "https"
		} else {
			c.scheme = "http"
		}
	}
	return c, nil
}
// tlsConfig returns the TLS client configuration of the stored base
// transport, or nil when no *http.Transport was captured at creation time.
func (cli *Client) tlsConfig() *tls.Config {
	if tr := cli.baseTransport; tr != nil {
		return tr.TLSClientConfig
	}
	return nil
}
// defaultHTTPClient builds the HTTP client used to talk to the daemon at
// hostURL. The transport keeps a small, bounded idle-connection pool so that
// long-lived processes do not leak connections that are never released.
func defaultHTTPClient(hostURL *url.URL) (*http.Client, error) {
	tr := &http.Transport{
		MaxIdleConns:    6,
		IdleConnTimeout: 30 * time.Second,
	}
	err := configureTransport(tr, hostURL.Scheme, hostURL.Host)
	if err != nil {
		return nil, err
	}
	c := &http.Client{
		Transport:     tr,
		CheckRedirect: CheckRedirect,
	}
	return c, nil
}
// Close releases resources held by the client. When the original transport
// was an *http.Transport, its idle connections are closed; otherwise this is
// a no-op. It always returns nil.
func (cli *Client) Close() error {
	if tr := cli.baseTransport; tr != nil {
		tr.CloseIdleConnections()
	}
	return nil
}
// ParseHostURL parses a url string, validates the string is a host url, and
// returns the parsed URL. The host must be of the form "proto://addr"; for
// "tcp" hosts any path component is preserved as the URL path.
func ParseHostURL(host string) (*url.URL, error) {
	proto, addr, ok := strings.Cut(host, "://")
	if !ok || addr == "" {
		return nil, errors.Errorf("unable to parse docker host `%s`", host)
	}
	var basePath string
	if proto == "tcp" {
		parsed, err := url.Parse("tcp://" + addr)
		if err != nil {
			return nil, err
		}
		addr, basePath = parsed.Host, parsed.Path
	}
	return &url.URL{
		Scheme: proto,
		Host:   addr,
		Path:   basePath,
	}, nil
}
// dialerFromTransport returns the DialContext function configured on the base
// transport, or nil when no custom dialer should be used: no base transport
// was captured, no DialContext is set, or a TLS config is present.
func (cli *Client) dialerFromTransport() func(context.Context, string, string) (net.Conn, error) {
	if cli.baseTransport == nil || cli.baseTransport.DialContext == nil {
		return nil
	}
	if cli.baseTransport.TLSClientConfig != nil {
		// When using a tls config we don't use the configured dialer but instead a fallback dialer...
		// Note: It seems like this should use the normal dialer and wrap the returned net.Conn in a tls.Conn
		// I honestly don't know why it doesn't do that, but it doesn't and such a change is entirely unrelated to the change in this commit.
		return nil
	}
	return cli.baseTransport.DialContext
}
// Dialer returns a dialer for a raw stream connection, with an HTTP/1.1 header,
// that can be used for proxying the daemon connection. It is used by
// ["docker dial-stdio"].
//
// ["docker dial-stdio"]: https://github.com/docker/cli/pull/1014
func (cli *Client) Dialer() func(context.Context) (net.Conn, error) {
	return func(ctx context.Context) (net.Conn, error) {
		// Prefer a dialer configured on the transport (skipped when TLS is
		// configured; see dialerFromTransport).
		if dialFn := cli.dialerFromTransport(); dialFn != nil {
			return dialFn(ctx, cli.proto, cli.addr)
		}
		switch cli.proto {
		case "unix":
			return net.Dial(cli.proto, cli.addr)
		case "npipe":
			return DialPipe(cli.addr, 32*time.Second)
		default:
			// TCP-style address: use TLS when a config is present,
			// plain dial otherwise.
			if tlsConfig := cli.tlsConfig(); tlsConfig != nil {
				return tls.Dial(cli.proto, cli.addr, tlsConfig)
			}
			return net.Dial(cli.proto, cli.addr)
		}
	}
}

View File

@ -0,0 +1,7 @@
//go:build !windows
package client
// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
// (EnvOverrideHost) environment variable is unset or empty.
// On non-Windows platforms this is the standard Docker unix socket.
const DefaultDockerHost = "unix:///var/run/docker.sock"

View File

@ -0,0 +1,5 @@
package client
// DefaultDockerHost defines OS-specific default host if the DOCKER_HOST
// (EnvOverrideHost) environment variable is unset or empty.
// On Windows this is the daemon's default named pipe.
const DefaultDockerHost = "npipe:////./pipe/docker_engine"

View File

@ -0,0 +1,13 @@
package client
import (
"github.com/pkg/errors"
)
// ErrorConnectionFailed returns an error with host in the error message when connection to docker daemon failed.
func ErrorConnectionFailed(host string) error {
	if host != "" {
		return errors.Errorf("Cannot connect to the Docker daemon at %s. Is the docker daemon running?", host)
	}
	return errors.New("Cannot connect to the Docker daemon. Is the docker daemon running on this host?")
}

View File

@ -0,0 +1,118 @@
package client
import (
"bufio"
"context"
"net"
"net/http"
"time"
"github.com/pkg/errors"
)
// DialHijack returns a hijacked connection with negotiated protocol proto.
//
// The meta headers are applied to the upgrade request so callers can pass
// protocol-specific metadata to the daemon. (Previously the meta parameter
// was accepted but silently ignored; upstream moby's client applies it.)
func (cli *Client) DialHijack(ctx context.Context, url, proto string, meta map[string][]string) (net.Conn, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodPost, url, nil)
	if err != nil {
		return nil, err
	}
	req = cli.addHeaders(req, http.Header(meta))
	conn, _, err := cli.setupHijackConn(req, proto)
	return conn, err
}
// setupHijackConn dials the daemon, performs an HTTP Upgrade handshake to
// proto, and returns the raw connection plus the response's Content-Type.
// On error the dialed connection is closed via the retErr deferred cleanup.
func (cli *Client) setupHijackConn(req *http.Request, proto string) (_ net.Conn, _ string, retErr error) {
	ctx := req.Context()
	req.Header.Set("Connection", "Upgrade")
	req.Header.Set("Upgrade", proto)
	dialer := cli.Dialer()
	conn, err := dialer(ctx)
	if err != nil {
		return nil, "", errors.Wrap(err, "cannot connect to the Docker daemon. Is 'docker daemon' running on this host?")
	}
	defer func() {
		if retErr != nil {
			conn.Close()
		}
	}()
	// When we set up a TCP connection for hijack, there could be long periods
	// of inactivity (a long running command with no output) that in certain
	// network setups may cause ECONNTIMEOUT, leaving the client in an unknown
	// state. Setting TCP KeepAlive on the socket connection will prohibit
	// ECONNTIMEOUT unless the socket connection truly is broken
	if tcpConn, ok := conn.(*net.TCPConn); ok {
		_ = tcpConn.SetKeepAlive(true)
		_ = tcpConn.SetKeepAlivePeriod(30 * time.Second)
	}
	hc := &hijackedConn{conn, bufio.NewReader(conn)}
	// Server hijacks the connection, error 'connection closed' expected
	resp, err := hc.RoundTrip(req)
	if err != nil {
		return nil, "", err
	}
	if resp.StatusCode != http.StatusSwitchingProtocols {
		_ = resp.Body.Close()
		return nil, "", errors.Errorf("unable to upgrade to %s, received %d", proto, resp.StatusCode)
	}
	if hc.r.Buffered() > 0 {
		// If there is buffered content, wrap the connection. We return an
		// object that implements CloseWrite if the underlying connection
		// implements it.
		if _, ok := hc.Conn.(CloseWriter); ok {
			conn = &hijackedConnCloseWriter{hc}
		} else {
			conn = hc
		}
	} else {
		// Nothing buffered: drop the reader's reference to the connection so
		// the raw conn can be returned and the buffer garbage-collected.
		hc.r.Reset(nil)
	}
	mediaType := resp.Header.Get("Content-Type")
	return conn, mediaType, nil
}
// CloseWriter is implemented by connections that can close their write
// side (half-close), preventing further writes while reads remain possible.
type CloseWriter interface {
	CloseWrite() error
}
// hijackedConn wraps a net.Conn and is returned by setupHijackConn in the case
// that a) there was already buffered data in the http layer when Hijack() was
// called, and b) the underlying net.Conn does *not* implement CloseWrite().
// hijackedConn does not implement CloseWrite() either.
type hijackedConn struct {
	net.Conn
	r *bufio.Reader
}
// RoundTrip writes the request directly to the underlying connection and
// reads the response from the buffered reader, bypassing the standard HTTP
// transport machinery (needed for the Upgrade handshake).
func (c *hijackedConn) RoundTrip(req *http.Request) (*http.Response, error) {
	if err := req.Write(c.Conn); err != nil {
		return nil, err
	}
	return http.ReadResponse(c.r, req)
}
// Read reads via the buffered reader so that bytes buffered during the HTTP
// handshake are not lost.
func (c *hijackedConn) Read(b []byte) (int, error) {
	return c.r.Read(b)
}
// hijackedConnCloseWriter is a hijackedConn which additionally implements
// CloseWrite(). It is returned by setupHijackConn in the case that a) there
// was already buffered data in the http layer when Hijack() was called, and b)
// the underlying net.Conn *does* implement CloseWrite().
type hijackedConnCloseWriter struct {
	*hijackedConn
}
// Compile-time check that the wrapper satisfies CloseWriter.
var _ CloseWriter = &hijackedConnCloseWriter{}
// CloseWrite delegates to the underlying connection. The type assertion is
// safe because setupHijackConn only builds this wrapper when the wrapped
// net.Conn implements CloseWriter.
func (c *hijackedConnCloseWriter) CloseWrite() error {
	conn := c.Conn.(CloseWriter)
	return conn.CloseWrite()
}

View File

@ -0,0 +1,28 @@
package client
import (
"net/http"
"github.com/pkg/errors"
)
// Opt is a configuration option to initialize a [Client].
type Opt func(*Client) error

// WithHost overrides the client host with the specified one.
func WithHost(host string) Opt {
	return func(c *Client) error {
		hostURL, err := ParseHostURL(host)
		if err != nil {
			return err
		}
		c.host = host
		c.proto = hostURL.Scheme
		c.addr = hostURL.Host
		c.basePath = hostURL.Path
		transport, ok := c.client.Transport.(*http.Transport)
		if !ok {
			return errors.Errorf("cannot apply host to transport: %T", c.client.Transport)
		}
		return configureTransport(transport, c.proto, c.addr)
	}
}

View File

@ -0,0 +1,25 @@
package client
import (
"context"
"net/http"
"path"
)
// PingResponse is an empty placeholder; this client does not decode any
// payload from the ping response.
type PingResponse struct{}
// Ping checks that the daemon's API server is reachable by hitting the
// non-versioned /_ping endpoint with a HEAD request. It returns nil when the
// daemon responds with a successful status code.
func (cli *Client) Ping(ctx context.Context) error {
	// Using cli.buildRequest() + cli.doRequest() instead of cli.sendRequest()
	// because ping requests are used during API version negotiation, so we want
	// to hit the non-versioned /_ping endpoint, not /v1.xx/_ping
	req, err := cli.buildRequest(ctx, http.MethodHead, path.Join(cli.basePath, "/_ping"), nil, nil)
	if err != nil {
		return err
	}
	serverResp, err := cli.doRequest(req)
	if err != nil {
		return err
	}
	defer ensureReaderClosed(serverResp)
	return cli.checkResponseErr(serverResp)
}

View File

@ -0,0 +1,162 @@
//nolint:forbidigo
package client
import (
"context"
"encoding/json"
"fmt"
"io"
"net"
"net/http"
"net/url"
"os"
"strings"
"github.com/pkg/errors"
)
// buildRequest constructs an *http.Request for the given method and path,
// applying the caller-supplied headers and the client's scheme and address.
// For unix/npipe transports the Host header is overridden with DummyHost,
// since those protocols have no real hostname (see DummyHost).
func (cli *Client) buildRequest(ctx context.Context, method, path string, body io.Reader, headers http.Header) (*http.Request, error) {
	req, err := http.NewRequestWithContext(ctx, method, path, body)
	if err != nil {
		return nil, err
	}
	req = cli.addHeaders(req, headers)
	req.URL.Scheme = cli.scheme
	req.URL.Host = cli.addr
	if cli.proto == "unix" || cli.proto == "npipe" {
		// Override host header for non-tcp connections.
		req.Host = DummyHost
	}
	// Default the Content-Type when a body is present and none was supplied.
	if body != nil && req.Header.Get("Content-Type") == "" {
		req.Header.Set("Content-Type", "text/plain")
	}
	return req, nil
}
// doRequest performs req on the underlying HTTP client, translating low-level
// connection failures into friendlier, actionable errors (TLS mismatches,
// permission problems on the socket, Windows named-pipe elevation issues).
// Context sentinel errors are returned unwrapped so callers can compare
// against them directly.
func (cli *Client) doRequest(req *http.Request) (*http.Response, error) {
	resp, err := cli.client.Do(req)
	if err != nil {
		// Plaintext client talking to a TLS endpoint produces a malformed
		// HTTP response error; give a hint instead.
		if cli.scheme != "https" && strings.Contains(err.Error(), "malformed HTTP response") {
			return nil, errors.Errorf("%v.\n* Are you trying to connect to a TLS-enabled daemon without TLS?", err)
		}
		if cli.scheme == "https" && strings.Contains(err.Error(), "bad certificate") {
			return nil, errors.Wrap(err, "the server probably has client authentication (--tlsverify) enabled; check your TLS client certification settings")
		}
		// Don't decorate context sentinel errors; users may be comparing to
		// them directly.
		if errors.Is(err, context.Canceled) || errors.Is(err, context.DeadlineExceeded) {
			return nil, err
		}
		if uErr, ok := err.(*url.Error); ok {
			if nErr, ok := uErr.Err.(*net.OpError); ok {
				if os.IsPermission(nErr.Err) {
					return nil, errors.Wrapf(err, "permission denied while trying to connect to the Docker daemon socket at %v", cli.host)
				}
			}
		}
		if nErr, ok := err.(net.Error); ok {
			// FIXME(thaJeztah): any net.Error should be considered a connection error (but we should include the original error)?
			if nErr.Timeout() {
				return nil, ErrorConnectionFailed(cli.host)
			}
			if strings.Contains(nErr.Error(), "connection refused") || strings.Contains(nErr.Error(), "dial unix") {
				return nil, ErrorConnectionFailed(cli.host)
			}
		}
		// Although there's not a strongly typed error for this in go-winio,
		// lots of people are using the default configuration for the docker
		// daemon on Windows where the daemon is listening on a named pipe
		// `//./pipe/docker_engine, and the client must be running elevated.
		// Give users a clue rather than the not-overly useful message
		// such as `error during connect: Get http://%2F%2F.%2Fpipe%2Fdocker_engine/v1.26/info:
		// open //./pipe/docker_engine: The system cannot find the file specified.`.
		// Note we can't string compare "The system cannot find the file specified" as
		// this is localised - for example in French the error would be
		// `open //./pipe/docker_engine: Le fichier spécifié est introuvable.`
		if strings.Contains(err.Error(), `open //./pipe/docker_engine`) {
			// Checks if client is running with elevated privileges
			if f, elevatedErr := os.Open(`\\.\PHYSICALDRIVE0`); elevatedErr != nil {
				err = errors.Wrap(err, "in the default daemon configuration on Windows, the docker client must be run with elevated privileges to connect")
			} else {
				_ = f.Close()
				err = errors.Wrap(err, "this error may indicate that the docker daemon is not running")
			}
		}
		return nil, errors.Wrap(err, "error during connect")
	}
	return resp, nil
}
// checkResponseErr returns nil for nil responses and for status codes in
// [200, 400); otherwise it builds an error from the response body (read up to
// 1 MiB). JSON bodies are decoded as {"message": ...}; anything else is used
// verbatim, trimmed of surrounding whitespace.
func (cli *Client) checkResponseErr(serverResp *http.Response) error {
	if serverResp == nil {
		return nil
	}
	if serverResp.StatusCode >= 200 && serverResp.StatusCode < 400 {
		return nil
	}
	var body []byte
	var err error
	var reqURL string
	if serverResp.Request != nil {
		reqURL = serverResp.Request.URL.String()
	}
	statusMsg := serverResp.Status
	if statusMsg == "" {
		statusMsg = http.StatusText(serverResp.StatusCode)
	}
	if serverResp.Body != nil {
		// Cap the amount of body read so a misbehaving server cannot make
		// the client allocate unbounded memory.
		bodyMax := 1 * 1024 * 1024 // 1 MiB
		bodyR := &io.LimitedReader{
			R: serverResp.Body,
			N: int64(bodyMax),
		}
		body, err = io.ReadAll(bodyR)
		if err != nil {
			return err
		}
		// N == 0 means the limit was hit, i.e. the body exceeded bodyMax.
		if bodyR.N == 0 {
			return fmt.Errorf("request returned %s with a message (> %d bytes) for API route and version %s, check if the server supports the requested API version", statusMsg, bodyMax, reqURL)
		}
	}
	if len(body) == 0 {
		return fmt.Errorf("request returned %s for API route and version %s, check if the server supports the requested API version", statusMsg, reqURL)
	}
	var daemonErr error
	if serverResp.Header.Get("Content-Type") == "application/json" {
		var errorResponse struct {
			Message string `json:"message"`
		}
		if err := json.Unmarshal(body, &errorResponse); err != nil {
			return errors.Wrap(err, "Error reading JSON")
		}
		daemonErr = errors.New(strings.TrimSpace(errorResponse.Message))
	} else {
		daemonErr = errors.New(strings.TrimSpace(string(body)))
	}
	return errors.Wrap(daemonErr, "Error response from daemon")
}
// addHeaders copies the provided headers onto req, canonicalizing each key.
// Existing values stored under the same canonical key are overwritten.
func (cli *Client) addHeaders(req *http.Request, headers http.Header) *http.Request {
	for key, values := range headers {
		req.Header[http.CanonicalHeaderKey(key)] = values
	}
	return req
}
func ensureReaderClosed(response *http.Response) {
if response.Body != nil {
// Drain up to 512 bytes and close the body to let the Transport reuse the connection
_, _ = io.CopyN(io.Discard, response.Body, 512)
_ = response.Body.Close()
}
}

View File

@ -0,0 +1,32 @@
package client
import (
"net"
"net/http"
"time"
)
// defaultTimeout bounds the dial phase of new connections.
const defaultTimeout = 10 * time.Second

// configureTransport configures the specified [http.Transport] according to the specified proto
// and addr.
//
// If the proto is unix (using a unix socket to communicate) or npipe the compression is disabled.
// For other protos, compression is enabled. If you want to manually enable/disable compression,
// make sure you do it _after_ any subsequent calls to ConfigureTransport is made against the same
// [http.Transport].
func configureTransport(tr *http.Transport, proto, addr string) error {
	switch proto {
	case "unix":
		return configureUnixTransport(tr, proto, addr)
	case "npipe":
		return configureNpipeTransport(tr, proto, addr)
	}
	// TCP (and anything else): honor proxy environment variables, keep
	// compression enabled, and dial with a bounded timeout.
	tr.Proxy = http.ProxyFromEnvironment
	tr.DisableCompression = false
	tr.DialContext = (&net.Dialer{
		Timeout: defaultTimeout,
	}).DialContext
	return nil
}

View File

@ -0,0 +1,40 @@
//go:build !windows
package client
import (
"context"
"net"
"net/http"
"syscall"
"time"
"github.com/pkg/errors"
)
// maxUnixSocketPathSize is the kernel-imposed limit on the length of a unix
// socket path, derived from the sun_path field of sockaddr_un.
const maxUnixSocketPathSize = len(syscall.RawSockaddrUnix{}.Path)
// configureUnixTransport configures tr to dial the unix socket at addr,
// ignoring the network/address arguments the HTTP layer would otherwise pass.
func configureUnixTransport(tr *http.Transport, proto, addr string) error {
	if len(addr) > maxUnixSocketPathSize {
		return errors.Errorf("unix socket path %q is too long", addr)
	}
	// No need for compression in local communications.
	tr.DisableCompression = true
	dialer := &net.Dialer{
		Timeout: defaultTimeout,
	}
	tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
		return dialer.DialContext(ctx, proto, addr)
	}
	return nil
}
// configureNpipeTransport always fails on non-Windows platforms: named pipes
// are a Windows-only transport.
func configureNpipeTransport(_ *http.Transport, _, _ string) error {
	return errors.New("protocol not available")
}
// DialPipe connects to a Windows named pipe.
// This is not supported on other OSes.
func DialPipe(_ string, _ time.Duration) (net.Conn, error) {
	return nil, syscall.EAFNOSUPPORT
}

View File

@ -0,0 +1,29 @@
package client
import (
"context"
"net"
"net/http"
"time"
"github.com/Microsoft/go-winio"
"github.com/pkg/errors"
)
// configureUnixTransport always fails on Windows: unix sockets are not a
// supported transport on this platform.
func configureUnixTransport(_ *http.Transport, _, _ string) error {
	return errors.New("protocol not available")
}
// configureNpipeTransport configures tr to dial the named pipe at addr via
// go-winio, ignoring the network/address arguments the HTTP layer passes.
func configureNpipeTransport(tr *http.Transport, _, addr string) error {
	// No need for compression in local communications.
	tr.DisableCompression = true
	tr.DialContext = func(ctx context.Context, _, _ string) (net.Conn, error) {
		return winio.DialPipeContext(ctx, addr)
	}
	return nil
}
// DialPipe connects to a Windows named pipe with the given timeout.
func DialPipe(addr string, timeout time.Duration) (net.Conn, error) {
	return winio.DialPipe(addr, &timeout)
}

View File

@ -16,12 +16,12 @@ import (
) )
type LogT interface { type LogT interface {
Logf(string, ...interface{}) Logf(string, ...any)
} }
type nopLog struct{} type nopLog struct{}
func (nopLog) Logf(string, ...interface{}) {} func (nopLog) Logf(string, ...any) {}
const ( const (
shortLen = 12 shortLen = 12
@ -73,7 +73,7 @@ func NewDaemon(workingDir string, ops ...Option) (*Daemon, error) {
dockerdBinary: DefaultDockerdBinary, dockerdBinary: DefaultDockerdBinary,
Log: nopLog{}, Log: nopLog{},
sockPath: filepath.Join(sockRoot, id+".sock"), sockPath: filepath.Join(sockRoot, id+".sock"),
envs: append([]string{}, os.Environ()...), envs: os.Environ(),
} }
for _, op := range ops { for _, op := range ops {

View File

@ -5,6 +5,7 @@ import (
"context" "context"
"fmt" "fmt"
"maps" "maps"
"math"
"math/rand" "math/rand"
"os" "os"
"os/exec" "os/exec"
@ -12,6 +13,7 @@ import (
"reflect" "reflect"
"runtime" "runtime"
"sort" "sort"
"strconv"
"strings" "strings"
"sync" "sync"
"testing" "testing"
@ -58,7 +60,7 @@ type Sandbox interface {
PrintLogs(*testing.T) PrintLogs(*testing.T)
ClearLogs() ClearLogs()
NewRegistry() (string, error) NewRegistry() (string, error)
Value(string) interface{} // chosen matrix value Value(string) any // chosen matrix value
Name() string Name() string
CDISpecDir() string CDISpecDir() string
} }
@ -129,10 +131,10 @@ func List() []Worker {
// tests. // tests.
type TestOpt func(*testConf) type TestOpt func(*testConf)
func WithMatrix(key string, m map[string]interface{}) TestOpt { func WithMatrix(key string, m map[string]any) TestOpt {
return func(tc *testConf) { return func(tc *testConf) {
if tc.matrix == nil { if tc.matrix == nil {
tc.matrix = map[string]map[string]interface{}{} tc.matrix = map[string]map[string]any{}
} }
tc.matrix[key] = m tc.matrix[key] = m
} }
@ -148,7 +150,7 @@ func WithMirroredImages(m map[string]string) TestOpt {
} }
type testConf struct { type testConf struct {
matrix map[string]map[string]interface{} matrix map[string]map[string]any
mirroredImages map[string]string mirroredImages map[string]string
} }
@ -161,6 +163,29 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) {
t.Skip("skipping integration tests") t.Skip("skipping integration tests")
} }
var sliceSplit int
if filter, ok := lookupTestFilter(); ok {
parts := strings.Split(filter, "/")
if len(parts) >= 2 {
const prefix = "slice="
if strings.HasPrefix(parts[1], prefix) {
conf := strings.TrimPrefix(parts[1], prefix)
offsetS, totalS, ok := strings.Cut(conf, "-")
if !ok {
t.Fatalf("invalid slice=%q", conf)
}
offset, err := strconv.Atoi(offsetS)
require.NoError(t, err)
total, err := strconv.Atoi(totalS)
require.NoError(t, err)
if offset < 1 || total < 1 || offset > total {
t.Fatalf("invalid slice=%q", conf)
}
sliceSplit = total
}
}
}
var tc testConf var tc testConf
for _, o := range opt { for _, o := range opt {
o(&tc) o(&tc)
@ -182,9 +207,14 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) {
}) })
for _, br := range list { for _, br := range list {
for _, tc := range testCases { for i, tc := range testCases {
for _, mv := range matrix { for _, mv := range matrix {
fn := tc.Name() fn := tc.Name()
if sliceSplit > 0 {
pageLimit := int(math.Ceil(float64(len(testCases)) / float64(sliceSplit)))
sliceName := fmt.Sprintf("slice=%d-%d/", i/pageLimit+1, sliceSplit)
fn = sliceName + fn
}
name := fn + "/worker=" + br.Name() + mv.functionSuffix() name := fn + "/worker=" + br.Name() + mv.functionSuffix()
func(fn, testName string, br Worker, tc Test, mv matrixValue) { func(fn, testName string, br Worker, tc Test, mv matrixValue) {
ok := t.Run(testName, func(t *testing.T) { ok := t.Run(testName, func(t *testing.T) {
@ -221,7 +251,7 @@ func Run(t *testing.T, testCases []Test, opt ...TestOpt) {
} }
} }
func getFunctionName(i interface{}) string { func getFunctionName(i any) string {
fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name() fullname := runtime.FuncForPC(reflect.ValueOf(i).Pointer()).Name()
dot := strings.LastIndex(fullname, ".") + 1 dot := strings.LastIndex(fullname, ".") + 1
return strings.Title(fullname[dot:]) //nolint:staticcheck // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support return strings.Title(fullname[dot:]) //nolint:staticcheck // ignoring "SA1019: strings.Title is deprecated", as for our use we don't need full unicode support
@ -330,37 +360,80 @@ func lazyMirrorRunnerFunc(t *testing.T, images map[string]string) func() string
var mirror string var mirror string
return func() string { return func() string {
once.Do(func() { once.Do(func() {
host, cleanup, err := runMirror(t, images) m, err := RunMirror()
require.NoError(t, err) require.NoError(t, err)
t.Cleanup(func() { _ = cleanup() }) require.NoError(t, m.AddImages(t, images))
mirror = host t.Cleanup(func() { _ = m.Close() })
mirror = m.Host
}) })
return mirror return mirror
} }
} }
func runMirror(t *testing.T, mirroredImages map[string]string) (host string, _ func() error, err error) { type Mirror struct {
Host string
dir string
cleanup func() error
}
func (m *Mirror) lock() (*flock.Flock, error) {
if m.dir == "" {
return nil, nil
}
if err := os.MkdirAll(m.dir, 0700); err != nil {
return nil, err
}
lock := flock.New(filepath.Join(m.dir, "lock"))
if err := lock.Lock(); err != nil {
return nil, err
}
return lock, nil
}
func (m *Mirror) Close() error {
if m.cleanup != nil {
return m.cleanup()
}
return nil
}
func (m *Mirror) AddImages(t *testing.T, images map[string]string) (err error) {
lock, err := m.lock()
if err != nil {
return err
}
defer func() {
if lock != nil {
lock.Unlock()
}
}()
if err := copyImagesLocal(t, m.Host, images); err != nil {
return err
}
return nil
}
func RunMirror() (_ *Mirror, err error) {
mirrorDir := os.Getenv("BUILDKIT_REGISTRY_MIRROR_DIR") mirrorDir := os.Getenv("BUILDKIT_REGISTRY_MIRROR_DIR")
var lock *flock.Flock m := &Mirror{
if mirrorDir != "" { dir: mirrorDir,
if err := os.MkdirAll(mirrorDir, 0700); err != nil {
return "", nil, err
}
lock = flock.New(filepath.Join(mirrorDir, "lock"))
if err := lock.Lock(); err != nil {
return "", nil, err
}
defer func() {
if err != nil {
lock.Unlock()
}
}()
} }
mirror, cleanup, err := NewRegistry(mirrorDir) lock, err := m.lock()
if err != nil { if err != nil {
return "", nil, err return nil, err
}
defer func() {
if err != nil {
lock.Unlock()
}
}()
host, cleanup, err := NewRegistry(mirrorDir)
if err != nil {
return nil, err
} }
defer func() { defer func() {
if err != nil { if err != nil {
@ -368,17 +441,16 @@ func runMirror(t *testing.T, mirroredImages map[string]string) (host string, _ f
} }
}() }()
if err := copyImagesLocal(t, mirror, mirroredImages); err != nil { m.Host = host
return "", nil, err m.cleanup = cleanup
}
if mirrorDir != "" { if lock != nil {
if err := lock.Unlock(); err != nil { if err := lock.Unlock(); err != nil {
return "", nil, err return nil, err
} }
} }
return mirror, cleanup, err return m, err
} }
type matrixValue struct { type matrixValue struct {
@ -400,10 +472,10 @@ func (mv matrixValue) functionSuffix() string {
type matrixValueChoice struct { type matrixValueChoice struct {
name string name string
value interface{} value any
} }
func newMatrixValue(key, name string, v interface{}) matrixValue { func newMatrixValue(key, name string, v any) matrixValue {
return matrixValue{ return matrixValue{
fn: []string{key}, fn: []string{key},
values: map[string]matrixValueChoice{ values: map[string]matrixValueChoice{
@ -463,3 +535,13 @@ func UnixOrWindows[T any](unix, windows T) T {
} }
return unix return unix
} }
func lookupTestFilter() (string, bool) {
const prefix = "-test.run="
for _, arg := range os.Args {
if strings.HasPrefix(arg, prefix) {
return strings.TrimPrefix(arg, prefix), true
}
}
return "", false
}

View File

@ -86,7 +86,7 @@ func (sb *sandbox) Cmd(args ...string) *exec.Cmd {
return cmd return cmd
} }
func (sb *sandbox) Value(k string) interface{} { func (sb *sandbox) Value(k string) any {
return sb.mv.values[k].value return sb.mv.values[k].value
} }
@ -197,7 +197,7 @@ func RootlessSupported(uid int) bool {
return true return true
} }
func PrintLogs(logs map[string]*bytes.Buffer, f func(args ...interface{})) { func PrintLogs(logs map[string]*bytes.Buffer, f func(args ...any)) {
for name, l := range logs { for name, l := range logs {
f(name) f(name)
s := bufio.NewScanner(l) s := bufio.NewScanner(l)

View File

@ -2,6 +2,7 @@ package workers
import ( import (
"os" "os"
"slices"
"strings" "strings"
) )
@ -52,23 +53,14 @@ func (b backend) ExtraEnv() []string {
func (b backend) Supports(feature string) bool { func (b backend) Supports(feature string) bool {
if enabledFeatures := os.Getenv("BUILDKIT_TEST_ENABLE_FEATURES"); enabledFeatures != "" { if enabledFeatures := os.Getenv("BUILDKIT_TEST_ENABLE_FEATURES"); enabledFeatures != "" {
for _, enabledFeature := range strings.Split(enabledFeatures, ",") { if slices.Contains(strings.Split(enabledFeatures, ","), feature) {
if feature == enabledFeature { return true
return true
}
} }
} }
if disabledFeatures := os.Getenv("BUILDKIT_TEST_DISABLE_FEATURES"); disabledFeatures != "" { if disabledFeatures := os.Getenv("BUILDKIT_TEST_DISABLE_FEATURES"); disabledFeatures != "" {
for _, disabledFeature := range strings.Split(disabledFeatures, ",") { if slices.Contains(strings.Split(disabledFeatures, ","), feature) {
if feature == disabledFeature {
return false
}
}
}
for _, unsupportedFeature := range b.unsupportedFeatures {
if feature == unsupportedFeature {
return false return false
} }
} }
return true return !slices.Contains(b.unsupportedFeatures, feature)
} }

View File

@ -10,9 +10,9 @@ import (
"strings" "strings"
"time" "time"
"github.com/docker/docker/client"
"github.com/moby/buildkit/cmd/buildkitd/config" "github.com/moby/buildkit/cmd/buildkitd/config"
"github.com/moby/buildkit/util/testutil/dockerd" "github.com/moby/buildkit/util/testutil/dockerd"
"github.com/moby/buildkit/util/testutil/dockerd/client"
"github.com/moby/buildkit/util/testutil/integration" "github.com/moby/buildkit/util/testutil/integration"
"github.com/pkg/errors" "github.com/pkg/errors"
"golang.org/x/sync/errgroup" "golang.org/x/sync/errgroup"
@ -252,7 +252,7 @@ func waitForAPI(ctx context.Context, apiClient *client.Client, d time.Duration)
step := 50 * time.Millisecond step := 50 * time.Millisecond
i := 0 i := 0
for { for {
if _, err := apiClient.Ping(ctx); err == nil { if err := apiClient.Ping(ctx); err == nil {
break break
} }
i++ i++

View File

@ -5,7 +5,11 @@ import (
"fmt" "fmt"
"net/http" "net/http"
"net/http/httptrace" "net/http/httptrace"
"slices"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/stack"
"github.com/pkg/errors"
"go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace" "go.opentelemetry.io/contrib/instrumentation/net/http/httptrace/otelhttptrace"
"go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp" "go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"
"go.opentelemetry.io/otel/attribute" "go.opentelemetry.io/otel/attribute"
@ -14,11 +18,6 @@ import (
semconv "go.opentelemetry.io/otel/semconv/v1.26.0" semconv "go.opentelemetry.io/otel/semconv/v1.26.0"
"go.opentelemetry.io/otel/trace" "go.opentelemetry.io/otel/trace"
"go.opentelemetry.io/otel/trace/noop" "go.opentelemetry.io/otel/trace/noop"
"github.com/pkg/errors"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/stack"
) )
// StartSpan starts a new span as a child of the span in context. // StartSpan starts a new span as a child of the span in context.
@ -43,10 +42,8 @@ func hasStacktrace(err error) bool {
case interface{ Unwrap() error }: case interface{ Unwrap() error }:
return hasStacktrace(e.Unwrap()) return hasStacktrace(e.Unwrap())
case interface{ Unwrap() []error }: case interface{ Unwrap() []error }:
for _, ue := range e.Unwrap() { if slices.ContainsFunc(e.Unwrap(), hasStacktrace) {
if hasStacktrace(ue) { return true
return true
}
} }
} }
return false return false

141
vendor/github.com/moby/sys/user/idtools.go generated vendored Normal file
View File

@ -0,0 +1,141 @@
package user
import (
"fmt"
"os"
)
// MkdirOpt is a type for options to pass to Mkdir calls
type MkdirOpt func(*mkdirOptions)
// mkdirOptions collects the option flags applied by MkdirOpt functions.
type mkdirOptions struct {
	// onlyNew restricts chown/chmod to directories created by this call.
	onlyNew bool
}
// WithOnlyNew is an option for MkdirAllAndChown that will only change ownership and permissions
// on newly created directories. If the directory already exists, it will not be modified
func WithOnlyNew(o *mkdirOptions) {
	o.onlyNew = true
}
// MkdirAllAndChown creates a directory (include any along the path) and then modifies
// ownership to the requested uid/gid. By default, if the directory already exists, this
// function will still change ownership and permissions. If WithOnlyNew is passed as an
// option, then only the newly created directories will have ownership and permissions changed.
// It delegates to the platform-specific mkdirAs implementation with mkAll=true.
func MkdirAllAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error {
	var options mkdirOptions
	for _, opt := range opts {
		opt(&options)
	}
	return mkdirAs(path, mode, uid, gid, true, options.onlyNew)
}
// MkdirAndChown creates a directory and then modifies ownership to the requested uid/gid.
// By default, if the directory already exists, this function still changes ownership and permissions.
// If WithOnlyNew is passed as an option, then only the newly created directory will have ownership
// and permissions changed.
// Note that unlike os.Mkdir(), this function does not return IsExist error
// in case path already exists.
// It delegates to the platform-specific mkdirAs implementation with mkAll=false.
func MkdirAndChown(path string, mode os.FileMode, uid, gid int, opts ...MkdirOpt) error {
	var options mkdirOptions
	for _, opt := range opts {
		opt(&options)
	}
	return mkdirAs(path, mode, uid, gid, false, options.onlyNew)
}
// getRootUIDGID retrieves the remapped root uid/gid pair from the set of maps.
// If the maps are empty, then the root uid/gid will default to "real" 0/0
func getRootUIDGID(uidMap, gidMap []IDMap) (int, int, error) {
uid, err := toHost(0, uidMap)
if err != nil {
return -1, -1, err
}
gid, err := toHost(0, gidMap)
if err != nil {
return -1, -1, err
}
return uid, gid, nil
}
// toContainer takes an id mapping, and uses it to translate a
// host ID to the remapped ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id
func toContainer(hostID int, idMap []IDMap) (int, error) {
if idMap == nil {
return hostID, nil
}
for _, m := range idMap {
if (int64(hostID) >= m.ParentID) && (int64(hostID) <= (m.ParentID + m.Count - 1)) {
contID := int(m.ID + (int64(hostID) - m.ParentID))
return contID, nil
}
}
return -1, fmt.Errorf("host ID %d cannot be mapped to a container ID", hostID)
}
// toHost takes an id mapping and a remapped ID, and translates the
// ID to the mapped host ID. If no map is provided, then the translation
// assumes a 1-to-1 mapping and returns the passed in id #
func toHost(contID int, idMap []IDMap) (int, error) {
if idMap == nil {
return contID, nil
}
for _, m := range idMap {
if (int64(contID) >= m.ID) && (int64(contID) <= (m.ID + m.Count - 1)) {
hostID := int(m.ParentID + (int64(contID) - m.ID))
return hostID, nil
}
}
return -1, fmt.Errorf("container ID %d cannot be mapped to a host ID", contID)
}
// IdentityMapping contains a mappings of UIDs and GIDs.
// The zero value represents an empty mapping.
type IdentityMapping struct {
UIDMaps []IDMap `json:"UIDMaps"`
GIDMaps []IDMap `json:"GIDMaps"`
}
// RootPair returns a uid and gid pair for the root user. The error is ignored
// because a root user always exists, and the defaults are correct when the uid
// and gid maps are empty.
func (i IdentityMapping) RootPair() (int, int) {
uid, gid, _ := getRootUIDGID(i.UIDMaps, i.GIDMaps)
return uid, gid
}
// ToHost returns the host UID and GID for the container uid, gid.
// Remapping is only performed if the ids aren't already the remapped root ids
func (i IdentityMapping) ToHost(uid, gid int) (int, int, error) {
var err error
ruid, rgid := i.RootPair()
if uid != ruid {
ruid, err = toHost(uid, i.UIDMaps)
if err != nil {
return ruid, rgid, err
}
}
if gid != rgid {
rgid, err = toHost(gid, i.GIDMaps)
}
return ruid, rgid, err
}
// ToContainer returns the container UID and GID for the host uid and gid
func (i IdentityMapping) ToContainer(uid, gid int) (int, int, error) {
ruid, err := toContainer(uid, i.UIDMaps)
if err != nil {
return -1, -1, err
}
rgid, err := toContainer(gid, i.GIDMaps)
return ruid, rgid, err
}
// Empty returns true if there are no id mappings
func (i IdentityMapping) Empty() bool {
return len(i.UIDMaps) == 0 && len(i.GIDMaps) == 0
}

143
vendor/github.com/moby/sys/user/idtools_unix.go generated vendored Normal file
View File

@ -0,0 +1,143 @@
//go:build !windows
package user
import (
"fmt"
"os"
"path/filepath"
"strconv"
"syscall"
)
func mkdirAs(path string, mode os.FileMode, uid, gid int, mkAll, onlyNew bool) error {
path, err := filepath.Abs(path)
if err != nil {
return err
}
stat, err := os.Stat(path)
if err == nil {
if !stat.IsDir() {
return &os.PathError{Op: "mkdir", Path: path, Err: syscall.ENOTDIR}
}
if onlyNew {
return nil
}
// short-circuit -- we were called with an existing directory and chown was requested
return setPermissions(path, mode, uid, gid, stat)
}
// make an array containing the original path asked for, plus (for mkAll == true)
// all path components leading up to the complete path that don't exist before we MkdirAll
// so that we can chown all of them properly at the end. If onlyNew is true, we won't
// chown the full directory path if it exists
var paths []string
if os.IsNotExist(err) {
paths = append(paths, path)
}
if mkAll {
// walk back to "/" looking for directories which do not exist
// and add them to the paths array for chown after creation
dirPath := path
for {
dirPath = filepath.Dir(dirPath)
if dirPath == "/" {
break
}
if _, err = os.Stat(dirPath); os.IsNotExist(err) {
paths = append(paths, dirPath)
}
}
if err = os.MkdirAll(path, mode); err != nil {
return err
}
} else if err = os.Mkdir(path, mode); err != nil {
return err
}
// even if it existed, we will chown the requested path + any subpaths that
// didn't exist when we called MkdirAll
for _, pathComponent := range paths {
if err = setPermissions(pathComponent, mode, uid, gid, nil); err != nil {
return err
}
}
return nil
}
// setPermissions performs a chown/chmod only if the uid/gid don't match what's requested
// Normally a Chown is a no-op if uid/gid match, but in some cases this can still cause an error, e.g. if the
// dir is on an NFS share, so don't call chown unless we absolutely must.
// Likewise for setting permissions.
func setPermissions(p string, mode os.FileMode, uid, gid int, stat os.FileInfo) error {
if stat == nil {
var err error
stat, err = os.Stat(p)
if err != nil {
return err
}
}
if stat.Mode().Perm() != mode.Perm() {
if err := os.Chmod(p, mode.Perm()); err != nil {
return err
}
}
ssi := stat.Sys().(*syscall.Stat_t)
if ssi.Uid == uint32(uid) && ssi.Gid == uint32(gid) {
return nil
}
return os.Chown(p, uid, gid)
}
// LoadIdentityMapping takes a requested username and
// using the data from /etc/sub{uid,gid} ranges, creates the
// proper uid and gid remapping ranges for that user/group pair
func LoadIdentityMapping(name string) (IdentityMapping, error) {
// TODO: Consider adding support for calling out to "getent"
usr, err := LookupUser(name)
if err != nil {
return IdentityMapping{}, fmt.Errorf("could not get user for username %s: %w", name, err)
}
subuidRanges, err := lookupSubRangesFile("/etc/subuid", usr)
if err != nil {
return IdentityMapping{}, err
}
subgidRanges, err := lookupSubRangesFile("/etc/subgid", usr)
if err != nil {
return IdentityMapping{}, err
}
return IdentityMapping{
UIDMaps: subuidRanges,
GIDMaps: subgidRanges,
}, nil
}
func lookupSubRangesFile(path string, usr User) ([]IDMap, error) {
uidstr := strconv.Itoa(usr.Uid)
rangeList, err := ParseSubIDFileFilter(path, func(sid SubID) bool {
return sid.Name == usr.Name || sid.Name == uidstr
})
if err != nil {
return nil, err
}
if len(rangeList) == 0 {
return nil, fmt.Errorf("no subuid ranges found for user %q", usr.Name)
}
idMap := []IDMap{}
var containerID int64
for _, idrange := range rangeList {
idMap = append(idMap, IDMap{
ID: containerID,
ParentID: idrange.SubID,
Count: idrange.Count,
})
containerID = containerID + idrange.Count
}
return idMap, nil
}

13
vendor/github.com/moby/sys/user/idtools_windows.go generated vendored Normal file
View File

@ -0,0 +1,13 @@
package user
import (
"os"
)
// This is currently a wrapper around [os.MkdirAll] since currently
// permissions aren't set through this path, the identity isn't utilized.
// Ownership is handled elsewhere, but in the future could be support here
// too.
func mkdirAs(path string, _ os.FileMode, _, _ int, _, _ bool) error {
return os.MkdirAll(path, 0)
}

View File

@ -22,7 +22,7 @@ const (
// VersionMinor is for functionality in a backwards-compatible manner // VersionMinor is for functionality in a backwards-compatible manner
VersionMinor = 1 VersionMinor = 1
// VersionPatch is for backwards-compatible bug fixes // VersionPatch is for backwards-compatible bug fixes
VersionPatch = 0 VersionPatch = 1
// VersionDev indicates development branch. Releases will be empty string. // VersionDev indicates development branch. Releases will be empty string.
VersionDev = "" VersionDev = ""

View File

@ -262,8 +262,10 @@ func ParseWithUmask(s string, umask uint) (Set, error) {
case 'w': case 'w':
perm |= iWUser | iWGroup | iWOther perm |= iWUser | iWGroup | iWOther
case 'X': case 'X':
if op == '+' { if op != '-' {
permX = iXUser | iXGroup | iXOther permX = iXUser | iXGroup | iXOther
} else {
perm |= iXUser | iXGroup | iXOther
} }
case 'x': case 'x':
perm |= iXUser | iXGroup | iXOther perm |= iXUser | iXGroup | iXOther

View File

@ -18,6 +18,8 @@ import (
"github.com/tonistiigi/fsutil" "github.com/tonistiigi/fsutil"
) )
const defaultDirectoryMode = 0755
var bufferPool = &sync.Pool{ var bufferPool = &sync.Pool{
New: func() interface{} { New: func() interface{} {
buffer := make([]byte, 32*1024) buffer := make([]byte, 32*1024)
@ -80,7 +82,11 @@ func Copy(ctx context.Context, srcRoot, src, dstRoot, dst string, opts ...Opt) e
if err != nil { if err != nil {
return err return err
} }
if createdDirs, err := MkdirAll(ensureDstPath, 0755, ci.Chown, ci.Utime); err != nil { perm := defaultDirectoryMode
if ci.Mode != nil {
perm = *ci.Mode
}
if createdDirs, err := MkdirAll(ensureDstPath, os.FileMode(perm), ci.Chown, ci.Utime); err != nil {
return err return err
} else { } else {
defer fixCreatedParentDirs(createdDirs, ci.Utime) defer fixCreatedParentDirs(createdDirs, ci.Utime)
@ -159,7 +165,11 @@ func (c *copier) prepareTargetDir(srcFollowed, src, destPath string, copyDirCont
target = destPath target = destPath
} }
var createdDirs []string var createdDirs []string
if dirs, err := MkdirAll(target, 0755, c.chown, c.utime); err != nil { mode := defaultDirectoryMode
if c.mode != nil {
mode = *c.mode
}
if dirs, err := MkdirAll(target, os.FileMode(mode), c.chown, c.utime); err != nil {
return "", nil, err return "", nil, err
} else { } else {
createdDirs = dirs createdDirs = dirs
@ -335,13 +345,13 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov
if srcComponents != "" { if srcComponents != "" {
matchesIncludePattern := false matchesIncludePattern := false
matchesExcludePattern := false matchesExcludePattern := false
matchesIncludePattern, includeMatchInfo, err = c.include(srcComponents, fi, parentIncludeMatchInfo) matchesIncludePattern, includeMatchInfo, err = c.include(srcComponents, parentIncludeMatchInfo)
if err != nil { if err != nil {
return err return err
} }
include = matchesIncludePattern include = matchesIncludePattern
matchesExcludePattern, excludeMatchInfo, err = c.exclude(srcComponents, fi, parentExcludeMatchInfo) matchesExcludePattern, excludeMatchInfo, err = c.exclude(srcComponents, parentExcludeMatchInfo)
if err != nil { if err != nil {
return err return err
} }
@ -351,11 +361,11 @@ func (c *copier) copy(ctx context.Context, src, srcComponents, target string, ov
} }
if include { if include {
if err := c.removeTargetIfNeeded(src, target, fi, targetFi); err != nil { if err := c.removeTargetIfNeeded(target, fi, targetFi); err != nil {
return err return err
} }
if err := c.createParentDirs(src, srcComponents, target, overwriteTargetMetadata); err != nil { if err := c.createParentDirs(src, overwriteTargetMetadata); err != nil {
return err return err
} }
} }
@ -447,7 +457,7 @@ func (c *copier) notifyChange(target string, fi os.FileInfo) error {
return nil return nil
} }
func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) { func (c *copier) include(path string, parentIncludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) {
if c.includePatternMatcher == nil { if c.includePatternMatcher == nil {
return true, patternmatcher.MatchInfo{}, nil return true, patternmatcher.MatchInfo{}, nil
} }
@ -459,7 +469,7 @@ func (c *copier) include(path string, fi os.FileInfo, parentIncludeMatchInfo pat
return m, matchInfo, nil return m, matchInfo, nil
} }
func (c *copier) exclude(path string, fi os.FileInfo, parentExcludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) { func (c *copier) exclude(path string, parentExcludeMatchInfo patternmatcher.MatchInfo) (bool, patternmatcher.MatchInfo, error) {
if c.excludePatternMatcher == nil { if c.excludePatternMatcher == nil {
return false, patternmatcher.MatchInfo{}, nil return false, patternmatcher.MatchInfo{}, nil
} }
@ -471,7 +481,7 @@ func (c *copier) exclude(path string, fi os.FileInfo, parentExcludeMatchInfo pat
return m, matchInfo, nil return m, matchInfo, nil
} }
func (c *copier) removeTargetIfNeeded(src, target string, srcFi, targetFi os.FileInfo) error { func (c *copier) removeTargetIfNeeded(target string, srcFi, targetFi os.FileInfo) error {
if !c.alwaysReplaceExistingDestPaths { if !c.alwaysReplaceExistingDestPaths {
return nil return nil
} }
@ -488,7 +498,7 @@ func (c *copier) removeTargetIfNeeded(src, target string, srcFi, targetFi os.Fil
// Delayed creation of parent directories when a file or dir matches an include // Delayed creation of parent directories when a file or dir matches an include
// pattern. // pattern.
func (c *copier) createParentDirs(src, srcComponents, target string, overwriteTargetMetadata bool) error { func (c *copier) createParentDirs(src string, overwriteTargetMetadata bool) error {
for i, parentDir := range c.parentDirs { for i, parentDir := range c.parentDirs {
if parentDir.copied { if parentDir.copied {
continue continue
@ -502,7 +512,7 @@ func (c *copier) createParentDirs(src, srcComponents, target string, overwriteTa
return errors.Errorf("%s is not a directory", parentDir.srcPath) return errors.Errorf("%s is not a directory", parentDir.srcPath)
} }
created, err := copyDirectoryOnly(parentDir.srcPath, parentDir.dstPath, fi, overwriteTargetMetadata) created, err := copyDirectoryOnly(parentDir.dstPath, fi, overwriteTargetMetadata)
if err != nil { if err != nil {
return err return err
} }
@ -549,7 +559,7 @@ func (c *copier) copyDirectory(
// encounter a/b/c. // encounter a/b/c.
if include { if include {
var err error var err error
created, err = copyDirectoryOnly(src, dst, stat, overwriteTargetMetadata) created, err = copyDirectoryOnly(dst, stat, overwriteTargetMetadata)
if err != nil { if err != nil {
return created, err return created, err
} }
@ -586,7 +596,7 @@ func (c *copier) copyDirectory(
return created, nil return created, nil
} }
func copyDirectoryOnly(src, dst string, stat os.FileInfo, overwriteTargetMetadata bool) (bool, error) { func copyDirectoryOnly(dst string, stat os.FileInfo, overwriteTargetMetadata bool) (bool, error) {
if st, err := os.Lstat(dst); err != nil { if st, err := os.Lstat(dst); err != nil {
if !os.IsNotExist(err) { if !os.IsNotExist(err) {
return false, err return false, err

View File

@ -15,7 +15,7 @@ func getUIDGID(fi os.FileInfo) (uid, gid int) {
return int(st.Uid), int(st.Gid) return int(st.Uid), int(st.Gid)
} }
func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error { func (c *copier) copyFileInfo(fi os.FileInfo, _, name string) error {
chown := c.chown chown := c.chown
uid, gid := getUIDGID(fi) uid, gid := getUIDGID(fi)
old := &User{UID: uid, GID: gid} old := &User{UID: uid, GID: gid}

View File

@ -16,7 +16,7 @@ func getUIDGID(fi os.FileInfo) (uid, gid int) {
return int(st.Uid), int(st.Gid) return int(st.Uid), int(st.Gid)
} }
func (c *copier) copyFileInfo(fi os.FileInfo, src, name string) error { func (c *copier) copyFileInfo(fi os.FileInfo, _, name string) error {
chown := c.chown chown := c.chown
uid, gid := getUIDGID(fi) uid, gid := getUIDGID(fi)
old := &User{UID: uid, GID: gid} old := &User{UID: uid, GID: gid}

View File

@ -13,7 +13,7 @@ const (
seTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege" seTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
) )
func getUIDGID(fi os.FileInfo) (uid, gid int) { func getUIDGID(_ os.FileInfo) (uid, gid int) {
return 0, 0 return 0, 0
} }
@ -119,10 +119,10 @@ func copyFileContent(dst, src *os.File) error {
return err return err
} }
func copyXAttrs(dst, src string, xeh XAttrErrorHandler) error { func copyXAttrs(_, _ string, _ XAttrErrorHandler) error {
return nil return nil
} }
func copyDevice(dst string, fi os.FileInfo) error { func copyDevice(_ string, _ os.FileInfo) error {
return errors.New("device copy not supported") return errors.New("device copy not supported")
} }

View File

@ -2,6 +2,6 @@ package fs
import "os" import "os"
func getLinkInfo(fi os.FileInfo) (uint64, bool) { func getLinkInfo(_ os.FileInfo) (uint64, bool) {
return 0, false return 0, false
} }

View File

@ -20,7 +20,7 @@ func rewriteMetadata(p string, stat *types.Stat) error {
// handleTarTypeBlockCharFifo is an OS-specific helper function used by // handleTarTypeBlockCharFifo is an OS-specific helper function used by
// createTarFile to handle the following types of header: Block; Char; Fifo // createTarFile to handle the following types of header: Block; Char; Fifo
func handleTarTypeBlockCharFifo(path string, stat *types.Stat) error { func handleTarTypeBlockCharFifo(_ string, _ *types.Stat) error {
return errors.New("Not implemented on windows") return errors.New("Not implemented on windows")
} }

View File

@ -60,7 +60,15 @@ target "test-noroot" {
output = ["${DESTDIR}/coverage"] output = ["${DESTDIR}/coverage"]
} }
target "lint" { group "lint" {
targets = ["lint-golangci", "lint-gopls"]
}
group "lint-cross" {
targets = ["lint-golangci-cross", "lint-gopls-cross"]
}
target "lint-golangci" {
inherits = ["_common"] inherits = ["_common"]
dockerfile = "./hack/dockerfiles/lint.Dockerfile" dockerfile = "./hack/dockerfiles/lint.Dockerfile"
output = ["type=cacheonly"] output = ["type=cacheonly"]
@ -69,8 +77,17 @@ target "lint" {
} }
} }
target "lint-cross" { target "lint-gopls" {
inherits = ["lint", "_platforms"] inherits = ["lint-golangci"]
target = "gopls-analyze"
}
target "lint-golangci-cross" {
inherits = ["lint-golangci", "_platforms"]
}
target "lint-gopls-cross" {
inherits = ["lint-gopls", "_platforms"]
} }
target "validate-generated-files" { target "validate-generated-files" {

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build (!amd64 && !ppc64le && !ppc64 && !s390x) || !gc || purego //go:build (!amd64 && !loong64 && !ppc64le && !ppc64 && !s390x) || !gc || purego
package poly1305 package poly1305

View File

@ -2,7 +2,7 @@
// Use of this source code is governed by a BSD-style // Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file. // license that can be found in the LICENSE file.
//go:build gc && !purego //go:build gc && !purego && (amd64 || loong64 || ppc64 || ppc64le)
package poly1305 package poly1305

View File

@ -0,0 +1,123 @@
// Copyright 2025 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego
// func update(state *macState, msg []byte)
TEXT ·update(SB), $0-32
MOVV state+0(FP), R4
MOVV msg_base+8(FP), R5
MOVV msg_len+16(FP), R6
MOVV $0x10, R7
MOVV (R4), R8 // h0
MOVV 8(R4), R9 // h1
MOVV 16(R4), R10 // h2
MOVV 24(R4), R11 // r0
MOVV 32(R4), R12 // r1
BLT R6, R7, bytes_between_0_and_15
loop:
MOVV (R5), R14 // msg[0:8]
MOVV 8(R5), R16 // msg[8:16]
ADDV R14, R8, R8 // h0 (x1 + y1 = z1', if z1' < x1 then z1' overflow)
ADDV R16, R9, R27
SGTU R14, R8, R24 // h0.carry
SGTU R9, R27, R28
ADDV R27, R24, R9 // h1
SGTU R27, R9, R24
OR R24, R28, R24 // h1.carry
ADDV $0x01, R24, R24
ADDV R10, R24, R10 // h2
ADDV $16, R5, R5 // msg = msg[16:]
multiply:
MULV R8, R11, R14 // h0r0.lo
MULHVU R8, R11, R15 // h0r0.hi
MULV R9, R11, R13 // h1r0.lo
MULHVU R9, R11, R16 // h1r0.hi
ADDV R13, R15, R15
SGTU R13, R15, R24
ADDV R24, R16, R16
MULV R10, R11, R25
ADDV R16, R25, R25
MULV R8, R12, R13 // h0r1.lo
MULHVU R8, R12, R16 // h0r1.hi
ADDV R13, R15, R15
SGTU R13, R15, R24
ADDV R24, R16, R16
MOVV R16, R8
MULV R10, R12, R26 // h2r1
MULV R9, R12, R13 // h1r1.lo
MULHVU R9, R12, R16 // h1r1.hi
ADDV R13, R25, R25
ADDV R16, R26, R27
SGTU R13, R25, R24
ADDV R27, R24, R26
ADDV R8, R25, R25
SGTU R8, R25, R24
ADDV R24, R26, R26
AND $3, R25, R10
AND $-4, R25, R17
ADDV R17, R14, R8
ADDV R26, R15, R27
SGTU R17, R8, R24
SGTU R26, R27, R28
ADDV R27, R24, R9
SGTU R27, R9, R24
OR R24, R28, R24
ADDV R24, R10, R10
SLLV $62, R26, R27
SRLV $2, R25, R28
SRLV $2, R26, R26
OR R27, R28, R25
ADDV R25, R8, R8
ADDV R26, R9, R27
SGTU R25, R8, R24
SGTU R26, R27, R28
ADDV R27, R24, R9
SGTU R27, R9, R24
OR R24, R28, R24
ADDV R24, R10, R10
SUBV $16, R6, R6
BGE R6, R7, loop
bytes_between_0_and_15:
BEQ R6, R0, done
MOVV $1, R14
XOR R15, R15
ADDV R6, R5, R5
flush_buffer:
MOVBU -1(R5), R25
SRLV $56, R14, R24
SLLV $8, R15, R28
SLLV $8, R14, R14
OR R24, R28, R15
XOR R25, R14, R14
SUBV $1, R6, R6
SUBV $1, R5, R5
BNE R6, R0, flush_buffer
ADDV R14, R8, R8
SGTU R14, R8, R24
ADDV R15, R9, R27
SGTU R15, R27, R28
ADDV R27, R24, R9
SGTU R27, R9, R24
OR R24, R28, R24
ADDV R10, R24, R10
MOVV $16, R6
JMP multiply
done:
MOVV R8, (R4)
MOVV R9, 8(R4)
MOVV R10, 16(R4)
RET

View File

@ -1,47 +0,0 @@
// Copyright 2019 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//go:build gc && !purego && (ppc64 || ppc64le)
package poly1305
//go:noescape
func update(state *macState, msg []byte)
// mac is a wrapper for macGeneric that redirects calls that would have gone to
// updateGeneric to update.
//
// Its Write and Sum methods are otherwise identical to the macGeneric ones, but
// using function pointers would carry a major performance cost.
type mac struct{ macGeneric }
func (h *mac) Write(p []byte) (int, error) {
nn := len(p)
if h.offset > 0 {
n := copy(h.buffer[h.offset:], p)
if h.offset+n < TagSize {
h.offset += n
return nn, nil
}
p = p[n:]
h.offset = 0
update(&h.macState, h.buffer[:])
}
if n := len(p) - (len(p) % TagSize); n > 0 {
update(&h.macState, p[:n])
p = p[n:]
}
if len(p) > 0 {
h.offset += copy(h.buffer[h.offset:], p)
}
return nn, nil
}
func (h *mac) Sum(out *[16]byte) {
state := h.macState
if h.offset > 0 {
update(&state, h.buffer[:h.offset])
}
finalize(out, &state.h, &state.s)
}

View File

@ -5,7 +5,6 @@
package ssh package ssh
import ( import (
"crypto/rand"
"errors" "errors"
"fmt" "fmt"
"io" "io"
@ -25,6 +24,11 @@ const debugHandshake = false
// quickly. // quickly.
const chanSize = 16 const chanSize = 16
// maxPendingPackets sets the maximum number of packets to queue while waiting
// for KEX to complete. This limits the total pending data to maxPendingPackets
// * maxPacket bytes, which is ~16.8MB.
const maxPendingPackets = 64
// keyingTransport is a packet based transport that supports key // keyingTransport is a packet based transport that supports key
// changes. It need not be thread-safe. It should pass through // changes. It need not be thread-safe. It should pass through
// msgNewKeys in both directions. // msgNewKeys in both directions.
@ -73,13 +77,22 @@ type handshakeTransport struct {
incoming chan []byte incoming chan []byte
readError error readError error
mu sync.Mutex mu sync.Mutex
writeError error // Condition for the above mutex. It is used to notify a completed key
sentInitPacket []byte // exchange or a write failure. Writes can wait for this condition while a
sentInitMsg *kexInitMsg // key exchange is in progress.
pendingPackets [][]byte // Used when a key exchange is in progress. writeCond *sync.Cond
writeError error
sentInitPacket []byte
sentInitMsg *kexInitMsg
// Used to queue writes when a key exchange is in progress. The length is
// limited by pendingPacketsSize. Once full, writes will block until the key
// exchange is completed or an error occurs. If not empty, it is emptied
// all at once when the key exchange is completed in kexLoop.
pendingPackets [][]byte
writePacketsLeft uint32 writePacketsLeft uint32
writeBytesLeft int64 writeBytesLeft int64
userAuthComplete bool // whether the user authentication phase is complete
// If the read loop wants to schedule a kex, it pings this // If the read loop wants to schedule a kex, it pings this
// channel, and the write loop will send out a kex // channel, and the write loop will send out a kex
@ -133,6 +146,7 @@ func newHandshakeTransport(conn keyingTransport, config *Config, clientVersion,
config: config, config: config,
} }
t.writeCond = sync.NewCond(&t.mu)
t.resetReadThresholds() t.resetReadThresholds()
t.resetWriteThresholds() t.resetWriteThresholds()
@ -259,6 +273,7 @@ func (t *handshakeTransport) recordWriteError(err error) {
defer t.mu.Unlock() defer t.mu.Unlock()
if t.writeError == nil && err != nil { if t.writeError == nil && err != nil {
t.writeError = err t.writeError = err
t.writeCond.Broadcast()
} }
} }
@ -362,6 +377,8 @@ write:
} }
} }
t.pendingPackets = t.pendingPackets[:0] t.pendingPackets = t.pendingPackets[:0]
// Unblock writePacket if waiting for KEX.
t.writeCond.Broadcast()
t.mu.Unlock() t.mu.Unlock()
} }
@ -483,7 +500,7 @@ func (t *handshakeTransport) sendKexInit() error {
CompressionClientServer: supportedCompressions, CompressionClientServer: supportedCompressions,
CompressionServerClient: supportedCompressions, CompressionServerClient: supportedCompressions,
} }
io.ReadFull(rand.Reader, msg.Cookie[:]) io.ReadFull(t.config.Rand, msg.Cookie[:])
// We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm, // We mutate the KexAlgos slice, in order to add the kex-strict extension algorithm,
// and possibly to add the ext-info extension algorithm. Since the slice may be the // and possibly to add the ext-info extension algorithm. Since the slice may be the
@ -552,26 +569,44 @@ func (t *handshakeTransport) sendKexInit() error {
return nil return nil
} }
var errSendBannerPhase = errors.New("ssh: SendAuthBanner outside of authentication phase")
func (t *handshakeTransport) writePacket(p []byte) error { func (t *handshakeTransport) writePacket(p []byte) error {
t.mu.Lock()
defer t.mu.Unlock()
switch p[0] { switch p[0] {
case msgKexInit: case msgKexInit:
return errors.New("ssh: only handshakeTransport can send kexInit") return errors.New("ssh: only handshakeTransport can send kexInit")
case msgNewKeys: case msgNewKeys:
return errors.New("ssh: only handshakeTransport can send newKeys") return errors.New("ssh: only handshakeTransport can send newKeys")
case msgUserAuthBanner:
if t.userAuthComplete {
return errSendBannerPhase
}
case msgUserAuthSuccess:
t.userAuthComplete = true
} }
t.mu.Lock()
defer t.mu.Unlock()
if t.writeError != nil { if t.writeError != nil {
return t.writeError return t.writeError
} }
if t.sentInitMsg != nil { if t.sentInitMsg != nil {
// Copy the packet so the writer can reuse the buffer. if len(t.pendingPackets) < maxPendingPackets {
cp := make([]byte, len(p)) // Copy the packet so the writer can reuse the buffer.
copy(cp, p) cp := make([]byte, len(p))
t.pendingPackets = append(t.pendingPackets, cp) copy(cp, p)
return nil t.pendingPackets = append(t.pendingPackets, cp)
return nil
}
for t.sentInitMsg != nil {
// Block and wait for KEX to complete or an error.
t.writeCond.Wait()
if t.writeError != nil {
return t.writeError
}
}
} }
if t.writeBytesLeft > 0 { if t.writeBytesLeft > 0 {
@ -588,6 +623,7 @@ func (t *handshakeTransport) writePacket(p []byte) error {
if err := t.pushPacket(p); err != nil { if err := t.pushPacket(p); err != nil {
t.writeError = err t.writeError = err
t.writeCond.Broadcast()
} }
return nil return nil

View File

@ -818,6 +818,8 @@ func decode(packet []byte) (interface{}, error) {
return new(userAuthSuccessMsg), nil return new(userAuthSuccessMsg), nil
case msgUserAuthFailure: case msgUserAuthFailure:
msg = new(userAuthFailureMsg) msg = new(userAuthFailureMsg)
case msgUserAuthBanner:
msg = new(userAuthBannerMsg)
case msgUserAuthPubKeyOk: case msgUserAuthPubKeyOk:
msg = new(userAuthPubKeyOkMsg) msg = new(userAuthPubKeyOkMsg)
case msgGlobalRequest: case msgGlobalRequest:

View File

@ -59,6 +59,27 @@ type GSSAPIWithMICConfig struct {
Server GSSAPIServer Server GSSAPIServer
} }
// SendAuthBanner implements [ServerPreAuthConn].
func (s *connection) SendAuthBanner(msg string) error {
return s.transport.writePacket(Marshal(&userAuthBannerMsg{
Message: msg,
}))
}
func (*connection) unexportedMethodForFutureProofing() {}
// ServerPreAuthConn is the interface available on an incoming server
// connection before authentication has completed.
type ServerPreAuthConn interface {
unexportedMethodForFutureProofing() // permits growing ServerPreAuthConn safely later, ala testing.TB
ConnMetadata
// SendAuthBanner sends a banner message to the client.
// It returns an error once the authentication phase has ended.
SendAuthBanner(string) error
}
// ServerConfig holds server specific configuration data. // ServerConfig holds server specific configuration data.
type ServerConfig struct { type ServerConfig struct {
// Config contains configuration shared between client and server. // Config contains configuration shared between client and server.
@ -118,6 +139,12 @@ type ServerConfig struct {
// attempts. // attempts.
AuthLogCallback func(conn ConnMetadata, method string, err error) AuthLogCallback func(conn ConnMetadata, method string, err error)
// PreAuthConnCallback, if non-nil, is called upon receiving a new connection
// before any authentication has started. The provided ServerPreAuthConn
// can be used at any time before authentication is complete, including
// after this callback has returned.
PreAuthConnCallback func(ServerPreAuthConn)
// ServerVersion is the version identification string to announce in // ServerVersion is the version identification string to announce in
// the public handshake. // the public handshake.
// If empty, a reasonable default is used. // If empty, a reasonable default is used.
@ -488,6 +515,10 @@ func (b *BannerError) Error() string {
} }
func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) { func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, error) {
if config.PreAuthConnCallback != nil {
config.PreAuthConnCallback(s)
}
sessionID := s.transport.getSessionID() sessionID := s.transport.getSessionID()
var cache pubKeyCache var cache pubKeyCache
var perms *Permissions var perms *Permissions
@ -495,7 +526,7 @@ func (s *connection) serverAuthenticate(config *ServerConfig) (*Permissions, err
authFailures := 0 authFailures := 0
noneAuthCount := 0 noneAuthCount := 0
var authErrs []error var authErrs []error
var displayedBanner bool var calledBannerCallback bool
partialSuccessReturned := false partialSuccessReturned := false
// Set the initial authentication callbacks from the config. They can be // Set the initial authentication callbacks from the config. They can be
// changed if a PartialSuccessError is returned. // changed if a PartialSuccessError is returned.
@ -542,14 +573,10 @@ userAuthLoop:
s.user = userAuthReq.User s.user = userAuthReq.User
if !displayedBanner && config.BannerCallback != nil { if !calledBannerCallback && config.BannerCallback != nil {
displayedBanner = true calledBannerCallback = true
msg := config.BannerCallback(s) if msg := config.BannerCallback(s); msg != "" {
if msg != "" { if err := s.SendAuthBanner(msg); err != nil {
bannerMsg := &userAuthBannerMsg{
Message: msg,
}
if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil {
return nil, err return nil, err
} }
} }
@ -762,10 +789,7 @@ userAuthLoop:
var bannerErr *BannerError var bannerErr *BannerError
if errors.As(authErr, &bannerErr) { if errors.As(authErr, &bannerErr) {
if bannerErr.Message != "" { if bannerErr.Message != "" {
bannerMsg := &userAuthBannerMsg{ if err := s.SendAuthBanner(bannerErr.Message); err != nil {
Message: bannerErr.Message,
}
if err := s.transport.writePacket(Marshal(bannerMsg)); err != nil {
return nil, err return nil, err
} }
} }

View File

@ -459,7 +459,7 @@ func (c *Client) dial(laddr string, lport int, raddr string, rport int) (Channel
return nil, err return nil, err
} }
go DiscardRequests(in) go DiscardRequests(in)
return ch, err return ch, nil
} }
type tcpChan struct { type tcpChan struct {

View File

@ -6,6 +6,8 @@
// with type parameters. // with type parameters.
package constraints package constraints
import "cmp"
// Signed is a constraint that permits any signed integer type. // Signed is a constraint that permits any signed integer type.
// If future releases of Go add new predeclared signed integer types, // If future releases of Go add new predeclared signed integer types,
// this constraint will be modified to include them. // this constraint will be modified to include them.
@ -45,6 +47,8 @@ type Complex interface {
// that supports the operators < <= >= >. // that supports the operators < <= >= >.
// If future releases of Go add new ordered types, // If future releases of Go add new ordered types,
// this constraint will be modified to include them. // this constraint will be modified to include them.
type Ordered interface { //
Integer | Float | ~string // This type is redundant since Go 1.21 introduced [cmp.Ordered].
} //
//go:fix inline
type Ordered = cmp.Ordered

68
vendor/golang.org/x/exp/maps/maps.go generated vendored
View File

@ -5,9 +5,16 @@
// Package maps defines various functions useful with maps of any type. // Package maps defines various functions useful with maps of any type.
package maps package maps
import "maps"
// Keys returns the keys of the map m. // Keys returns the keys of the map m.
// The keys will be in an indeterminate order. // The keys will be in an indeterminate order.
//
// The simplest true equivalent using the standard library is:
//
// slices.AppendSeq(make([]K, 0, len(m)), maps.Keys(m))
func Keys[M ~map[K]V, K comparable, V any](m M) []K { func Keys[M ~map[K]V, K comparable, V any](m M) []K {
r := make([]K, 0, len(m)) r := make([]K, 0, len(m))
for k := range m { for k := range m {
r = append(r, k) r = append(r, k)
@ -17,7 +24,12 @@ func Keys[M ~map[K]V, K comparable, V any](m M) []K {
// Values returns the values of the map m. // Values returns the values of the map m.
// The values will be in an indeterminate order. // The values will be in an indeterminate order.
//
// The simplest true equivalent using the standard library is:
//
// slices.AppendSeq(make([]V, 0, len(m)), maps.Values(m))
func Values[M ~map[K]V, K comparable, V any](m M) []V { func Values[M ~map[K]V, K comparable, V any](m M) []V {
r := make([]V, 0, len(m)) r := make([]V, 0, len(m))
for _, v := range m { for _, v := range m {
r = append(r, v) r = append(r, v)
@ -27,68 +39,48 @@ func Values[M ~map[K]V, K comparable, V any](m M) []V {
// Equal reports whether two maps contain the same key/value pairs. // Equal reports whether two maps contain the same key/value pairs.
// Values are compared using ==. // Values are compared using ==.
//
//go:fix inline
func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool { func Equal[M1, M2 ~map[K]V, K, V comparable](m1 M1, m2 M2) bool {
if len(m1) != len(m2) { return maps.Equal(m1, m2)
return false
}
for k, v1 := range m1 {
if v2, ok := m2[k]; !ok || v1 != v2 {
return false
}
}
return true
} }
// EqualFunc is like Equal, but compares values using eq. // EqualFunc is like Equal, but compares values using eq.
// Keys are still compared with ==. // Keys are still compared with ==.
//
//go:fix inline
func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool { func EqualFunc[M1 ~map[K]V1, M2 ~map[K]V2, K comparable, V1, V2 any](m1 M1, m2 M2, eq func(V1, V2) bool) bool {
if len(m1) != len(m2) { return maps.EqualFunc(m1, m2, eq)
return false
}
for k, v1 := range m1 {
if v2, ok := m2[k]; !ok || !eq(v1, v2) {
return false
}
}
return true
} }
// Clear removes all entries from m, leaving it empty. // Clear removes all entries from m, leaving it empty.
//
//go:fix inline
func Clear[M ~map[K]V, K comparable, V any](m M) { func Clear[M ~map[K]V, K comparable, V any](m M) {
for k := range m { clear(m)
delete(m, k)
}
} }
// Clone returns a copy of m. This is a shallow clone: // Clone returns a copy of m. This is a shallow clone:
// the new keys and values are set using ordinary assignment. // the new keys and values are set using ordinary assignment.
//
//go:fix inline
func Clone[M ~map[K]V, K comparable, V any](m M) M { func Clone[M ~map[K]V, K comparable, V any](m M) M {
// Preserve nil in case it matters. return maps.Clone(m)
if m == nil {
return nil
}
r := make(M, len(m))
for k, v := range m {
r[k] = v
}
return r
} }
// Copy copies all key/value pairs in src adding them to dst. // Copy copies all key/value pairs in src adding them to dst.
// When a key in src is already present in dst, // When a key in src is already present in dst,
// the value in dst will be overwritten by the value associated // the value in dst will be overwritten by the value associated
// with the key in src. // with the key in src.
//
//go:fix inline
func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) { func Copy[M1 ~map[K]V, M2 ~map[K]V, K comparable, V any](dst M1, src M2) {
for k, v := range src { maps.Copy(dst, src)
dst[k] = v
}
} }
// DeleteFunc deletes any key/value pairs from m for which del returns true. // DeleteFunc deletes any key/value pairs from m for which del returns true.
//
//go:fix inline
func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) { func DeleteFunc[M ~map[K]V, K comparable, V any](m M, del func(K, V) bool) {
for k, v := range m { maps.DeleteFunc(m, del)
if del(k, v) {
delete(m, k)
}
}
} }

Some files were not shown because too many files have changed in this diff Show More