Mirror of https://gitea.com/Lydanne/buildx.git (synced 2025-05-18 00:47:48 +08:00)

vendor: update buildkit to 3e38a2d

Signed-off-by: CrazyMax <crazy-max@users.noreply.github.com>

This commit is contained in:
  parent c3db06cda0
  commit a49ad031a5

go.mod (12 lines changed)
@@ -10,8 +10,8 @@ require (
github.com/cloudflare/cfssl v0.0.0-20181213083726-b94e044bb51e // indirect
github.com/compose-spec/compose-go v1.2.1
github.com/containerd/console v1.0.3
github.com/containerd/containerd v1.6.1
github.com/docker/cli v20.10.12+incompatible
github.com/containerd/containerd v1.6.3-0.20220401172941-5ff8fce1fcc6
github.com/docker/cli v20.10.13+incompatible
github.com/docker/cli-docs-tool v0.4.0
github.com/docker/distribution v2.8.1+incompatible
github.com/docker/docker v20.10.7+incompatible
@@ -30,10 +30,10 @@ require (
github.com/jinzhu/gorm v1.9.2 // indirect
github.com/jinzhu/inflection v0.0.0-20180308033659-04140366298a // indirect
github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0 // indirect
github.com/moby/buildkit v0.10.0
github.com/moby/buildkit v0.10.1-0.20220402051847-3e38a2d34830
github.com/morikuni/aec v1.0.0
github.com/opencontainers/go-digest v1.0.0
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
github.com/pelletier/go-toml v1.9.4
github.com/pkg/errors v0.9.1
github.com/serialx/hashring v0.0.0-20190422032157-8b2912629002
@@ -47,7 +47,7 @@ require (
go.opentelemetry.io/otel v1.4.1
go.opentelemetry.io/otel/trace v1.4.1
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
google.golang.org/grpc v1.44.0
google.golang.org/grpc v1.45.0
gopkg.in/dancannon/gorethink.v3 v3.0.5 // indirect
gopkg.in/fatih/pool.v2 v2.0.0 // indirect
gopkg.in/gorethink/gorethink.v3 v3.0.5 // indirect
@@ -58,7 +58,7 @@ require (

replace (
github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible
github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220121014307-40bb9831756f+incompatible
github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible
k8s.io/api => k8s.io/api v0.22.4
k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
k8s.io/client-go => k8s.io/client-go v0.22.4

go.sum (47 lines changed)
@@ -324,8 +324,9 @@ github.com/containerd/containerd v1.5.0-rc.0/go.mod h1:V/IXoMqNGgBlabz3tHD2TWDoT
github.com/containerd/containerd v1.5.1/go.mod h1:0DOxVqwDy2iZvrZp2JUx/E+hS0UNTVn7dJnIOwtYR4g=
github.com/containerd/containerd v1.5.7/go.mod h1:gyvv6+ugqY25TiXxcZC3L5yOeYgEw0QMhscqVp1AR9c=
github.com/containerd/containerd v1.5.8/go.mod h1:YdFSv5bTFLpG2HIYmfqDpSYYTDX+mc5qtSuYx1YUb/s=
github.com/containerd/containerd v1.6.1 h1:oa2uY0/0G+JX4X7hpGCYvkp9FjUancz56kSNnb1sG3o=
github.com/containerd/containerd v1.6.1/go.mod h1:1nJz5xCZPusx6jJU8Frfct988y0NpumIq9ODB0kLtoE=
github.com/containerd/containerd v1.6.3-0.20220401172941-5ff8fce1fcc6 h1:nig7zto6cp3Wt1lPMK8EmyP6f/ZNmn/tL6ASQ7stews=
github.com/containerd/containerd v1.6.3-0.20220401172941-5ff8fce1fcc6/go.mod h1:WSt2SnDLAGWlu+Vl+EWay37seZLKqgRt6XLjIMy8SYM=
github.com/containerd/continuity v0.0.0-20190426062206-aaeac12a7ffc/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20190815185530-f2a389ac0a02/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
github.com/containerd/continuity v0.0.0-20191127005431-f65d91d395eb/go.mod h1:GL3xCUCBDV3CZiTSEKksMWbLE66hEyuu9qyDOOqM47Y=
@@ -333,8 +334,9 @@ github.com/containerd/continuity v0.0.0-20200710164510-efbc4488d8fe/go.mod h1:cE
github.com/containerd/continuity v0.0.0-20201208142359-180525291bb7/go.mod h1:kR3BEg7bDFaEddKm54WSmrol1fKWDU1nKYkgrcgZT7Y=
github.com/containerd/continuity v0.0.0-20210208174643-50096c924a4e/go.mod h1:EXlVlkqNba9rJe3j7w3Xa924itAMLgZH4UD/Q4PExuQ=
github.com/containerd/continuity v0.1.0/go.mod h1:ICJu0PwR54nI0yPEnJ6jcS+J7CZAUXrLh8lPo2knzsM=
github.com/containerd/continuity v0.2.2 h1:QSqfxcn8c+12slxwu00AtzXrsami0MJb/MQs9lOLHLA=
github.com/containerd/continuity v0.2.2/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
github.com/containerd/continuity v0.2.3-0.20220330195504-d132b287edc8 h1:yGFEcFNMhze29DxAAB33v/1OMRYF/cM9iwwgV2P0ZrE=
github.com/containerd/continuity v0.2.3-0.20220330195504-d132b287edc8/go.mod h1:pWygW9u7LtS1o4N/Tn0FoCFDIXZ7rxcMX7HX1Dmibvk=
github.com/containerd/fifo v0.0.0-20180307165137-3d5202aec260/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20190226154929-a9fb20d87448/go.mod h1:ODA38xgv3Kuk8dQz2ZQXpnv/UZZUHUCL7pnLehbXgQI=
github.com/containerd/fifo v0.0.0-20200410184934-f15a3290365b/go.mod h1:jPQ2IAeZRCYxpS/Cm1495vGFww6ecHmMk1YJH2Q5ln0=
@@ -347,6 +349,7 @@ github.com/containerd/go-cni v1.0.1/go.mod h1:+vUpYxKvAF72G9i1WoDOiPGRtQpqsNW/ZH
github.com/containerd/go-cni v1.0.2/go.mod h1:nrNABBHzu0ZwCug9Ije8hL2xBCYh/pjfMb1aZGrrohk=
github.com/containerd/go-cni v1.1.0/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-cni v1.1.3/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-cni v1.1.4/go.mod h1:Rflh2EJ/++BA2/vY5ao3K6WJRR/bZKsX123aPk+kUtA=
github.com/containerd/go-runc v0.0.0-20180907222934-5a6d9f37cfa3/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20190911050354-e029b79d8cda/go.mod h1:IV7qH3hrUgRmyYrtgEeGWJfWbgcHL9CSRruz2Vqcph0=
github.com/containerd/go-runc v0.0.0-20200220073739-7016d3ce2328/go.mod h1:PpyHrqVs8FTi9vpyHwPwiNEGaACDxT/N/pLcvMSRA9g=
@@ -357,13 +360,14 @@ github.com/containerd/imgcrypt v1.0.4-0.20210301171431-0ae5c75f59ba/go.mod h1:6T
github.com/containerd/imgcrypt v1.1.1-0.20210312161619-7ed62a527887/go.mod h1:5AZJNI6sLHJljKuI9IHnw1pWqo/F0nGDOuR9zgTs7ow=
github.com/containerd/imgcrypt v1.1.1/go.mod h1:xpLnwiQmEUJPvQoAapeb2SNCxz7Xr6PJrXQb0Dpc4ms=
github.com/containerd/imgcrypt v1.1.3/go.mod h1:/TPA1GIDXMzbj01yd8pIbQiLdQxed5ue1wb8bP7PQu4=
github.com/containerd/imgcrypt v1.1.4/go.mod h1:LorQnPtzL/T0IyCeftcsMEO7AqxUDbdO8j/tSUpgxvo=
github.com/containerd/nri v0.0.0-20201007170849-eb1350a75164/go.mod h1:+2wGSDGFYfE5+So4M5syatU0N0f0LbWpuqyMi4/BE8c=
github.com/containerd/nri v0.0.0-20210316161719-dbaa18c31c14/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/nri v0.1.0/go.mod h1:lmxnXF6oMkbqs39FiCt1s0R2HSMhcLel9vNL3m4AaeY=
github.com/containerd/stargz-snapshotter v0.0.0-20201027054423-3a04e4c2c116/go.mod h1:o59b3PCKVAf9jjiKtCc/9hLAd+5p/rfhBfm6aBcTEr4=
github.com/containerd/stargz-snapshotter v0.11.2/go.mod h1:HfhsbZ98KIoqA2GLmibTpRwMF/lq3utZ0ElV9ARqU7M=
github.com/containerd/stargz-snapshotter v0.11.3/go.mod h1:2j2EAUyvrLU4D9unYlTIwGhDKQIk74KJ9E71lJsQCVM=
github.com/containerd/stargz-snapshotter/estargz v0.4.1/go.mod h1:x7Q9dg9QYb4+ELgxmo4gBUeJB0tl5dqH1Sdz0nJU1QM=
github.com/containerd/stargz-snapshotter/estargz v0.11.2/go.mod h1:rjbdAXaytDSIrAy2WAy2kUrJ4ehzDS0eUQLlIb5UCY0=
github.com/containerd/stargz-snapshotter/estargz v0.11.3/go.mod h1:7vRJIcImfY8bpifnMjt+HTJoQxASq7T28MYbP15/Nf0=
github.com/containerd/ttrpc v0.0.0-20190828154514-0e0f228740de/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20190828172938-92c8520ef9f8/go.mod h1:PvCDdDGpgqzQIzDW1TphrGLssLDZp2GuS+X5DkEJB8o=
github.com/containerd/ttrpc v0.0.0-20191028202541-4f1b8fe65a5c/go.mod h1:LPm1u0xBw8r8NOKoOdNMeVHSawSsltak+Ihv+etqsE8=
@@ -388,10 +392,12 @@ github.com/containernetworking/cni v1.0.1/go.mod h1:AKuhXbN5EzmD4yTNtfSsX3tPcmtr
github.com/containernetworking/plugins v0.8.6/go.mod h1:qnw5mN19D8fIwkqW7oHHYDHVlzhJpcY6TQxn/fUyDDM=
github.com/containernetworking/plugins v0.9.1/go.mod h1:xP/idU2ldlzN6m4p5LmGiwRDjeJr6FLK6vuiUwoH7P8=
github.com/containernetworking/plugins v1.0.1/go.mod h1:QHCfGpaTwYTbbH+nZXKVTxNBDZcxSOplJT5ico8/FLE=
github.com/containernetworking/plugins v1.1.1/go.mod h1:Sr5TH/eBsGLXK/h71HeLfX19sZPp3ry5uHSkI4LPxV8=
github.com/containers/ocicrypt v1.0.1/go.mod h1:MeJDzk1RJHv89LjsH0Sp5KTY3ZYkjXO/C+bKAeWFIrc=
github.com/containers/ocicrypt v1.1.0/go.mod h1:b8AOe0YR67uU8OqfVNcznfFpAzu3rdgUV4GP9qXPfu4=
github.com/containers/ocicrypt v1.1.1/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.2/go.mod h1:Dm55fwWm1YZAjYRaJ94z2mfZikIyIN4B0oB3dj3jFxY=
github.com/containers/ocicrypt v1.1.3/go.mod h1:xpdkbVAuaH3WzbEabUd5yDsl9SwJA5pABH85425Es2g=
github.com/coreos/bbolt v1.3.2/go.mod h1:iRUV2dpdMOn7Bo10OQBFzIJO9kkE559Wcmn+qkEiiKk=
github.com/coreos/etcd v3.3.10+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
github.com/coreos/etcd v3.3.13+incompatible/go.mod h1:uF7uidLiAD3TWHmW31ZFd/JWoc32PjwdhPthX9715RE=
@@ -448,11 +454,10 @@ github.com/docker/distribution v0.0.0-20190905152932-14b96e55d84c/go.mod h1:0+TT
github.com/docker/distribution v2.6.0-rc.1.0.20180327202408-83389a148052+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1-0.20190205005809-0d3efadf0154+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.0+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.1+incompatible h1:Q50tZOPR6T/hjNsyc9g8/syEs6bk8XXApsHjKukMl68=
github.com/docker/distribution v2.8.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v20.10.3-0.20220121014307-40bb9831756f+incompatible h1:IDzw9qR4h7PF3aEriDajLKrkvc3owPWHasPKUEliWUE=
github.com/docker/docker v20.10.3-0.20220121014307-40bb9831756f+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible h1:Ptj2To+ezU/mCBUKdYXBQ2r3/2EJojAlOZrsgprF+is=
github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.6.3/go.mod h1:WRaJzqw3CTB9bk10avuGsjVBZsD05qeibJ1/TYlvc0Y=
github.com/docker/docker-credential-helpers v0.6.4 h1:axCks+yV+2MR3/kZhAmy07yC56WZ2Pwu/fKWtKuZB0o=
github.com/docker/docker-credential-helpers v0.6.4/go.mod h1:ofX3UI0Gz1TteYBjtgs07O36Pyasyp66D2uKT7H8W1c=
@@ -879,8 +884,8 @@ github.com/klauspost/compress v1.4.0/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0
github.com/klauspost/compress v1.4.1/go.mod h1:RyIbtBH6LamlWaDj8nUwkbUhJ87Yi3uG0guNDohfE1A=
github.com/klauspost/compress v1.11.3/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.11.13/go.mod h1:aoV0uJVorq1K+umq18yTdKaF57EivdYsUV+/s2qKfXs=
github.com/klauspost/compress v1.15.0 h1:xqfchp4whNFxn5A4XFyyYtitiWI8Hy5EW59jEwcyL6U=
github.com/klauspost/compress v1.15.0/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/compress v1.15.1 h1:y9FcTHGyrebwfP0ZZqFiaxTaiDnUrGkJkI+f583BL1A=
github.com/klauspost/compress v1.15.1/go.mod h1:/3/Vjq9QcHkK5uEr5lBEmyoZ1iFhe47etQ6QUkpK6sk=
github.com/klauspost/cpuid v0.0.0-20180405133222-e7e905edc00e/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/klauspost/cpuid v1.2.0/go.mod h1:Pj4uuM528wm8OyEC2QMXAi2YiTZ96dNQPGgoMS4s3ek=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
@@ -947,8 +952,9 @@ github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182aff
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/mgutz/ansi v0.0.0-20170206155736-9520e82c474b/go.mod h1:01TrycV0kFyexm33Z7vhZRXopbI8J3TDReVlkTgMUxE=
github.com/miekg/dns v1.0.14/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=
github.com/miekg/pkcs11 v1.0.3 h1:iMwmD7I5225wv84WxIG/bmxz9AXjWvTWIbM/TYHvWtw=
github.com/miekg/pkcs11 v1.0.3/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/miekg/pkcs11 v1.1.1 h1:Ugu9pdy6vAYku5DEpVWVFPYnzV+bxB+iRdbuFSu7TvU=
github.com/miekg/pkcs11 v1.1.1/go.mod h1:XsNlhZGX73bx86s2hdc/FuaLm2CPZJemRLMA+WTFxgs=
github.com/mistifyio/go-zfs v2.1.2-0.20190413222219-f784269be439+incompatible/go.mod h1:8AuVvqP/mXw1px98n46wfvcGfQ4ci2FwoAjKYxuo3Z4=
github.com/mitchellh/cli v1.0.0/go.mod h1:hNIlj7HEI86fIcpObd7a0FcrxTWetlwJDGcceTlRvqc=
github.com/mitchellh/go-homedir v1.0.0/go.mod h1:SfyaCUpYCn1Vlf4IUYiD9fPX4A5wJrkLzIz1N1q0pr0=
@@ -969,8 +975,8 @@ github.com/mitchellh/mapstructure v1.4.3 h1:OVowDSCllw/YjdLkam3/sm7wEtOy59d8ndGg
github.com/mitchellh/mapstructure v1.4.3/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/osext v0.0.0-20151018003038-5e2d6d41470f/go.mod h1:OkQIRizQZAeMln+1tSwduZz7+Af5oFlKirV/MSYes2A=
github.com/moby/buildkit v0.8.1/go.mod h1:/kyU1hKy/aYCuP39GZA9MaKioovHku57N6cqlKZIaiQ=
github.com/moby/buildkit v0.10.0 h1:ElHQJJdnj/VR/pfNJwhrjQJj8GXFIwVNGZh/Qbd5tVo=
github.com/moby/buildkit v0.10.0/go.mod h1:WvwAZv8aRScHkqc/+X46cRC2CKMKpqcaX+pRvUTtPes=
github.com/moby/buildkit v0.10.1-0.20220402051847-3e38a2d34830 h1:guxGTZCTDl42mlHoUOAlLaZOoCvgHlg7TG5+OJ1moyQ=
github.com/moby/buildkit v0.10.1-0.20220402051847-3e38a2d34830/go.mod h1:3gd2suj256Va94VdkNsw64sCAMp0mzUyqZOJNAUQb38=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/spdystream v0.2.0 h1:cjW1zVyyoiM0T7b6UoySUFqzXMoqRckQtXwGPiBhOM8=
@@ -1017,6 +1023,7 @@ github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+
github.com/nakabonne/nestif v0.3.0/go.mod h1:dI314BppzXjJ4HsCnbo7XzrJHPszZsjnk5wEBSYHI2c=
github.com/nbutton23/zxcvbn-go v0.0.0-20180912185939-ae427f1e4c1d/go.mod h1:o96djdrsSGy3AWPyBgZMAGfxZNfgntdJG+11KU4QvbU=
github.com/ncw/swift v1.0.47/go.mod h1:23YIA4yWVnGwv2dQlN4bB7egfYX6YLn0Yo/S6zZO/ZM=
github.com/networkplumbing/go-nft v0.2.0/go.mod h1:HnnM+tYvlGAsMU7yoYwXEVLLiDW9gdMmb5HoGcwpuQs=
github.com/niemeyer/pretty v0.0.0-20200227124842-a10e7caefd8e/go.mod h1:zD1mROLANZcx1PVRCS0qkT7pwLkGfwJo4zjcN/Tysno=
github.com/nxadm/tail v1.4.4/go.mod h1:kenIhsEOeOJmVchQTgglprH7qJGnHDVpk1VPCcaMI8A=
github.com/nxadm/tail v1.4.8/go.mod h1:+ncqLTQzXmGhMZNUePPaPqPvBxHAIsmXswZKocGu+AU=
@@ -1055,8 +1062,10 @@ github.com/opencontainers/go-digest v1.0.0 h1:apOUWs51W5PlhuyGyz9FCeeBIOUDA/6nW8
github.com/opencontainers/go-digest v1.0.0/go.mod h1:0JzlMkj0TRzQZfJkVvzbP0HBR3IKzErnv2BNG4W4MAM=
github.com/opencontainers/image-spec v1.0.0/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.1/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5 h1:q37d91F6BO4Jp1UqWiun0dUFYaqv6WsKTLTCaWv+8LY=
github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.2/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799 h1:rc3tiVYb5z54aKaDfakKn0dDjIyPpTtszkjuMzyt7ec=
github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799/go.mod h1:BtxoFyWECRxE4U/7sNtV5W15zMzWCbyJoFRP3s7yZA0=
github.com/opencontainers/runc v0.0.0-20190115041553-12f6a991201f/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v0.1.1/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
github.com/opencontainers/runc v1.0.0-rc10/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rmF59PVA73FjuZG0U=
@@ -1065,8 +1074,9 @@ github.com/opencontainers/runc v1.0.0-rc9/go.mod h1:qT5XzbpPznkRYVz/mWwUaVBUv2rm
github.com/opencontainers/runc v1.0.0-rc92/go.mod h1:X1zlU4p7wOlX4+WRCz+hvlRv8phdL7UqbYD+vQwNMmE=
github.com/opencontainers/runc v1.0.0-rc93/go.mod h1:3NOsor4w32B2tC0Zbl8Knk4Wg84SM2ImC1fxBuqJ/H0=
github.com/opencontainers/runc v1.0.2/go.mod h1:aTaHFFwQXuA71CiyxOdFFIorAoemI04suvGRQFzWTD0=
github.com/opencontainers/runc v1.1.0 h1:O9+X96OcDjkmmZyfaG996kV7yq8HsoU2h1XRRQcefG8=
github.com/opencontainers/runc v1.1.0/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runc v1.1.1 h1:PJ9DSs2sVwE0iVr++pAHE6QkS9tzcVWozlPifdwMgrU=
github.com/opencontainers/runc v1.1.1/go.mod h1:Tj1hFw6eFWp/o33uxGf5yF2BX5yz2Z6iptFpuvbbKqc=
github.com/opencontainers/runtime-spec v0.1.2-0.20190507144316-5b71a03e2700/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.1/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
github.com/opencontainers/runtime-spec v1.0.2-0.20190207185410-29686dbc5559/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
@@ -1119,6 +1129,7 @@ github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5Fsn
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.11.1/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
@@ -1471,8 +1482,8 @@ golang.org/x/crypto v0.0.0-20201117144127-c1f2f97bffc9/go.mod h1:jdWPYTVW3xRLrWP
golang.org/x/crypto v0.0.0-20210220033148-5ea612d1eb83/go.mod h1:jdWPYTVW3xRLrWPugEBEK3UY2ZEsg3UU495nc5E+M+I=
golang.org/x/crypto v0.0.0-20210322153248-0c34fe9e7dc2/go.mod h1:T9bdIzuCu7OtxOm1hfPfRQxPLYneinmdGuTeoZ9dtd4=
golang.org/x/crypto v0.0.0-20210817164053-32db794688a5/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e h1:MUP6MR3rJ7Gk9LEia0LP2ytiH6MuCfs7qYz+47jGdD8=
golang.org/x/crypto v0.0.0-20211202192323-5770296d904e/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd h1:XcWmESyNjXJMLahc3mqVQJcgSTDxFxhETVlfk9uGc38=
golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190125153040-c74c464bbbf2/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
@@ -1835,6 +1846,7 @@ golang.org/x/tools v0.0.0-20210106214847-113979e3529a/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
@@ -1974,8 +1986,9 @@ google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQ
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.42.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.43.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.44.0 h1:weqSxi/TMs1SqFRMHCtBgXRs8k3X39QIDEZ0pRcttUg=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0 h1:NEpgUqV3Z+ZjkqMsxMg11IaDrXY4RY6CQukSGK0uI1M=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

vendor/github.com/containerd/containerd/version/version.go (2 lines changed, generated, vendored)
@@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"

// Version holds the complete version number. Filled in at linking time.
Version = "1.6.1+unknown"
Version = "1.6.2+unknown"

// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.

vendor/github.com/docker/docker/api/swagger.yaml (31 lines changed, generated, vendored)
@@ -5915,7 +5915,6 @@ paths:
property1: "string"
property2: "string"
IpcMode: ""
LxcConf: []
Memory: 0
MemorySwap: 0
MemoryReservation: 0
@@ -7019,6 +7018,10 @@ paths:
Message:
description: "Details of an error"
type: "string"
400:
description: "bad parameter"
schema:
$ref: "#/definitions/ErrorResponse"
404:
description: "no such container"
schema:
@@ -7039,9 +7042,14 @@ paths:
- name: "condition"
in: "query"
description: |
Wait until a container state reaches the given condition, either
'not-running' (default), 'next-exit', or 'removed'.
Wait until a container state reaches the given condition.

Defaults to `not-running` if omitted or empty.
type: "string"
enum:
- "not-running"
- "next-exit"
- "removed"
default: "not-running"
tags: ["Container"]
/containers/{id}:
@@ -7679,7 +7687,22 @@ paths:
type: "string"
- name: "platform"
in: "query"
description: "Platform in the format os[/arch[/variant]]"
description: |
Platform in the format os[/arch[/variant]].

When used in combination with the `fromImage` option, the daemon checks
if the given image is present in the local image cache with the given
OS and Architecture, and otherwise attempts to pull the image. If the
option is not set, the host's native OS and Architecture are used.
If the given image does not exist in the local image cache, the daemon
attempts to pull the image with the host's native OS and Architecture.
If the given image does exists in the local image cache, but its OS or
architecture does not match, a warning is produced.

When used with the `fromSrc` option to import an image from an archive,
this option sets the platform information for the imported image. If
the option is not set, the host's native OS and Architecture are used
for the imported image.
type: "string"
default: ""
tags: ["Image"]

vendor/github.com/docker/docker/client/client.go (3 lines changed, generated, vendored)
@@ -135,9 +135,6 @@ func NewClientWithOpts(ops ...Opt) (*Client, error) {
}
}

if _, ok := c.client.Transport.(http.RoundTripper); !ok {
return nil, fmt.Errorf("unable to verify TLS configuration, invalid transport %v", c.client.Transport)
}
if c.scheme == "" {
c.scheme = "http"

vendor/github.com/docker/docker/client/container_wait.go (4 lines changed, generated, vendored)
@@ -33,7 +33,9 @@ func (cli *Client) ContainerWait(ctx context.Context, containerID string, condit
errC := make(chan error, 1)

query := url.Values{}
query.Set("condition", string(condition))
if condition != "" {
query.Set("condition", string(condition))
}

resp, err := cli.post(ctx, "/containers/"+containerID+"/wait", query, nil, nil)
if err != nil {
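The change above makes the client skip the `condition` query parameter when it is empty, so the daemon falls back to its documented `not-running` default. A minimal sketch of driving this endpoint through the Go client; the container ID is a placeholder and not part of this commit:

```go
package main

import (
	"context"
	"fmt"

	"github.com/docker/docker/api/types/container"
	"github.com/docker/docker/client"
)

func main() {
	cli, err := client.NewClientWithOpts(client.FromEnv, client.WithAPIVersionNegotiation())
	if err != nil {
		panic(err)
	}
	defer cli.Close()

	// "mycontainer" is an illustrative ID; WaitConditionNotRunning matches
	// the API's default of "not-running".
	statusCh, errCh := cli.ContainerWait(context.Background(), "mycontainer", container.WaitConditionNotRunning)
	select {
	case err := <-errCh:
		fmt.Println("wait failed:", err)
	case status := <-statusCh:
		fmt.Println("container exited with status", status.StatusCode)
	}
}
```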
vendor/github.com/docker/docker/client/options.go (87 lines changed, generated, vendored)
@@ -24,32 +24,13 @@ type Opt func(*Client) error
// DOCKER_CERT_PATH to load the TLS certificates from.
// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
func FromEnv(c *Client) error {
if dockerCertPath := os.Getenv("DOCKER_CERT_PATH"); dockerCertPath != "" {
options := tlsconfig.Options{
CAFile: filepath.Join(dockerCertPath, "ca.pem"),
CertFile: filepath.Join(dockerCertPath, "cert.pem"),
KeyFile: filepath.Join(dockerCertPath, "key.pem"),
InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return err
}

c.client = &http.Client{
Transport: &http.Transport{TLSClientConfig: tlsc},
CheckRedirect: CheckRedirect,
}
ops := []Opt{
WithTLSClientConfigFromEnv(),
WithHostFromEnv(),
WithVersionFromEnv(),
}

if host := os.Getenv("DOCKER_HOST"); host != "" {
if err := WithHost(host)(c); err != nil {
return err
}
}

if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
if err := WithVersion(version)(c); err != nil {
for _, op := range ops {
if err := op(c); err != nil {
return err
}
}
@@ -93,6 +74,18 @@ func WithHost(host string) Opt {
}
}

// WithHostFromEnv overrides the client host with the host specified in the
// DOCKER_HOST environment variable. If DOCKER_HOST is not set, the host is
// not modified.
func WithHostFromEnv() Opt {
return func(c *Client) error {
if host := os.Getenv("DOCKER_HOST"); host != "" {
return WithHost(host)(c)
}
return nil
}
}

// WithHTTPClient overrides the client http client with the specified one
func WithHTTPClient(client *http.Client) Opt {
return func(c *Client) error {
@@ -148,6 +141,38 @@ func WithTLSClientConfig(cacertPath, certPath, keyPath string) Opt {
}
}

// WithTLSClientConfigFromEnv configures the client's TLS settings with the
// settings in the DOCKER_CERT_PATH and DOCKER_TLS_VERIFY environment variables.
// If DOCKER_CERT_PATH is not set or empty, TLS configuration is not modified.
//
// Supported environment variables:
// DOCKER_CERT_PATH directory to load the TLS certificates (ca.pem, cert.pem, key.pem) from.
// DOCKER_TLS_VERIFY to enable or disable TLS verification, off by default.
func WithTLSClientConfigFromEnv() Opt {
return func(c *Client) error {
dockerCertPath := os.Getenv("DOCKER_CERT_PATH")
if dockerCertPath == "" {
return nil
}
options := tlsconfig.Options{
CAFile: filepath.Join(dockerCertPath, "ca.pem"),
CertFile: filepath.Join(dockerCertPath, "cert.pem"),
KeyFile: filepath.Join(dockerCertPath, "key.pem"),
InsecureSkipVerify: os.Getenv("DOCKER_TLS_VERIFY") == "",
}
tlsc, err := tlsconfig.Client(options)
if err != nil {
return err
}

c.client = &http.Client{
Transport: &http.Transport{TLSClientConfig: tlsc},
CheckRedirect: CheckRedirect,
}
return nil
}
}

// WithVersion overrides the client version with the specified one. If an empty
// version is specified, the value will be ignored to allow version negotiation.
func WithVersion(version string) Opt {
@@ -160,6 +185,18 @@ func WithVersion(version string) Opt {
}
}

// WithVersionFromEnv overrides the client version with the version specified in
// the DOCKER_API_VERSION environment variable. If DOCKER_API_VERSION is not set,
// the version is not modified.
func WithVersionFromEnv() Opt {
return func(c *Client) error {
if version := os.Getenv("DOCKER_API_VERSION"); version != "" {
return WithVersion(version)(c)
}
return nil
}
}

// WithAPIVersionNegotiation enables automatic API version negotiation for the client.
// With this option enabled, the client automatically negotiates the API version
// to use when making requests. API version negotiation is performed on the first
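With this refactor, FromEnv is just a composition of the three new single-purpose options, so callers can honour only the environment variables they care about. A hedged sketch of both styles; the socket path is only an example:

```go
package main

import "github.com/docker/docker/client"

func newClients() error {
	// Same behaviour as before: honour DOCKER_HOST, DOCKER_API_VERSION,
	// DOCKER_CERT_PATH and DOCKER_TLS_VERIFY in one call.
	all, err := client.NewClientWithOpts(client.FromEnv)
	if err != nil {
		return err
	}
	defer all.Close()

	// Pick options individually: take TLS material and API version from the
	// environment but pin the host explicitly (example endpoint).
	pinned, err := client.NewClientWithOpts(
		client.WithTLSClientConfigFromEnv(),
		client.WithVersionFromEnv(),
		client.WithHost("unix:///var/run/docker.sock"),
	)
	if err != nil {
		return err
	}
	defer pinned.Close()
	return nil
}
```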
vendor/github.com/docker/docker/client/request.go (6 lines changed, generated, vendored)
@@ -154,10 +154,8 @@ func (cli *Client) doRequest(ctx context.Context, req *http.Request) (serverResp
if err.Timeout() {
return serverResp, ErrorConnectionFailed(cli.host)
}
if !err.Temporary() {
if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
return serverResp, ErrorConnectionFailed(cli.host)
}
if strings.Contains(err.Error(), "connection refused") || strings.Contains(err.Error(), "dial unix") {
return serverResp, ErrorConnectionFailed(cli.host)
}
}

vendor/github.com/docker/docker/pkg/archive/archive.go (70 lines changed, generated, vendored)
@@ -403,12 +403,64 @@ func (compression *Compression) Extension() string {
return ""
}

// nosysFileInfo hides the system-dependent info of the wrapped FileInfo to
// prevent tar.FileInfoHeader from introspecting it and potentially calling into
// glibc.
type nosysFileInfo struct {
os.FileInfo
}

func (fi nosysFileInfo) Sys() interface{} {
// A Sys value of type *tar.Header is safe as it is system-independent.
// The tar.FileInfoHeader function copies the fields into the returned
// header without performing any OS lookups.
if sys, ok := fi.FileInfo.Sys().(*tar.Header); ok {
return sys
}
return nil
}

// sysStat, if non-nil, populates hdr from system-dependent fields of fi.
var sysStat func(fi os.FileInfo, hdr *tar.Header) error

// FileInfoHeaderNoLookups creates a partially-populated tar.Header from fi.
//
// Compared to the archive/tar.FileInfoHeader function, this function is safe to
// call from a chrooted process as it does not populate fields which would
// require operating system lookups. It behaves identically to
// tar.FileInfoHeader when fi is a FileInfo value returned from
// tar.Header.FileInfo().
//
// When fi is a FileInfo for a native file, such as returned from os.Stat() and
// os.Lstat(), the returned Header value differs from one returned from
// tar.FileInfoHeader in the following ways. The Uname and Gname fields are not
// set as OS lookups would be required to populate them. The AccessTime and
// ChangeTime fields are not currently set (not yet implemented) although that
// is subject to change. Callers which require the AccessTime or ChangeTime
// fields to be zeroed should explicitly zero them out in the returned Header
// value to avoid any compatibility issues in the future.
func FileInfoHeaderNoLookups(fi os.FileInfo, link string) (*tar.Header, error) {
hdr, err := tar.FileInfoHeader(nosysFileInfo{fi}, link)
if err != nil {
return nil, err
}
if sysStat != nil {
return hdr, sysStat(fi, hdr)
}
return hdr, nil
}

// FileInfoHeader creates a populated Header from fi.
// Compared to archive pkg this function fills in more information.
// Also, regardless of Go version, this function fills file type bits (e.g. hdr.Mode |= modeISDIR),
// which have been deleted since Go 1.9 archive/tar.
//
// Compared to the archive/tar package, this function fills in less information
// but is safe to call from a chrooted process. The AccessTime and ChangeTime
// fields are not set in the returned header, ModTime is truncated to one-second
// precision, and the Uname and Gname fields are only set when fi is a FileInfo
// value returned from tar.Header.FileInfo(). Also, regardless of Go version,
// this function fills file type bits (e.g. hdr.Mode |= modeISDIR), which have
// been deleted since Go 1.9 archive/tar.
func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, error) {
hdr, err := tar.FileInfoHeader(fi, link)
hdr, err := FileInfoHeaderNoLookups(fi, link)
if err != nil {
return nil, err
}
@@ -418,9 +470,6 @@ func FileInfoHeader(name string, fi os.FileInfo, link string) (*tar.Header, erro
hdr.ChangeTime = time.Time{}
hdr.Mode = fillGo18FileTypeBits(int64(chmodTarEntry(os.FileMode(hdr.Mode))), fi)
hdr.Name = canonicalTarName(name, fi.IsDir())
if err := setHeaderForSpecialDevice(hdr, name, fi.Sys()); err != nil {
return nil, err
}
return hdr, nil
}

@@ -680,6 +729,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
}

case tar.TypeLink:
//#nosec G305 -- The target path is checked for path traversal.
targetPath := filepath.Join(extractDir, hdr.Linkname)
// check for hardlink breakout
if !strings.HasPrefix(targetPath, extractDir) {
@@ -692,7 +742,7 @@ func createTarFile(path, extractDir string, hdr *tar.Header, reader io.Reader, L
case tar.TypeSymlink:
// path -> hdr.Linkname = targetPath
// e.g. /extractDir/path/to/symlink -> ../2/file = /extractDir/path/2/file
targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname)
targetPath := filepath.Join(filepath.Dir(path), hdr.Linkname) //#nosec G305 -- The target path is checked for path traversal.

// the reason we don't need to check symlinks in the path (with FollowSymlinkInScope) is because
// that symlink would first have to be created, which would be caught earlier, at this very check:
@@ -1045,6 +1095,7 @@ loop:
}
}

//#nosec G305 -- The joined path is checked for path traversal.
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
@@ -1109,6 +1160,7 @@ loop:
}

for _, hdr := range dirs {
//#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
path := filepath.Join(dest, hdr.Name)

if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
@@ -1251,7 +1303,7 @@ func (archiver *Archiver) CopyFileWithTar(src, dst string) (err error) {
}
defer srcF.Close()

hdr, err := tar.FileInfoHeader(srcSt, "")
hdr, err := FileInfoHeaderNoLookups(srcSt, "")
if err != nil {
return err
}
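The new FileInfoHeaderNoLookups helper builds a tar header without the user and group name lookups that tar.FileInfoHeader would trigger, which is what makes it safe to call from a chrooted process. A minimal sketch of calling it directly; the file path is illustrative only:

```go
package main

import (
	"fmt"
	"os"

	"github.com/docker/docker/pkg/archive"
)

func main() {
	// Lstat so a symlink is described rather than followed; the path is just
	// an example.
	fi, err := os.Lstat("/etc/hostname")
	if err != nil {
		panic(err)
	}
	// Uname and Gname stay empty because no OS lookup is performed; on unix
	// the package's sysStat hook still fills in uid, gid and device numbers.
	hdr, err := archive.FileInfoHeaderNoLookups(fi, "")
	if err != nil {
		panic(err)
	}
	fmt.Println(hdr.Name, hdr.Mode, hdr.Uid, hdr.Gid)
}
```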
vendor/github.com/docker/docker/pkg/archive/archive_linux.go (2 lines changed, generated, vendored)

@@ -59,7 +59,7 @@ func (overlayWhiteoutConverter) ConvertWrite(hdr *tar.Header, path string, fi os
Gname: hdr.Gname,
AccessTime: hdr.AccessTime,
ChangeTime: hdr.ChangeTime,
}
} //#nosec G305 -- An archive is being created, not extracted.
}
}

vendor/github.com/docker/docker/pkg/archive/archive_unix.go (31 lines changed, generated, vendored)
@@ -17,6 +17,10 @@ import (
"golang.org/x/sys/unix"
)

func init() {
sysStat = statUnix
}

// fixVolumePathPrefix does platform specific processing to ensure that if
// the path being passed in is not in a volume path format, convert it to one.
func fixVolumePathPrefix(srcPath string) string {
@@ -45,19 +49,24 @@ func chmodTarEntry(perm os.FileMode) os.FileMode {
return perm // noop for unix as golang APIs provide perm bits correctly
}

func setHeaderForSpecialDevice(hdr *tar.Header, name string, stat interface{}) (err error) {
s, ok := stat.(*syscall.Stat_t)

if ok {
// Currently go does not fill in the major/minors
if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
}
// statUnix populates hdr from system-dependent fields of fi without performing
// any OS lookups.
func statUnix(fi os.FileInfo, hdr *tar.Header) error {
s, ok := fi.Sys().(*syscall.Stat_t)
if !ok {
return nil
}

return
hdr.Uid = int(s.Uid)
hdr.Gid = int(s.Gid)

if s.Mode&unix.S_IFBLK != 0 ||
s.Mode&unix.S_IFCHR != 0 {
hdr.Devmajor = int64(unix.Major(uint64(s.Rdev))) //nolint: unconvert
hdr.Devminor = int64(unix.Minor(uint64(s.Rdev))) //nolint: unconvert
}

return nil
}

func getInodeFromStat(stat interface{}) (inode uint64, err error) {

vendor/github.com/docker/docker/pkg/archive/diff.go (2 lines changed, generated, vendored)
@@ -113,6 +113,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
continue
}
}
//#nosec G305 -- The joined path is guarded against path traversal.
path := filepath.Join(dest, hdr.Name)
rel, err := filepath.Rel(dest, path)
if err != nil {
@@ -209,6 +210,7 @@ func UnpackLayer(dest string, layer io.Reader, options *TarOptions) (size int64,
}

for _, hdr := range dirs {
//#nosec G305 -- The header was checked for path traversal before it was appended to the dirs slice.
path := filepath.Join(dest, hdr.Name)
if err := system.Chtimes(path, hdr.AccessTime, hdr.ModTime); err != nil {
return 0, err

vendor/github.com/docker/docker/pkg/namesgenerator/names-generator.go (11 lines changed, generated, vendored)
@@ -1,3 +1,14 @@
// Package namesgenerator generates random names.
//
// This package is officially "frozen" - no new additions will be accepted.
//
// For a long time, this package provided a lot of joy within the project, but
// at some point the conflicts of opinion became greater than the added joy.
//
// At some future time, this may be replaced with something that sparks less
// controversy, but for now it will remain as-is.
//
// See also https://github.com/moby/moby/pull/43210#issuecomment-1029934277
package namesgenerator // import "github.com/docker/docker/pkg/namesgenerator"

import (

vendor/github.com/docker/docker/pkg/system/syscall_windows.go (52 lines changed, generated, vendored)
@@ -1,65 +1,23 @@
package system // import "github.com/docker/docker/pkg/system"

import (
"unsafe"

"github.com/sirupsen/logrus"
"golang.org/x/sys/windows"
)
import "golang.org/x/sys/windows"

const (
// Deprecated: use github.com/docker/pkg/idtools.SeTakeOwnershipPrivilege
SeTakeOwnershipPrivilege = "SeTakeOwnershipPrivilege"
)

const (
// Deprecated: use github.com/docker/pkg/idtools.ContainerAdministratorSidString
ContainerAdministratorSidString = "S-1-5-93-2-1"
// Deprecated: use github.com/docker/pkg/idtools.ContainerUserSidString
ContainerUserSidString = "S-1-5-93-2-2"
)

var (
ntuserApiset = windows.NewLazyDLL("ext-ms-win-ntuser-window-l1-1-0")
procGetVersionExW = modkernel32.NewProc("GetVersionExW")
)

// https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
// TODO: use golang.org/x/sys/windows.OsVersionInfoEx (needs OSVersionInfoSize to be exported)
type osVersionInfoEx struct {
OSVersionInfoSize uint32
MajorVersion uint32
MinorVersion uint32
BuildNumber uint32
PlatformID uint32
CSDVersion [128]uint16
ServicePackMajor uint16
ServicePackMinor uint16
SuiteMask uint16
ProductType byte
Reserve byte
}
// VER_NT_WORKSTATION, see https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
const verNTWorkstation = 0x00000001 // VER_NT_WORKSTATION

// IsWindowsClient returns true if the SKU is client. It returns false on
// Windows server, or if an error occurred when making the GetVersionExW
// syscall.
func IsWindowsClient() bool {
osviex := &osVersionInfoEx{OSVersionInfoSize: 284}
r1, _, err := procGetVersionExW.Call(uintptr(unsafe.Pointer(osviex)))
if r1 == 0 {
logrus.WithError(err).Warn("GetVersionExW failed - assuming server SKU")
return false
}
// VER_NT_WORKSTATION, see https://docs.microsoft.com/en-us/windows/win32/api/winnt/ns-winnt-osversioninfoexa
const verNTWorkstation = 0x00000001 // VER_NT_WORKSTATION
return osviex.ProductType == verNTWorkstation
}

// HasWin32KSupport determines whether containers that depend on win32k can
// run on this machine. Win32k is the driver used to implement windowing.
func HasWin32KSupport() bool {
// For now, check for ntuser API support on the host. In the future, a host
// may support win32k in containers even if the host does not support ntuser
// APIs.
return ntuserApiset.Load() == nil
ver := windows.RtlGetVersion()
return ver != nil && ver.ProductType == verNTWorkstation
}

vendor/github.com/klauspost/compress/README.md (17 lines changed, generated, vendored)
@@ -17,6 +17,23 @@ This package provides various compression algorithms.

# changelog

* Mar 3, 2022 (v1.15.0)
* zstd: Refactor decoder by @klauspost in [#498](https://github.com/klauspost/compress/pull/498)
* zstd: Add stream encoding without goroutines by @klauspost in [#505](https://github.com/klauspost/compress/pull/505)
* huff0: Prevent single blocks exceeding 16 bits by @klauspost in[#507](https://github.com/klauspost/compress/pull/507)
* flate: Inline literal emission by @klauspost in [#509](https://github.com/klauspost/compress/pull/509)
* gzhttp: Add zstd to transport by @klauspost in [#400](https://github.com/klauspost/compress/pull/400)
* gzhttp: Make content-type optional by @klauspost in [#510](https://github.com/klauspost/compress/pull/510)

<details>
<summary>See Details</summary>
Both compression and decompression now supports "synchronous" stream operations. This means that whenever "concurrency" is set to 1, they will operate without spawning goroutines.

Stream decompression is now faster on asynchronous, since the goroutine allocation much more effectively splits the workload. On typical streams this will typically use 2 cores fully for decompression. When a stream has finished decoding no goroutines will be left over, so decoders can now safely be pooled and still be garbage collected.

While the release has been extensively tested, it is recommended to testing when upgrading.
</details>

* Feb 22, 2022 (v1.14.4)
* flate: Fix rare huffman only (-2) corruption. [#503](https://github.com/klauspost/compress/pull/503)
* zip: Update deprecated CreateHeaderRaw to correctly call CreateRaw by @saracen in [#502](https://github.com/klauspost/compress/pull/502)

vendor/github.com/klauspost/compress/huff0/autogen.go (5 lines added, generated, vendored, new file)
@@ -0,0 +1,5 @@
package huff0

//go:generate go run generate.go
//go:generate asmfmt -w decompress_amd64.s
//go:generate asmfmt -w decompress_8b_amd64.s

vendor/github.com/klauspost/compress/huff0/bitreader.go (5 lines changed, generated, vendored)
@@ -165,6 +165,11 @@ func (b *bitReaderShifted) peekBitsFast(n uint8) uint16 {
return uint16(b.value >> ((64 - n) & 63))
}

// peekTopBits(n) is equvialent to peekBitFast(64 - n)
func (b *bitReaderShifted) peekTopBits(n uint8) uint16 {
return uint16(b.value >> n)
}

func (b *bitReaderShifted) advance(n uint8) {
b.bitsRead += n
b.value <<= n & 63

vendor/github.com/klauspost/compress/huff0/decompress.go (183 lines changed, generated, vendored)
@@ -725,189 +725,6 @@ func (d *Decoder) decompress1X8BitExactly(dst, src []byte) ([]byte, error) {
return dst, br.close()
}

// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of
// the uncompressed data exactly.
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
if len(d.dt.single) == 0 {
return nil, errors.New("no table loaded")
}
if len(src) < 6+(4*1) {
return nil, errors.New("input too small")
}
if use8BitTables && d.actualTableLog <= 8 {
return d.decompress4X8bit(dst, src)
}

var br [4]bitReaderShifted
// Decode "jump table"
start := 6
for i := 0; i < 3; i++ {
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
if start+length >= len(src) {
return nil, errors.New("truncated input (or invalid offset)")
}
err := br[i].init(src[start : start+length])
if err != nil {
return nil, err
}
start += length
}
err := br[3].init(src[start:])
if err != nil {
return nil, err
}

// destination, offset to match first output
dstSize := cap(dst)
dst = dst[:dstSize]
out := dst
dstEvery := (dstSize + 3) / 4

const tlSize = 1 << tableLogMax
const tlMask = tlSize - 1
single := d.dt.single[:tlSize]

// Use temp table to avoid bound checks/append penalty.
buf := d.buffer()
var off uint8
var decoded int

// Decode 2 values from each decoder/loop.
const bufoff = 256
for {
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
break
}

{
const stream = 0
const stream2 = 1
br[stream].fillFast()
br[stream2].fillFast()

val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)

val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}

{
const stream = 2
const stream2 = 3
br[stream].fillFast()
br[stream2].fillFast()

val := br[stream].peekBitsFast(d.actualTableLog)
val2 := br[stream2].peekBitsFast(d.actualTableLog)
v := single[val&tlMask]
v2 := single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off] = uint8(v.entry >> 8)
buf[stream2][off] = uint8(v2.entry >> 8)

val = br[stream].peekBitsFast(d.actualTableLog)
val2 = br[stream2].peekBitsFast(d.actualTableLog)
v = single[val&tlMask]
v2 = single[val2&tlMask]
br[stream].advance(uint8(v.entry))
br[stream2].advance(uint8(v2.entry))
buf[stream][off+1] = uint8(v.entry >> 8)
buf[stream2][off+1] = uint8(v2.entry >> 8)
}

off += 2

if off == 0 {
if bufoff > dstEvery {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 1")
}
copy(out, buf[0][:])
copy(out[dstEvery:], buf[1][:])
copy(out[dstEvery*2:], buf[2][:])
copy(out[dstEvery*3:], buf[3][:])
out = out[bufoff:]
decoded += bufoff * 4
// There must at least be 3 buffers left.
if len(out) < dstEvery*3 {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 2")
}
}
}
if off > 0 {
ioff := int(off)
if len(out) < dstEvery*3+ioff {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 3")
}
copy(out, buf[0][:off])
copy(out[dstEvery:], buf[1][:off])
copy(out[dstEvery*2:], buf[2][:off])
copy(out[dstEvery*3:], buf[3][:off])
decoded += int(off) * 4
out = out[off:]
}

// Decode remaining.
remainBytes := dstEvery - (decoded / 4)
for i := range br {
offset := dstEvery * i
endsAt := offset + remainBytes
if endsAt > len(out) {
endsAt = len(out)
}
br := &br[i]
bitsLeft := br.remaining()
for bitsLeft > 0 {
br.fill()
if offset >= endsAt {
d.bufs.Put(buf)
return nil, errors.New("corruption detected: stream overrun 4")
}

// Read value and increment offset.
val := br.peekBitsFast(d.actualTableLog)
v := single[val&tlMask].entry
nBits := uint8(v)
br.advance(nBits)
bitsLeft -= uint(nBits)
out[offset] = uint8(v >> 8)
offset++
}
if offset != endsAt {
d.bufs.Put(buf)
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
}
decoded += offset - dstEvery*i
err = br.close()
if err != nil {
return nil, err
}
}
d.bufs.Put(buf)
if dstSize != decoded {
return nil, errors.New("corruption detected: short output block")
}
return dst, nil
}

// Decompress4X will decompress a 4X encoded stream.
// The length of the supplied input must match the end of a block exactly.
// The *capacity* of the dst slice must match the destination size of

vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s (488 lines added, generated, vendored, new file)
@ -0,0 +1,488 @@
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
#include "funcdata.h"
|
||||
#include "go_asm.h"
|
||||
|
||||
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
|
||||
|
||||
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
|
||||
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
|
||||
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
|
||||
#define off R8
|
||||
#define buffer DI
|
||||
#define table SI
|
||||
|
||||
#define br_bits_read R9
|
||||
#define br_value R10
|
||||
#define br_offset R11
|
||||
#define peek_bits R12
|
||||
#define exhausted DX
|
||||
|
||||
#define br0 R13
|
||||
#define br1 R14
|
||||
#define br2 R15
|
||||
#define br3 BP
|
||||
|
||||
MOVQ BP, 0(SP)
|
||||
|
||||
XORQ exhausted, exhausted // exhausted = false
|
||||
XORQ off, off // off = 0
|
||||
|
||||
MOVBQZX peekBits+32(FP), peek_bits
|
||||
MOVQ buf+40(FP), buffer
|
||||
MOVQ tbl+48(FP), table
|
||||
|
||||
MOVQ pbr0+0(FP), br0
|
||||
MOVQ pbr1+8(FP), br1
|
||||
MOVQ pbr2+16(FP), br2
|
||||
MOVQ pbr3+24(FP), br3
|
||||
|
||||
main_loop:
|
||||
|
||||
// const stream = 0
|
||||
// br0.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br0), br_value
|
||||
MOVQ bitReaderShifted_off(br0), br_offset
|
||||
|
||||
// if b.bitsRead >= 32 {
|
||||
CMPQ br_bits_read, $32
|
||||
JB skip_fill0
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br0), AX
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br0.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
|
||||
// }
|
||||
skip_fill0:
|
||||
|
||||
// val0 := br0.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br0.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val1 := br0.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br0.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, 0(buffer)(off*1)
|
||||
|
||||
// SECOND PART:
|
||||
// val2 := br0.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br0.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val3 := br0.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v3 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br0.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off+2] = uint8(v2.entry >> 8)
|
||||
// buf[stream][off+3] = uint8(v3.entry >> 8)
|
||||
MOVW BX, 0+2(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
|
||||
MOVQ br_value, bitReaderShifted_value(br0)
|
||||
MOVQ br_offset, bitReaderShifted_off(br0)
|
||||
|
||||
// const stream = 1
|
||||
// br1.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br1), br_value
|
||||
MOVQ bitReaderShifted_off(br1), br_offset
|
||||
|
||||
// if b.bitsRead >= 32 {
|
||||
CMPQ br_bits_read, $32
|
||||
JB skip_fill1
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br1), AX
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br1.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
|
||||
// }
|
||||
skip_fill1:
|
||||
|
||||
// val0 := br1.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br1.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val1 := br1.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br1.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, 256(buffer)(off*1)
|
||||
|
||||
// SECOND PART:
|
||||
// val2 := br1.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br1.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val3 := br1.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v3 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br1.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off+2] = uint8(v2.entry >> 8)
|
||||
// buf[stream][off+3] = uint8(v3.entry >> 8)
|
||||
MOVW BX, 256+2(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
|
||||
MOVQ br_value, bitReaderShifted_value(br1)
|
||||
MOVQ br_offset, bitReaderShifted_off(br1)
|
||||
|
||||
// const stream = 2
|
||||
// br2.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br2), br_value
|
||||
MOVQ bitReaderShifted_off(br2), br_offset
|
||||
|
||||
// if b.bitsRead >= 32 {
|
||||
CMPQ br_bits_read, $32
|
||||
JB skip_fill2
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br2), AX
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br2.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
|
||||
// }
|
||||
skip_fill2:
|
||||
|
||||
// val0 := br2.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br2.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val1 := br2.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br2.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, 512(buffer)(off*1)
|
||||
|
||||
// SECOND PART:
|
||||
// val2 := br2.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br2.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val3 := br2.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v3 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br2.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off+2] = uint8(v2.entry >> 8)
|
||||
// buf[stream][off+3] = uint8(v3.entry >> 8)
|
||||
MOVW BX, 512+2(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
|
||||
MOVQ br_value, bitReaderShifted_value(br2)
|
||||
MOVQ br_offset, bitReaderShifted_off(br2)
|
||||
|
||||
// const stream = 3
|
||||
// br3.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br3), br_value
|
||||
MOVQ bitReaderShifted_off(br3), br_offset
|
||||
|
||||
// if b.bitsRead >= 32 {
|
||||
CMPQ br_bits_read, $32
|
||||
JB skip_fill3
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br3), AX
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br3.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
|
||||
// }
|
||||
skip_fill3:
|
||||
|
||||
// val0 := br3.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br3.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val1 := br3.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br3.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, 768(buffer)(off*1)
|
||||
|
||||
// SECOND PART:
|
||||
// val2 := br3.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v2 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br3.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val3 := br3.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v3 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br3.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off+2] = uint8(v2.entry >> 8)
|
||||
// buf[stream][off+3] = uint8(v3.entry >> 8)
|
||||
MOVW BX, 768+2(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
|
||||
MOVQ br_value, bitReaderShifted_value(br3)
|
||||
MOVQ br_offset, bitReaderShifted_off(br3)
|
||||
|
||||
ADDQ $4, off // off += 4
|
||||
|
||||
TESTB DH, DH // any br[i].ofs < 4?
|
||||
JNZ end
|
||||
|
||||
CMPQ off, $bufoff
|
||||
JL main_loop
|
||||
|
||||
end:
|
||||
MOVQ 0(SP), BP
|
||||
|
||||
MOVB off, ret+56(FP)
|
||||
RET
|
||||
|
||||
#undef off
|
||||
#undef buffer
|
||||
#undef table
|
||||
|
||||
#undef br_bits_read
|
||||
#undef br_value
|
||||
#undef br_offset
|
||||
#undef peek_bits
|
||||
#undef exhausted
|
||||
|
||||
#undef br0
|
||||
#undef br1
|
||||
#undef br2
|
||||
#undef br3
|
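The "these two writes get coalesced" comments in the loop above rely on x86 little-endianness: the two decoded symbols are placed in BL and BH and stored with a single 16-bit MOVW, which lands v0 at buf[off] and v1 at buf[off+1]. A minimal plain-Go sketch of that packing (illustration only, not part of the vendored package):

```go
package main

import (
	"encoding/binary"
	"fmt"
)

func main() {
	buf := make([]byte, 4)
	v0, v1 := byte(0x41), byte(0x42) // symbols from two consecutive table lookups

	// BX is BH<<8 | BL; a little-endian 16-bit store writes the low byte first,
	// so buf[0] receives v0 and buf[1] receives v1 -- the same effect as the two
	// commented-out byte writes in the assembly.
	bx := uint16(v1)<<8 | uint16(v0)
	binary.LittleEndian.PutUint16(buf, bx)

	fmt.Printf("% x\n", buf) // 41 42 00 00
}
```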
197
vendor/github.com/klauspost/compress/huff0/decompress_8b_amd64.s.in
generated
vendored
Normal file
@ -0,0 +1,197 @@
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
#include "funcdata.h"
|
||||
#include "go_asm.h"
|
||||
|
||||
|
||||
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
|
||||
|
||||
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
|
||||
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
|
||||
TEXT ·decompress4x_8b_loop_x86(SB), NOSPLIT, $8
|
||||
#define off R8
|
||||
#define buffer DI
|
||||
#define table SI
|
||||
|
||||
#define br_bits_read R9
|
||||
#define br_value R10
|
||||
#define br_offset R11
|
||||
#define peek_bits R12
|
||||
#define exhausted DX
|
||||
|
||||
#define br0 R13
|
||||
#define br1 R14
|
||||
#define br2 R15
|
||||
#define br3 BP
|
||||
|
||||
MOVQ BP, 0(SP)
|
||||
|
||||
XORQ exhausted, exhausted // exhausted = false
|
||||
XORQ off, off // off = 0
|
||||
|
||||
MOVBQZX peekBits+32(FP), peek_bits
|
||||
MOVQ buf+40(FP), buffer
|
||||
MOVQ tbl+48(FP), table
|
||||
|
||||
MOVQ pbr0+0(FP), br0
|
||||
MOVQ pbr1+8(FP), br1
|
||||
MOVQ pbr2+16(FP), br2
|
||||
MOVQ pbr3+24(FP), br3
|
||||
|
||||
main_loop:
|
||||
{{ define "decode_2_values_x86" }}
|
||||
// const stream = {{ var "id" }}
|
||||
// br{{ var "id"}}.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
|
||||
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
|
||||
|
||||
// if b.bitsRead >= 32 {
|
||||
CMPQ br_bits_read, $32
|
||||
JB skip_fill{{ var "id" }}
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
// }
|
||||
skip_fill{{ var "id" }}:
|
||||
|
||||
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br{{ var "id"}}.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br{{ var "id"}}.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
|
||||
|
||||
// SECOND PART:
|
||||
// val2 := br{{ var "id"}}.peekTopBits(peekBits)
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v2 := table[val2&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v2
|
||||
|
||||
// br{{ var "id"}}.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v2.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// val3 := br{{ var "id"}}.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
// v3 := table[val3&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v3
|
||||
|
||||
// br{{ var "id"}}.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v3.entry >> 8)
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CX, br_value // value <<= n
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off+2] = uint8(v2.entry >> 8)
|
||||
// buf[stream][off+3] = uint8(v3.entry >> 8)
|
||||
MOVW BX, {{ var "bufofs" }}+2(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
|
||||
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
|
||||
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
|
||||
{{ end }}
|
||||
|
||||
{{ set "id" "0" }}
|
||||
{{ set "ofs" "0" }}
|
||||
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
|
||||
{{ template "decode_2_values_x86" . }}
|
||||
|
||||
{{ set "id" "1" }}
|
||||
{{ set "ofs" "8" }}
|
||||
{{ set "bufofs" "256" }}
|
||||
{{ template "decode_2_values_x86" . }}
|
||||
|
||||
{{ set "id" "2" }}
|
||||
{{ set "ofs" "16" }}
|
||||
{{ set "bufofs" "512" }}
|
||||
{{ template "decode_2_values_x86" . }}
|
||||
|
||||
{{ set "id" "3" }}
|
||||
{{ set "ofs" "24" }}
|
||||
{{ set "bufofs" "768" }}
|
||||
{{ template "decode_2_values_x86" . }}
|
||||
|
||||
ADDQ $4, off // off += 4
|
||||
|
||||
TESTB DH, DH // any br[i].ofs < 4?
|
||||
JNZ end
|
||||
|
||||
CMPQ off, $bufoff
|
||||
JL main_loop
|
||||
end:
|
||||
MOVQ 0(SP), BP
|
||||
|
||||
MOVB off, ret+56(FP)
|
||||
RET
|
||||
#undef off
|
||||
#undef buffer
|
||||
#undef table
|
||||
|
||||
#undef br_bits_read
|
||||
#undef br_value
|
||||
#undef br_offset
|
||||
#undef peek_bits
|
||||
#undef exhausted
|
||||
|
||||
#undef br0
|
||||
#undef br1
|
||||
#undef br2
|
||||
#undef br3
|
181
vendor/github.com/klauspost/compress/huff0/decompress_amd64.go
generated
vendored
Normal file
@ -0,0 +1,181 @@
|
||||
//go:build amd64 && !appengine && !noasm && gc
|
||||
// +build amd64,!appengine,!noasm,gc
|
||||
|
||||
// This file contains the specialisation of Decoder.Decompress4X
|
||||
// that uses an asm implementation of its main loop.
|
||||
package huff0
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// decompress4x_main_loop_x86 is an x86 assembler implementation
|
||||
// of Decompress4X when tablelog > 8.
|
||||
//go:noescape
|
||||
func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
|
||||
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
|
||||
|
||||
// decompress4x_8b_loop_x86 is an x86 assembler implementation
|
||||
// of Decompress4X when tablelog <= 8 which decodes 4 entries
|
||||
// per loop.
|
||||
//go:noescape
|
||||
func decompress4x_8b_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
|
||||
peekBits uint8, buf *byte, tbl *dEntrySingle) uint8
|
||||
|
||||
// fallback8BitSize is the size where using Go version is faster.
|
||||
const fallback8BitSize = 800
|
||||
|
||||
// Decompress4X will decompress a 4X encoded stream.
|
||||
// The length of the supplied input must match the end of a block exactly.
|
||||
// The *capacity* of the dst slice must match the destination size of
|
||||
// the uncompressed data exactly.
|
||||
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
|
||||
if len(d.dt.single) == 0 {
|
||||
return nil, errors.New("no table loaded")
|
||||
}
|
||||
if len(src) < 6+(4*1) {
|
||||
return nil, errors.New("input too small")
|
||||
}
|
||||
|
||||
use8BitTables := d.actualTableLog <= 8
|
||||
if cap(dst) < fallback8BitSize && use8BitTables {
|
||||
return d.decompress4X8bit(dst, src)
|
||||
}
|
||||
var br [4]bitReaderShifted
|
||||
// Decode "jump table"
|
||||
start := 6
|
||||
for i := 0; i < 3; i++ {
|
||||
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
|
||||
if start+length >= len(src) {
|
||||
return nil, errors.New("truncated input (or invalid offset)")
|
||||
}
|
||||
err := br[i].init(src[start : start+length])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
start += length
|
||||
}
|
||||
err := br[3].init(src[start:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// destination, offset to match first output
|
||||
dstSize := cap(dst)
|
||||
dst = dst[:dstSize]
|
||||
out := dst
|
||||
dstEvery := (dstSize + 3) / 4
|
||||
|
||||
const tlSize = 1 << tableLogMax
|
||||
const tlMask = tlSize - 1
|
||||
single := d.dt.single[:tlSize]
|
||||
|
||||
// Use temp table to avoid bound checks/append penalty.
|
||||
buf := d.buffer()
|
||||
var off uint8
|
||||
var decoded int
|
||||
|
||||
const debug = false
|
||||
|
||||
// see: bitReaderShifted.peekBitsFast()
|
||||
peekBits := uint8((64 - d.actualTableLog) & 63)
|
||||
|
||||
// Decode 2 values from each decoder/loop.
|
||||
const bufoff = 256
|
||||
for {
|
||||
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
|
||||
break
|
||||
}
|
||||
|
||||
if use8BitTables {
|
||||
off = decompress4x_8b_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
|
||||
} else {
|
||||
off = decompress4x_main_loop_x86(&br[0], &br[1], &br[2], &br[3], peekBits, &buf[0][0], &single[0])
|
||||
}
|
||||
if debug {
|
||||
fmt.Print("DEBUG: ")
|
||||
fmt.Printf("off=%d,", off)
|
||||
for i := 0; i < 4; i++ {
|
||||
fmt.Printf(" br[%d]={bitsRead=%d, value=%x, off=%d}",
|
||||
i, br[i].bitsRead, br[i].value, br[i].off)
|
||||
}
|
||||
fmt.Println("")
|
||||
}
|
||||
|
||||
if off != 0 {
|
||||
break
|
||||
}
|
||||
|
||||
if bufoff > dstEvery {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 1")
|
||||
}
|
||||
copy(out, buf[0][:])
|
||||
copy(out[dstEvery:], buf[1][:])
|
||||
copy(out[dstEvery*2:], buf[2][:])
|
||||
copy(out[dstEvery*3:], buf[3][:])
|
||||
out = out[bufoff:]
|
||||
decoded += bufoff * 4
|
||||
// There must be at least 3 buffers left.
|
||||
if len(out) < dstEvery*3 {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 2")
|
||||
}
|
||||
}
|
||||
if off > 0 {
|
||||
ioff := int(off)
|
||||
if len(out) < dstEvery*3+ioff {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 3")
|
||||
}
|
||||
copy(out, buf[0][:off])
|
||||
copy(out[dstEvery:], buf[1][:off])
|
||||
copy(out[dstEvery*2:], buf[2][:off])
|
||||
copy(out[dstEvery*3:], buf[3][:off])
|
||||
decoded += int(off) * 4
|
||||
out = out[off:]
|
||||
}
|
||||
|
||||
// Decode remaining.
|
||||
remainBytes := dstEvery - (decoded / 4)
|
||||
for i := range br {
|
||||
offset := dstEvery * i
|
||||
endsAt := offset + remainBytes
|
||||
if endsAt > len(out) {
|
||||
endsAt = len(out)
|
||||
}
|
||||
br := &br[i]
|
||||
bitsLeft := br.remaining()
|
||||
for bitsLeft > 0 {
|
||||
br.fill()
|
||||
if offset >= endsAt {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 4")
|
||||
}
|
||||
|
||||
// Read value and increment offset.
|
||||
val := br.peekBitsFast(d.actualTableLog)
|
||||
v := single[val&tlMask].entry
|
||||
nBits := uint8(v)
|
||||
br.advance(nBits)
|
||||
bitsLeft -= uint(nBits)
|
||||
out[offset] = uint8(v >> 8)
|
||||
offset++
|
||||
}
|
||||
if offset != endsAt {
|
||||
d.bufs.Put(buf)
|
||||
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
|
||||
}
|
||||
decoded += offset - dstEvery*i
|
||||
err = br.close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d.bufs.Put(buf)
|
||||
if dstSize != decoded {
|
||||
return nil, errors.New("corruption detected: short output block")
|
||||
}
|
||||
return dst, nil
|
||||
}
|
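The jump-table parsing at the top of Decompress4X above is easy to lose in the diff: the first six bytes hold three little-endian uint16 stream lengths, and the fourth stream takes whatever remains. A self-contained sketch of the same layout (splitStreams is a hypothetical helper for illustration, not part of the package):

```go
package main

import (
	"errors"
	"fmt"
)

// splitStreams mirrors the jump-table loop above: src starts with a 6-byte
// header of three little-endian uint16 lengths; the fourth stream is the rest.
func splitStreams(src []byte) ([4][]byte, error) {
	var out [4][]byte
	if len(src) < 6+4 {
		return out, errors.New("input too small")
	}
	start := 6
	for i := 0; i < 3; i++ {
		length := int(src[i*2]) | int(src[i*2+1])<<8
		if start+length >= len(src) {
			return out, errors.New("truncated input (or invalid offset)")
		}
		out[i] = src[start : start+length]
		start += length
	}
	out[3] = src[start:]
	return out, nil
}

func main() {
	// Header says streams 0..2 are 2, 3 and 1 bytes long; stream 3 gets the rest.
	src := []byte{2, 0, 3, 0, 1, 0, 0xAA, 0xBB, 0xCC, 0xDD, 0xEE, 0xFF, 0x11, 0x22}
	streams, err := splitStreams(src)
	fmt.Println(streams, err)
}
```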
506
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s
generated
vendored
Normal file
@ -0,0 +1,506 @@
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
#include "funcdata.h"
|
||||
#include "go_asm.h"
|
||||
|
||||
#ifdef GOAMD64_v4
|
||||
#ifndef GOAMD64_v3
|
||||
#define GOAMD64_v3
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
|
||||
|
||||
// func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
|
||||
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
|
||||
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
|
||||
#define off R8
|
||||
#define buffer DI
|
||||
#define table SI
|
||||
|
||||
#define br_bits_read R9
|
||||
#define br_value R10
|
||||
#define br_offset R11
|
||||
#define peek_bits R12
|
||||
#define exhausted DX
|
||||
|
||||
#define br0 R13
|
||||
#define br1 R14
|
||||
#define br2 R15
|
||||
#define br3 BP
|
||||
|
||||
MOVQ BP, 0(SP)
|
||||
|
||||
XORQ exhausted, exhausted // exhausted = false
|
||||
XORQ off, off // off = 0
|
||||
|
||||
MOVBQZX peekBits+32(FP), peek_bits
|
||||
MOVQ buf+40(FP), buffer
|
||||
MOVQ tbl+48(FP), table
|
||||
|
||||
MOVQ pbr0+0(FP), br0
|
||||
MOVQ pbr1+8(FP), br1
|
||||
MOVQ pbr2+16(FP), br2
|
||||
MOVQ pbr3+24(FP), br3
|
||||
|
||||
main_loop:
|
||||
|
||||
// const stream = 0
|
||||
// br0.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br0), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br0), br_value
|
||||
MOVQ bitReaderShifted_off(br0), br_offset
|
||||
|
||||
// We must have at least 2 * max tablelog left
|
||||
CMPQ br_bits_read, $64-22
|
||||
JBE skip_fill0
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br0), AX
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
#ifdef GOAMD64_v3
|
||||
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
|
||||
|
||||
#else
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
|
||||
#endif
|
||||
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br0.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
|
||||
// }
|
||||
skip_fill0:
|
||||
|
||||
// val0 := br0.peekTopBits(peekBits)
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#else
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#endif
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br0.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#else
|
||||
// val1 := br0.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#endif
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br0.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, 0(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br0)
|
||||
MOVQ br_value, bitReaderShifted_value(br0)
|
||||
MOVQ br_offset, bitReaderShifted_off(br0)
|
||||
|
||||
// const stream = 1
|
||||
// br1.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br1), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br1), br_value
|
||||
MOVQ bitReaderShifted_off(br1), br_offset
|
||||
|
||||
// We must have at least 2 * max tablelog left
|
||||
CMPQ br_bits_read, $64-22
|
||||
JBE skip_fill1
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br1), AX
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
#ifdef GOAMD64_v3
|
||||
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
|
||||
|
||||
#else
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
|
||||
#endif
|
||||
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br1.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
|
||||
// }
|
||||
skip_fill1:
|
||||
|
||||
// val0 := br1.peekTopBits(peekBits)
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#else
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#endif
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br1.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#else
|
||||
// val1 := br1.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#endif
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br1.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, 256(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br1)
|
||||
MOVQ br_value, bitReaderShifted_value(br1)
|
||||
MOVQ br_offset, bitReaderShifted_off(br1)
|
||||
|
||||
// const stream = 2
|
||||
// br2.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br2), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br2), br_value
|
||||
MOVQ bitReaderShifted_off(br2), br_offset
|
||||
|
||||
// We must have at least 2 * max tablelog left
|
||||
CMPQ br_bits_read, $64-22
|
||||
JBE skip_fill2
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br2), AX
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
#ifdef GOAMD64_v3
|
||||
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
|
||||
|
||||
#else
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
|
||||
#endif
|
||||
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br2.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
|
||||
// }
|
||||
skip_fill2:
|
||||
|
||||
// val0 := br2.peekTopBits(peekBits)
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#else
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#endif
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br2.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#else
|
||||
// val1 := br2.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#endif
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br2.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, 512(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br2)
|
||||
MOVQ br_value, bitReaderShifted_value(br2)
|
||||
MOVQ br_offset, bitReaderShifted_off(br2)
|
||||
|
||||
// const stream = 3
|
||||
// br3.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br3), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br3), br_value
|
||||
MOVQ bitReaderShifted_off(br3), br_offset
|
||||
|
||||
// We must have at least 2 * max tablelog left
|
||||
CMPQ br_bits_read, $64-22
|
||||
JBE skip_fill3
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br3), AX
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
#ifdef GOAMD64_v3
|
||||
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
|
||||
|
||||
#else
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
|
||||
#endif
|
||||
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br3.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
|
||||
// }
|
||||
skip_fill3:
|
||||
|
||||
// val0 := br3.peekTopBits(peekBits)
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#else
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#endif
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br3.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#else
|
||||
// val1 := br3.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
|
||||
#endif
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br3.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, 768(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br3)
|
||||
MOVQ br_value, bitReaderShifted_value(br3)
|
||||
MOVQ br_offset, bitReaderShifted_off(br3)
|
||||
|
||||
ADDQ $2, off // off += 2
|
||||
|
||||
TESTB DH, DH // any br[i].ofs < 4?
|
||||
JNZ end
|
||||
|
||||
CMPQ off, $bufoff
|
||||
JL main_loop
|
||||
|
||||
end:
|
||||
MOVQ 0(SP), BP
|
||||
|
||||
MOVB off, ret+56(FP)
|
||||
RET
|
||||
|
||||
#undef off
|
||||
#undef buffer
|
||||
#undef table
|
||||
|
||||
#undef br_bits_read
|
||||
#undef br_value
|
||||
#undef br_offset
|
||||
#undef peek_bits
|
||||
#undef exhausted
|
||||
|
||||
#undef br0
|
||||
#undef br1
|
||||
#undef br2
|
||||
#undef br3
|
195
vendor/github.com/klauspost/compress/huff0/decompress_amd64.s.in
generated
vendored
Normal file
@ -0,0 +1,195 @@
|
||||
// +build !appengine
|
||||
// +build gc
|
||||
// +build !noasm
|
||||
|
||||
#include "textflag.h"
|
||||
#include "funcdata.h"
|
||||
#include "go_asm.h"
|
||||
|
||||
#ifdef GOAMD64_v4
|
||||
#ifndef GOAMD64_v3
|
||||
#define GOAMD64_v3
|
||||
#endif
|
||||
#endif
|
||||
|
||||
#define bufoff 256 // see decompress.go, we're using [4][256]byte table
|
||||
|
||||
//func decompress4x_main_loop_x86(pbr0, pbr1, pbr2, pbr3 *bitReaderShifted,
|
||||
// peekBits uint8, buf *byte, tbl *dEntrySingle) (int, bool)
|
||||
TEXT ·decompress4x_main_loop_x86(SB), NOSPLIT, $8
|
||||
#define off R8
|
||||
#define buffer DI
|
||||
#define table SI
|
||||
|
||||
#define br_bits_read R9
|
||||
#define br_value R10
|
||||
#define br_offset R11
|
||||
#define peek_bits R12
|
||||
#define exhausted DX
|
||||
|
||||
#define br0 R13
|
||||
#define br1 R14
|
||||
#define br2 R15
|
||||
#define br3 BP
|
||||
|
||||
MOVQ BP, 0(SP)
|
||||
|
||||
XORQ exhausted, exhausted // exhausted = false
|
||||
XORQ off, off // off = 0
|
||||
|
||||
MOVBQZX peekBits+32(FP), peek_bits
|
||||
MOVQ buf+40(FP), buffer
|
||||
MOVQ tbl+48(FP), table
|
||||
|
||||
MOVQ pbr0+0(FP), br0
|
||||
MOVQ pbr1+8(FP), br1
|
||||
MOVQ pbr2+16(FP), br2
|
||||
MOVQ pbr3+24(FP), br3
|
||||
|
||||
main_loop:
|
||||
{{ define "decode_2_values_x86" }}
|
||||
// const stream = {{ var "id" }}
|
||||
// br{{ var "id"}}.fillFast()
|
||||
MOVBQZX bitReaderShifted_bitsRead(br{{ var "id" }}), br_bits_read
|
||||
MOVQ bitReaderShifted_value(br{{ var "id" }}), br_value
|
||||
MOVQ bitReaderShifted_off(br{{ var "id" }}), br_offset
|
||||
|
||||
// We must have at least 2 * max tablelog left
|
||||
CMPQ br_bits_read, $64-22
|
||||
JBE skip_fill{{ var "id" }}
|
||||
|
||||
SUBQ $32, br_bits_read // b.bitsRead -= 32
|
||||
SUBQ $4, br_offset // b.off -= 4
|
||||
|
||||
// v := b.in[b.off-4 : b.off]
|
||||
// v = v[:4]
|
||||
// low := (uint32(v[0])) | (uint32(v[1]) << 8) | (uint32(v[2]) << 16) | (uint32(v[3]) << 24)
|
||||
MOVQ bitReaderShifted_in(br{{ var "id" }}), AX
|
||||
|
||||
// b.value |= uint64(low) << (b.bitsRead & 63)
|
||||
#ifdef GOAMD64_v3
|
||||
SHLXQ br_bits_read, 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4]) << (b.bitsRead & 63)
|
||||
#else
|
||||
MOVL 0(br_offset)(AX*1), AX // AX = uint32(b.in[b.off:b.off+4])
|
||||
MOVQ br_bits_read, CX
|
||||
SHLQ CL, AX
|
||||
#endif
|
||||
|
||||
ORQ AX, br_value
|
||||
|
||||
// exhausted = exhausted || (br{{ var "id"}}.off < 4)
|
||||
CMPQ br_offset, $4
|
||||
SETLT DL
|
||||
ORB DL, DH
|
||||
// }
|
||||
skip_fill{{ var "id" }}:
|
||||
|
||||
// val0 := br{{ var "id"}}.peekTopBits(peekBits)
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
#else
|
||||
MOVQ br_value, AX
|
||||
MOVQ peek_bits, CX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
#endif
|
||||
|
||||
// v0 := table[val0&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v0
|
||||
|
||||
// br{{ var "id"}}.advance(uint8(v0.entry))
|
||||
MOVB AH, BL // BL = uint8(v0.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
SHRXQ peek_bits, br_value, AX // AX = (value >> peek_bits) & mask
|
||||
#else
|
||||
// val1 := br{{ var "id"}}.peekTopBits(peekBits)
|
||||
MOVQ peek_bits, CX
|
||||
MOVQ br_value, AX
|
||||
SHRQ CL, AX // AX = (value >> peek_bits) & mask
|
||||
#endif
|
||||
|
||||
// v1 := table[val1&mask]
|
||||
MOVW 0(table)(AX*2), AX // AX - v1
|
||||
|
||||
// br{{ var "id"}}.advance(uint8(v1.entry))
|
||||
MOVB AH, BH // BH = uint8(v1.entry >> 8)
|
||||
|
||||
#ifdef GOAMD64_v3
|
||||
MOVBQZX AL, CX
|
||||
SHLXQ AX, br_value, br_value // value <<= n
|
||||
#else
|
||||
MOVBQZX AL, CX
|
||||
SHLQ CL, br_value // value <<= n
|
||||
#endif
|
||||
|
||||
ADDQ CX, br_bits_read // bits_read += n
|
||||
|
||||
|
||||
// these two writes get coalesced
|
||||
// buf[stream][off] = uint8(v0.entry >> 8)
|
||||
// buf[stream][off+1] = uint8(v1.entry >> 8)
|
||||
MOVW BX, {{ var "bufofs" }}(buffer)(off*1)
|
||||
|
||||
// update the bit reader structure
|
||||
MOVB br_bits_read, bitReaderShifted_bitsRead(br{{ var "id" }})
|
||||
MOVQ br_value, bitReaderShifted_value(br{{ var "id" }})
|
||||
MOVQ br_offset, bitReaderShifted_off(br{{ var "id" }})
|
||||
{{ end }}
|
||||
|
||||
{{ set "id" "0" }}
|
||||
{{ set "ofs" "0" }}
|
||||
{{ set "bufofs" "0" }} {{/* id * bufoff */}}
|
||||
{{ template "decode_2_values_x86" . }}
|
||||
|
||||
{{ set "id" "1" }}
|
||||
{{ set "ofs" "8" }}
|
||||
{{ set "bufofs" "256" }}
|
||||
{{ template "decode_2_values_x86" . }}
|
||||
|
||||
{{ set "id" "2" }}
|
||||
{{ set "ofs" "16" }}
|
||||
{{ set "bufofs" "512" }}
|
||||
{{ template "decode_2_values_x86" . }}
|
||||
|
||||
{{ set "id" "3" }}
|
||||
{{ set "ofs" "24" }}
|
||||
{{ set "bufofs" "768" }}
|
||||
{{ template "decode_2_values_x86" . }}
|
||||
|
||||
ADDQ $2, off // off += 2
|
||||
|
||||
TESTB DH, DH // any br[i].ofs < 4?
|
||||
JNZ end
|
||||
|
||||
CMPQ off, $bufoff
|
||||
JL main_loop
|
||||
end:
|
||||
MOVQ 0(SP), BP
|
||||
|
||||
MOVB off, ret+56(FP)
|
||||
RET
|
||||
#undef off
|
||||
#undef buffer
|
||||
#undef table
|
||||
|
||||
#undef br_bits_read
|
||||
#undef br_value
|
||||
#undef br_offset
|
||||
#undef peek_bits
|
||||
#undef exhausted
|
||||
|
||||
#undef br0
|
||||
#undef br1
|
||||
#undef br2
|
||||
#undef br3
|
193
vendor/github.com/klauspost/compress/huff0/decompress_generic.go
generated
vendored
Normal file
@ -0,0 +1,193 @@
|
||||
//go:build !amd64 || appengine || !gc || noasm
|
||||
// +build !amd64 appengine !gc noasm
|
||||
|
||||
// This file contains a generic implementation of Decoder.Decompress4X.
|
||||
package huff0
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
)
|
||||
|
||||
// Decompress4X will decompress a 4X encoded stream.
|
||||
// The length of the supplied input must match the end of a block exactly.
|
||||
// The *capacity* of the dst slice must match the destination size of
|
||||
// the uncompressed data exactly.
|
||||
func (d *Decoder) Decompress4X(dst, src []byte) ([]byte, error) {
|
||||
if len(d.dt.single) == 0 {
|
||||
return nil, errors.New("no table loaded")
|
||||
}
|
||||
if len(src) < 6+(4*1) {
|
||||
return nil, errors.New("input too small")
|
||||
}
|
||||
if use8BitTables && d.actualTableLog <= 8 {
|
||||
return d.decompress4X8bit(dst, src)
|
||||
}
|
||||
|
||||
var br [4]bitReaderShifted
|
||||
// Decode "jump table"
|
||||
start := 6
|
||||
for i := 0; i < 3; i++ {
|
||||
length := int(src[i*2]) | (int(src[i*2+1]) << 8)
|
||||
if start+length >= len(src) {
|
||||
return nil, errors.New("truncated input (or invalid offset)")
|
||||
}
|
||||
err := br[i].init(src[start : start+length])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
start += length
|
||||
}
|
||||
err := br[3].init(src[start:])
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// destination, offset to match first output
|
||||
dstSize := cap(dst)
|
||||
dst = dst[:dstSize]
|
||||
out := dst
|
||||
dstEvery := (dstSize + 3) / 4
|
||||
|
||||
const tlSize = 1 << tableLogMax
|
||||
const tlMask = tlSize - 1
|
||||
single := d.dt.single[:tlSize]
|
||||
|
||||
// Use temp table to avoid bound checks/append penalty.
|
||||
buf := d.buffer()
|
||||
var off uint8
|
||||
var decoded int
|
||||
|
||||
// Decode 2 values from each decoder/loop.
|
||||
const bufoff = 256
|
||||
for {
|
||||
if br[0].off < 4 || br[1].off < 4 || br[2].off < 4 || br[3].off < 4 {
|
||||
break
|
||||
}
|
||||
|
||||
{
|
||||
const stream = 0
|
||||
const stream2 = 1
|
||||
br[stream].fillFast()
|
||||
br[stream2].fillFast()
|
||||
|
||||
val := br[stream].peekBitsFast(d.actualTableLog)
|
||||
val2 := br[stream2].peekBitsFast(d.actualTableLog)
|
||||
v := single[val&tlMask]
|
||||
v2 := single[val2&tlMask]
|
||||
br[stream].advance(uint8(v.entry))
|
||||
br[stream2].advance(uint8(v2.entry))
|
||||
buf[stream][off] = uint8(v.entry >> 8)
|
||||
buf[stream2][off] = uint8(v2.entry >> 8)
|
||||
|
||||
val = br[stream].peekBitsFast(d.actualTableLog)
|
||||
val2 = br[stream2].peekBitsFast(d.actualTableLog)
|
||||
v = single[val&tlMask]
|
||||
v2 = single[val2&tlMask]
|
||||
br[stream].advance(uint8(v.entry))
|
||||
br[stream2].advance(uint8(v2.entry))
|
||||
buf[stream][off+1] = uint8(v.entry >> 8)
|
||||
buf[stream2][off+1] = uint8(v2.entry >> 8)
|
||||
}
|
||||
|
||||
{
|
||||
const stream = 2
|
||||
const stream2 = 3
|
||||
br[stream].fillFast()
|
||||
br[stream2].fillFast()
|
||||
|
||||
val := br[stream].peekBitsFast(d.actualTableLog)
|
||||
val2 := br[stream2].peekBitsFast(d.actualTableLog)
|
||||
v := single[val&tlMask]
|
||||
v2 := single[val2&tlMask]
|
||||
br[stream].advance(uint8(v.entry))
|
||||
br[stream2].advance(uint8(v2.entry))
|
||||
buf[stream][off] = uint8(v.entry >> 8)
|
||||
buf[stream2][off] = uint8(v2.entry >> 8)
|
||||
|
||||
val = br[stream].peekBitsFast(d.actualTableLog)
|
||||
val2 = br[stream2].peekBitsFast(d.actualTableLog)
|
||||
v = single[val&tlMask]
|
||||
v2 = single[val2&tlMask]
|
||||
br[stream].advance(uint8(v.entry))
|
||||
br[stream2].advance(uint8(v2.entry))
|
||||
buf[stream][off+1] = uint8(v.entry >> 8)
|
||||
buf[stream2][off+1] = uint8(v2.entry >> 8)
|
||||
}
|
||||
|
||||
off += 2
|
||||
|
||||
if off == 0 {
|
||||
if bufoff > dstEvery {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 1")
|
||||
}
|
||||
copy(out, buf[0][:])
|
||||
copy(out[dstEvery:], buf[1][:])
|
||||
copy(out[dstEvery*2:], buf[2][:])
|
||||
copy(out[dstEvery*3:], buf[3][:])
|
||||
out = out[bufoff:]
|
||||
decoded += bufoff * 4
|
||||
// There must be at least 3 buffers left.
|
||||
if len(out) < dstEvery*3 {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 2")
|
||||
}
|
||||
}
|
||||
}
|
||||
if off > 0 {
|
||||
ioff := int(off)
|
||||
if len(out) < dstEvery*3+ioff {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 3")
|
||||
}
|
||||
copy(out, buf[0][:off])
|
||||
copy(out[dstEvery:], buf[1][:off])
|
||||
copy(out[dstEvery*2:], buf[2][:off])
|
||||
copy(out[dstEvery*3:], buf[3][:off])
|
||||
decoded += int(off) * 4
|
||||
out = out[off:]
|
||||
}
|
||||
|
||||
// Decode remaining.
|
||||
remainBytes := dstEvery - (decoded / 4)
|
||||
for i := range br {
|
||||
offset := dstEvery * i
|
||||
endsAt := offset + remainBytes
|
||||
if endsAt > len(out) {
|
||||
endsAt = len(out)
|
||||
}
|
||||
br := &br[i]
|
||||
bitsLeft := br.remaining()
|
||||
for bitsLeft > 0 {
|
||||
br.fill()
|
||||
if offset >= endsAt {
|
||||
d.bufs.Put(buf)
|
||||
return nil, errors.New("corruption detected: stream overrun 4")
|
||||
}
|
||||
|
||||
// Read value and increment offset.
|
||||
val := br.peekBitsFast(d.actualTableLog)
|
||||
v := single[val&tlMask].entry
|
||||
nBits := uint8(v)
|
||||
br.advance(nBits)
|
||||
bitsLeft -= uint(nBits)
|
||||
out[offset] = uint8(v >> 8)
|
||||
offset++
|
||||
}
|
||||
if offset != endsAt {
|
||||
d.bufs.Put(buf)
|
||||
return nil, fmt.Errorf("corruption detected: short output block %d, end %d != %d", i, offset, endsAt)
|
||||
}
|
||||
decoded += offset - dstEvery*i
|
||||
err = br.close()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
d.bufs.Put(buf)
|
||||
if dstSize != decoded {
|
||||
return nil, errors.New("corruption detected: short output block")
|
||||
}
|
||||
return dst, nil
|
||||
}
|
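Both the generic loop above and the assembly paths consume the same dEntrySingle convention: each 16-bit entry keeps the number of input bits in its low byte and the decoded literal in its high byte (hence nBits := uint8(v) and out[offset] = uint8(v >> 8)). A standalone sketch of that packing (plain Go, independent of the package's internal types):

```go
package main

import "fmt"

// entry mirrors the convention used by the decoder above:
// low 8 bits = bits to advance the bit reader, high 8 bits = decoded symbol.
type entry uint16

func makeEntry(sym byte, nBits uint8) entry {
	return entry(uint16(sym)<<8 | uint16(nBits))
}

func main() {
	e := makeEntry('A', 5)
	fmt.Println(uint8(e))     // 5: bits consumed by this symbol
	fmt.Println(byte(e >> 8)) // 65: the decoded literal 'A'
}
```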
72
vendor/github.com/klauspost/compress/zstd/README.md
generated
vendored
@ -153,10 +153,10 @@ http://sun.aei.polsl.pl/~sdeor/corpus/silesia.zip
|
||||
|
||||
This package:
|
||||
file out level insize outsize millis mb/s
|
||||
silesia.tar zskp 1 211947520 73101992 643 313.87
|
||||
silesia.tar zskp 2 211947520 67504318 969 208.38
|
||||
silesia.tar zskp 3 211947520 64595893 2007 100.68
|
||||
silesia.tar zskp 4 211947520 60995370 8825 22.90
|
||||
silesia.tar zskp 1 211947520 73821326 634 318.47
|
||||
silesia.tar zskp 2 211947520 67655404 1508 133.96
|
||||
silesia.tar zskp 3 211947520 64746933 3000 67.37
|
||||
silesia.tar zskp 4 211947520 60073508 16926 11.94
|
||||
|
||||
cgo zstd:
|
||||
silesia.tar zstd 1 211947520 73605392 543 371.56
|
||||
@ -165,94 +165,94 @@ silesia.tar zstd 6 211947520 62916450 1913 105.66
|
||||
silesia.tar zstd 9 211947520 60212393 5063 39.92
|
||||
|
||||
gzip, stdlib/this package:
|
||||
silesia.tar gzstd 1 211947520 80007735 1654 122.21
|
||||
silesia.tar gzkp 1 211947520 80136201 1152 175.45
|
||||
silesia.tar gzstd 1 211947520 80007735 1498 134.87
|
||||
silesia.tar gzkp 1 211947520 80088272 1009 200.31
|
||||
|
||||
GOB stream of binary data. Highly compressible.
|
||||
https://files.klauspost.com/compress/gob-stream.7z
|
||||
|
||||
file out level insize outsize millis mb/s
|
||||
gob-stream zskp 1 1911399616 235022249 3088 590.30
|
||||
gob-stream zskp 2 1911399616 205669791 3786 481.34
|
||||
gob-stream zskp 3 1911399616 175034659 9636 189.17
|
||||
gob-stream zskp 4 1911399616 165609838 50369 36.19
|
||||
gob-stream zskp 1 1911399616 233948096 3230 564.34
|
||||
gob-stream zskp 2 1911399616 203997694 4997 364.73
|
||||
gob-stream zskp 3 1911399616 173526523 13435 135.68
|
||||
gob-stream zskp 4 1911399616 162195235 47559 38.33
|
||||
|
||||
gob-stream zstd 1 1911399616 249810424 2637 691.26
|
||||
gob-stream zstd 3 1911399616 208192146 3490 522.31
|
||||
gob-stream zstd 6 1911399616 193632038 6687 272.56
|
||||
gob-stream zstd 9 1911399616 177620386 16175 112.70
|
||||
|
||||
gob-stream gzstd 1 1911399616 357382641 10251 177.82
|
||||
gob-stream gzkp 1 1911399616 359753026 5438 335.20
|
||||
gob-stream gzstd 1 1911399616 357382013 9046 201.49
|
||||
gob-stream gzkp 1 1911399616 359136669 4885 373.08
|
||||
|
||||
The test data for the Large Text Compression Benchmark is the first
|
||||
10^9 bytes of the English Wikipedia dump on Mar. 3, 2006.
|
||||
http://mattmahoney.net/dc/textdata.html
|
||||
|
||||
file out level insize outsize millis mb/s
|
||||
enwik9 zskp 1 1000000000 343848582 3609 264.18
|
||||
enwik9 zskp 2 1000000000 317276632 5746 165.97
|
||||
enwik9 zskp 3 1000000000 292243069 12162 78.41
|
||||
enwik9 zskp 4 1000000000 262183768 82837 11.51
|
||||
enwik9 zskp 1 1000000000 343833605 3687 258.64
|
||||
enwik9 zskp 2 1000000000 317001237 7672 124.29
|
||||
enwik9 zskp 3 1000000000 291915823 15923 59.89
|
||||
enwik9 zskp 4 1000000000 261710291 77697 12.27
|
||||
|
||||
enwik9 zstd 1 1000000000 358072021 3110 306.65
|
||||
enwik9 zstd 3 1000000000 313734672 4784 199.35
|
||||
enwik9 zstd 6 1000000000 295138875 10290 92.68
|
||||
enwik9 zstd 9 1000000000 278348700 28549 33.40

enwik9 gzstd 1 1000000000 382578136 9604 99.30
enwik9 gzkp 1 1000000000 383825945 6544 145.73
enwik9 gzstd 1 1000000000 382578136 8608 110.78
enwik9 gzkp 1 1000000000 382781160 5628 169.45

Highly compressible JSON file.
https://files.klauspost.com/compress/github-june-2days-2019.json.zst

file out level insize outsize millis mb/s
github-june-2days-2019.json zskp 1 6273951764 699045015 10620 563.40
github-june-2days-2019.json zskp 2 6273951764 617881763 11687 511.96
github-june-2days-2019.json zskp 3 6273951764 524340691 34043 175.75
github-june-2days-2019.json zskp 4 6273951764 470320075 170190 35.16
github-june-2days-2019.json zskp 1 6273951764 697439532 9789 611.17
github-june-2days-2019.json zskp 2 6273951764 610876538 18553 322.49
github-june-2days-2019.json zskp 3 6273951764 517662858 44186 135.41
github-june-2days-2019.json zskp 4 6273951764 464617114 165373 36.18

github-june-2days-2019.json zstd 1 6273951764 766284037 8450 708.00
github-june-2days-2019.json zstd 3 6273951764 661889476 10927 547.57
github-june-2days-2019.json zstd 6 6273951764 642756859 22996 260.18
github-june-2days-2019.json zstd 9 6273951764 601974523 52413 114.16

github-june-2days-2019.json gzstd 1 6273951764 1164400847 29948 199.79
github-june-2days-2019.json gzkp 1 6273951764 1125417694 21788 274.61
github-june-2days-2019.json gzstd 1 6273951764 1164397768 26793 223.32
github-june-2days-2019.json gzkp 1 6273951764 1120631856 17693 338.16

VM Image, Linux mint with a few installed applications:
https://files.klauspost.com/compress/rawstudio-mint14.7z

file out level insize outsize millis mb/s
rawstudio-mint14.tar zskp 1 8558382592 3667489370 20210 403.84
rawstudio-mint14.tar zskp 2 8558382592 3364592300 31873 256.07
rawstudio-mint14.tar zskp 3 8558382592 3158085214 77675 105.08
rawstudio-mint14.tar zskp 4 8558382592 2965110639 857750 9.52
rawstudio-mint14.tar zskp 1 8558382592 3718400221 18206 448.29
rawstudio-mint14.tar zskp 2 8558382592 3326118337 37074 220.15
rawstudio-mint14.tar zskp 3 8558382592 3163842361 87306 93.49
rawstudio-mint14.tar zskp 4 8558382592 2970480650 783862 10.41

rawstudio-mint14.tar zstd 1 8558382592 3609250104 17136 476.27
rawstudio-mint14.tar zstd 3 8558382592 3341679997 29262 278.92
rawstudio-mint14.tar zstd 6 8558382592 3235846406 77904 104.77
rawstudio-mint14.tar zstd 9 8558382592 3160778861 140946 57.91

rawstudio-mint14.tar gzstd 1 8558382592 3926257486 57722 141.40
rawstudio-mint14.tar gzkp 1 8558382592 3962605659 45113 180.92
rawstudio-mint14.tar gzstd 1 8558382592 3926234992 51345 158.96
rawstudio-mint14.tar gzkp 1 8558382592 3960117298 36722 222.26

CSV data:
https://files.klauspost.com/compress/nyc-taxi-data-10M.csv.zst

file out level insize outsize millis mb/s
nyc-taxi-data-10M.csv zskp 1 3325605752 641339945 8925 355.35
nyc-taxi-data-10M.csv zskp 2 3325605752 591748091 11268 281.44
nyc-taxi-data-10M.csv zskp 3 3325605752 530289687 25239 125.66
nyc-taxi-data-10M.csv zskp 4 3325605752 476268884 135958 23.33
nyc-taxi-data-10M.csv zskp 1 3325605752 641319332 9462 335.17
nyc-taxi-data-10M.csv zskp 2 3325605752 588976126 17570 180.50
nyc-taxi-data-10M.csv zskp 3 3325605752 529329260 32432 97.79
nyc-taxi-data-10M.csv zskp 4 3325605752 474949772 138025 22.98

nyc-taxi-data-10M.csv zstd 1 3325605752 687399637 8233 385.18
nyc-taxi-data-10M.csv zstd 3 3325605752 598514411 10065 315.07
nyc-taxi-data-10M.csv zstd 6 3325605752 570522953 20038 158.27
nyc-taxi-data-10M.csv zstd 9 3325605752 517554797 64565 49.12

nyc-taxi-data-10M.csv gzstd 1 3325605752 928656485 23876 132.83
nyc-taxi-data-10M.csv gzkp 1 3325605752 922257165 16780 189.00
nyc-taxi-data-10M.csv gzstd 1 3325605752 928654908 21270 149.11
nyc-taxi-data-10M.csv gzkp 1 3325605752 922273214 13929 227.68
```
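For orientation while reading these tables: `zskp` rows are this package's own encoder at its four compression levels, `zstd` rows are the cgo reference binding, and `gzstd`/`gzkp` are standard-library gzip and this package's gzip. A minimal, hedged sketch of selecting those levels with the package's public options (the payload below is invented for illustration):

```go
package main

import (
	"bytes"
	"fmt"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// An invented, fairly repetitive payload so the example is self-contained.
	payload := bytes.Repeat([]byte("some fairly compressible text\n"), 1024)

	// zskp levels 1-4 in the tables correspond to these encoder levels.
	levels := []zstd.EncoderLevel{
		zstd.SpeedFastest,
		zstd.SpeedDefault,
		zstd.SpeedBetterCompression,
		zstd.SpeedBestCompression,
	}

	for _, lvl := range levels {
		var buf bytes.Buffer
		enc, err := zstd.NewWriter(&buf, zstd.WithEncoderLevel(lvl))
		if err != nil {
			panic(err)
		}
		if _, err := enc.Write(payload); err != nil {
			panic(err)
		}
		if err := enc.Close(); err != nil {
			panic(err)
		}
		fmt.Printf("%-22v in=%d out=%d\n", lvl, len(payload), buf.Len())
	}
}
```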
## Decompressor
|
||||
|
18
vendor/github.com/klauspost/compress/zstd/blockdec.go
generated
vendored
@ -167,6 +167,11 @@ func (b *blockDec) reset(br byteBuffer, windowSize uint64) error {
|
||||
}
|
||||
return ErrCompressedSizeTooBig
|
||||
}
|
||||
// Empty compressed blocks must at least be 2 bytes
|
||||
// for Literals_Block_Type and one for Sequences_Section_Header.
|
||||
if cSize < 2 {
|
||||
return ErrBlockTooSmall
|
||||
}
|
||||
case blockTypeRaw:
|
||||
if cSize > maxCompressedBlockSize || cSize > int(b.WindowSize) {
|
||||
if debugDecoder {
|
||||
@ -491,6 +496,9 @@ func (b *blockDec) decodeCompressed(hist *history) error {
|
||||
}
|
||||
|
||||
func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
|
||||
if debugDecoder {
|
||||
printf("prepareSequences: %d byte(s) input\n", len(in))
|
||||
}
|
||||
// Decode Sequences
|
||||
// https://github.com/facebook/zstd/blob/dev/doc/zstd_compression_format.md#sequences-section
|
||||
if len(in) < 1 {
|
||||
@ -499,8 +507,6 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
|
||||
var nSeqs int
|
||||
seqHeader := in[0]
|
||||
switch {
|
||||
case seqHeader == 0:
|
||||
in = in[1:]
|
||||
case seqHeader < 128:
|
||||
nSeqs = int(seqHeader)
|
||||
in = in[1:]
|
||||
@ -517,6 +523,13 @@ func (b *blockDec) prepareSequences(in []byte, hist *history) (err error) {
|
||||
nSeqs = 0x7f00 + int(in[1]) + (int(in[2]) << 8)
|
||||
in = in[3:]
|
||||
}
|
||||
if nSeqs == 0 && len(in) != 0 {
|
||||
// When no sequences, there should not be any more data...
|
||||
if debugDecoder {
|
||||
printf("prepareSequences: 0 sequences, but %d byte(s) left on stream\n", len(in))
|
||||
}
|
||||
return ErrUnexpectedBlockSize
|
||||
}
|
||||
|
||||
var seqs = &hist.decoders
|
||||
seqs.nSeqs = nSeqs
|
||||
@ -635,6 +648,7 @@ func (b *blockDec) decodeSequences(hist *history) error {
|
||||
hist.decoders.seqSize = len(hist.decoders.literals)
|
||||
return nil
|
||||
}
|
||||
hist.decoders.windowSize = hist.windowSize
|
||||
hist.decoders.prevOffset = hist.recentOffsets
|
||||
err := hist.decoders.decode(b.sequence)
|
||||
hist.recentOffsets = hist.decoders.prevOffset
|
||||
|
13
vendor/github.com/klauspost/compress/zstd/decoder.go
generated
vendored
@ -348,10 +348,10 @@ func (d *Decoder) DecodeAll(input, dst []byte) ([]byte, error) {
|
||||
frame.history.setDict(&dict)
|
||||
}
|
||||
|
||||
if frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
|
||||
if frame.FrameContentSize != fcsUnknown && frame.FrameContentSize > d.o.maxDecodedSize-uint64(len(dst)) {
|
||||
return dst, ErrDecoderSizeExceeded
|
||||
}
|
||||
if frame.FrameContentSize > 0 && frame.FrameContentSize < 1<<30 {
|
||||
if frame.FrameContentSize < 1<<30 {
|
||||
// Never preallocate more than 1 GB up front.
|
||||
if cap(dst)-len(dst) < int(frame.FrameContentSize) {
|
||||
dst2 := make([]byte, len(dst), len(dst)+int(frame.FrameContentSize))
|
||||
@ -514,7 +514,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
|
||||
|
||||
// Check frame size (before CRC)
|
||||
d.syncStream.decodedFrame += uint64(len(d.current.b))
|
||||
if d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame > d.frame.FrameContentSize {
|
||||
if d.syncStream.decodedFrame > d.frame.FrameContentSize {
|
||||
if debugDecoder {
|
||||
printf("DecodedFrame (%d) > FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
|
||||
}
|
||||
@ -523,7 +523,7 @@ func (d *Decoder) nextBlockSync() (ok bool) {
|
||||
}
|
||||
|
||||
// Check FCS
|
||||
if d.current.d.Last && d.frame.FrameContentSize > 0 && d.syncStream.decodedFrame != d.frame.FrameContentSize {
|
||||
if d.current.d.Last && d.frame.FrameContentSize != fcsUnknown && d.syncStream.decodedFrame != d.frame.FrameContentSize {
|
||||
if debugDecoder {
|
||||
printf("DecodedFrame (%d) != FrameContentSize (%d)\n", d.syncStream.decodedFrame, d.frame.FrameContentSize)
|
||||
}
|
||||
@ -700,6 +700,7 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
|
||||
}
|
||||
hist.decoders = block.async.newHist.decoders
|
||||
hist.recentOffsets = block.async.newHist.recentOffsets
|
||||
hist.windowSize = block.async.newHist.windowSize
|
||||
if block.async.newHist.dict != nil {
|
||||
hist.setDict(block.async.newHist.dict)
|
||||
}
|
||||
@ -811,11 +812,11 @@ func (d *Decoder) startStreamDecoder(ctx context.Context, r io.Reader, output ch
|
||||
}
|
||||
if !hasErr {
|
||||
decodedFrame += uint64(len(do.b))
|
||||
if fcs > 0 && decodedFrame > fcs {
|
||||
if decodedFrame > fcs {
|
||||
println("fcs exceeded", block.Last, fcs, decodedFrame)
|
||||
do.err = ErrFrameSizeExceeded
|
||||
hasErr = true
|
||||
} else if block.Last && fcs > 0 && decodedFrame != fcs {
|
||||
} else if block.Last && fcs != fcsUnknown && decodedFrame != fcs {
|
||||
do.err = ErrFrameSizeMismatch
|
||||
hasErr = true
|
||||
} else {
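The decoder hunks above replace the `FrameContentSize > 0` guards with explicit `fcsUnknown` comparisons, so the configured size limit is enforced whenever a frame actually declares its content size. As a hedged sketch of the caller-side knob those checks respect (the 64 MiB cap is illustrative only, not a value taken from this diff):

```go
package main

import (
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Produce a small zstd frame so the example is self-contained.
	var compressed bytes.Buffer
	enc, err := zstd.NewWriter(&compressed)
	if err != nil {
		panic(err)
	}
	if _, err := enc.Write([]byte("hello zstd")); err != nil {
		panic(err)
	}
	if err := enc.Close(); err != nil {
		panic(err)
	}

	// Decode with an upper bound on decoded memory; frames that would exceed
	// the bound fail with ErrDecoderSizeExceeded instead of allocating.
	dec, err := zstd.NewReader(bytes.NewReader(compressed.Bytes()),
		zstd.WithDecoderMaxMemory(64<<20))
	if err != nil {
		panic(err)
	}
	defer dec.Close()

	out, err := io.ReadAll(dec)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(out))
}
```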
13
vendor/github.com/klauspost/compress/zstd/framedec.go
generated
vendored
@ -197,7 +197,7 @@ func (d *frameDec) reset(br byteBuffer) error {
|
||||
default:
|
||||
fcsSize = 1 << v
|
||||
}
|
||||
d.FrameContentSize = 0
|
||||
d.FrameContentSize = fcsUnknown
|
||||
if fcsSize > 0 {
|
||||
b, err := br.readSmall(fcsSize)
|
||||
if err != nil {
|
||||
@ -343,12 +343,7 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
|
||||
err = ErrDecoderSizeExceeded
|
||||
break
|
||||
}
|
||||
if d.SingleSegment && uint64(len(d.history.b)) > d.o.maxDecodedSize {
|
||||
println("runDecoder: single segment and", uint64(len(d.history.b)), ">", d.o.maxDecodedSize)
|
||||
err = ErrFrameSizeExceeded
|
||||
break
|
||||
}
|
||||
if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
|
||||
if uint64(len(d.history.b)-crcStart) > d.FrameContentSize {
|
||||
println("runDecoder: FrameContentSize exceeded", uint64(len(d.history.b)-crcStart), ">", d.FrameContentSize)
|
||||
err = ErrFrameSizeExceeded
|
||||
break
|
||||
@ -356,13 +351,13 @@ func (d *frameDec) runDecoder(dst []byte, dec *blockDec) ([]byte, error) {
|
||||
if dec.Last {
|
||||
break
|
||||
}
|
||||
if debugDecoder && d.FrameContentSize > 0 {
|
||||
if debugDecoder {
|
||||
println("runDecoder: FrameContentSize", uint64(len(d.history.b)-crcStart), "<=", d.FrameContentSize)
|
||||
}
|
||||
}
|
||||
dst = d.history.b
|
||||
if err == nil {
|
||||
if d.FrameContentSize > 0 && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
|
||||
if d.FrameContentSize != fcsUnknown && uint64(len(d.history.b)-crcStart) != d.FrameContentSize {
|
||||
err = ErrFrameSizeMismatch
|
||||
} else if d.HasCheckSum {
|
||||
var n int
|
||||
|
4
vendor/github.com/klauspost/compress/zstd/fuzz.go
generated
vendored
@ -1,5 +1,5 @@
//go:build gofuzz
// +build gofuzz
//go:build ignorecrc
// +build ignorecrc

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
4
vendor/github.com/klauspost/compress/zstd/fuzz_none.go
generated
vendored
@ -1,5 +1,5 @@
//go:build !gofuzz
// +build !gofuzz
//go:build !ignorecrc
// +build !ignorecrc

// Copyright 2019+ Klaus Post. All rights reserved.
// License information can be found in the LICENSE file.
20
vendor/github.com/klauspost/compress/zstd/seqdec.go
generated
vendored
@ -107,7 +107,10 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
||||
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
|
||||
s.seqSize = 0
|
||||
litRemain := len(s.literals)
|
||||
|
||||
maxBlockSize := maxCompressedBlockSize
|
||||
if s.windowSize < maxBlockSize {
|
||||
maxBlockSize = s.windowSize
|
||||
}
|
||||
for i := range seqs {
|
||||
var ll, mo, ml int
|
||||
if br.off > 4+((maxOffsetBits+16+16)>>3) {
|
||||
@ -192,7 +195,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
||||
}
|
||||
s.seqSize += ll + ml
|
||||
if s.seqSize > maxBlockSize {
|
||||
return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
|
||||
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
|
||||
}
|
||||
litRemain -= ll
|
||||
if litRemain < 0 {
|
||||
@ -230,7 +233,7 @@ func (s *sequenceDecs) decode(seqs []seqVals) error {
|
||||
}
|
||||
s.seqSize += litRemain
|
||||
if s.seqSize > maxBlockSize {
|
||||
return fmt.Errorf("output (%d) bigger than max block size", s.seqSize)
|
||||
return fmt.Errorf("output (%d) bigger than max block size (%d)", s.seqSize, maxBlockSize)
|
||||
}
|
||||
err := br.close()
|
||||
if err != nil {
|
||||
@ -347,6 +350,10 @@ func (s *sequenceDecs) decodeSync(history *history) error {
|
||||
llState, mlState, ofState := s.litLengths.state.state, s.matchLengths.state.state, s.offsets.state.state
|
||||
hist := history.b[history.ignoreBuffer:]
|
||||
out := s.out
|
||||
maxBlockSize := maxCompressedBlockSize
|
||||
if s.windowSize < maxBlockSize {
|
||||
maxBlockSize = s.windowSize
|
||||
}
|
||||
|
||||
for i := seqs - 1; i >= 0; i-- {
|
||||
if br.overread() {
|
||||
@ -426,7 +433,7 @@ func (s *sequenceDecs) decodeSync(history *history) error {
|
||||
}
|
||||
size := ll + ml + len(out)
|
||||
if size-startSize > maxBlockSize {
|
||||
return fmt.Errorf("output (%d) bigger than max block size", size)
|
||||
return fmt.Errorf("output (%d) bigger than max block size (%d)", size, maxBlockSize)
|
||||
}
|
||||
if size > cap(out) {
|
||||
// Not enough size, which can happen under high volume block streaming conditions
|
||||
@ -535,6 +542,11 @@ func (s *sequenceDecs) decodeSync(history *history) error {
|
||||
}
|
||||
}
|
||||
|
||||
// Check if space for literals
|
||||
if len(s.literals)+len(s.out)-startSize > maxBlockSize {
|
||||
return fmt.Errorf("output (%d) bigger than max block size (%d)", len(s.out), maxBlockSize)
|
||||
}
|
||||
|
||||
// Add final literals
|
||||
s.out = append(out, s.literals...)
|
||||
return br.close()
|
||||
|
18
vendor/github.com/klauspost/compress/zstd/zip.go
generated
vendored
@ -20,7 +20,7 @@ const ZipMethodPKWare = 20
|
||||
|
||||
var zipReaderPool sync.Pool
|
||||
|
||||
// newZipReader cannot be used since we would leak goroutines...
|
||||
// newZipReader creates a pooled zip decompressor.
|
||||
func newZipReader(r io.Reader) io.ReadCloser {
|
||||
dec, ok := zipReaderPool.Get().(*Decoder)
|
||||
if ok {
|
||||
@ -44,10 +44,14 @@ func (r *pooledZipReader) Read(p []byte) (n int, err error) {
|
||||
r.mu.Lock()
|
||||
defer r.mu.Unlock()
|
||||
if r.dec == nil {
|
||||
return 0, errors.New("Read after Close")
|
||||
return 0, errors.New("read after close or EOF")
|
||||
}
|
||||
dec, err := r.dec.Read(p)
|
||||
|
||||
if err == io.EOF {
|
||||
err = r.dec.Reset(nil)
|
||||
zipReaderPool.Put(r.dec)
|
||||
r.dec = nil
|
||||
}
|
||||
return dec, err
|
||||
}
|
||||
|
||||
@ -112,11 +116,5 @@ func ZipCompressor(opts ...EOption) func(w io.Writer) (io.WriteCloser, error) {
|
||||
// ZipDecompressor returns a decompressor that can be registered with zip libraries.
|
||||
// See ZipCompressor for example.
|
||||
func ZipDecompressor() func(r io.Reader) io.ReadCloser {
|
||||
return func(r io.Reader) io.ReadCloser {
|
||||
d, err := NewReader(r, WithDecoderConcurrency(1), WithDecoderLowmem(true))
|
||||
if err != nil {
|
||||
panic(err)
|
||||
}
|
||||
return d.IOReadCloser()
|
||||
}
|
||||
return newZipReader
|
||||
}
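The zip.go hunks above switch `ZipDecompressor` over to the pooled `newZipReader`. For context, a hedged sketch of how these helpers are typically registered with `archive/zip` (entry name and contents below are invented for the example):

```go
package main

import (
	"archive/zip"
	"bytes"
	"fmt"
	"io"

	"github.com/klauspost/compress/zstd"
)

func main() {
	// Write a zip archive whose entry is zstd-compressed (WinZip method 93).
	var buf bytes.Buffer
	zw := zip.NewWriter(&buf)
	zw.RegisterCompressor(zstd.ZipMethodWinZip, zstd.ZipCompressor())

	w, err := zw.CreateHeader(&zip.FileHeader{Name: "hello.txt", Method: zstd.ZipMethodWinZip})
	if err != nil {
		panic(err)
	}
	if _, err := w.Write([]byte("hello from a zstd zip entry")); err != nil {
		panic(err)
	}
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Read it back, registering the matching decompressor.
	zr, err := zip.NewReader(bytes.NewReader(buf.Bytes()), int64(buf.Len()))
	if err != nil {
		panic(err)
	}
	zr.RegisterDecompressor(zstd.ZipMethodWinZip, zstd.ZipDecompressor())

	rc, err := zr.File[0].Open()
	if err != nil {
		panic(err)
	}
	defer rc.Close()
	data, err := io.ReadAll(rc)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(data))
}
```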
7
vendor/github.com/klauspost/compress/zstd/zstd.go
generated
vendored
@ -39,6 +39,9 @@ const zstdMinMatch = 3
|
||||
// Reset the buffer offset when reaching this.
|
||||
const bufferReset = math.MaxInt32 - MaxWindowSize
|
||||
|
||||
// fcsUnknown is used for unknown frame content size.
|
||||
const fcsUnknown = math.MaxUint64
|
||||
|
||||
var (
|
||||
// ErrReservedBlockType is returned when a reserved block type is found.
|
||||
// Typically this indicates wrong or corrupted input.
|
||||
@ -52,6 +55,10 @@ var (
|
||||
// Typically returned on invalid input.
|
||||
ErrBlockTooSmall = errors.New("block too small")
|
||||
|
||||
// ErrUnexpectedBlockSize is returned when a block has unexpected size.
|
||||
// Typically returned on invalid input.
|
||||
ErrUnexpectedBlockSize = errors.New("unexpected block size")
|
||||
|
||||
// ErrMagicMismatch is returned when a "magic" number isn't what is expected.
|
||||
// Typically this indicates wrong or corrupted input.
|
||||
ErrMagicMismatch = errors.New("invalid input: magic number mismatch")
|
||||
|
14
vendor/github.com/miekg/pkcs11/.travis.yml
generated
vendored
@ -1,14 +0,0 @@
language: go
sudo: required
dist: trusty

go:
- 1.9
- tip

script:
- go test -v ./...

before_script:
- sudo apt-get update
- sudo apt-get -y install libsofthsm
10
vendor/github.com/miekg/pkcs11/README.md
generated
vendored
@ -1,6 +1,6 @@
# PKCS#11 [](https://travis-ci.org/miekg/pkcs11) [](http://godoc.org/github.com/miekg/pkcs11)
# PKCS#11

This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom were
This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom where
it makes sense. It has been tested with SoftHSM.

## SoftHSM
@ -13,10 +13,10 @@ it makes sense. It has been tested with SoftHSM.
softhsm --init-token --slot 0 --label test --pin 1234
~~~

* Then use `libsofthsm.so` as the pkcs11 module:
* Then use `libsofthsm2.so` as the pkcs11 module:

~~~ go
p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so")
p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
~~~

## Examples
@ -24,7 +24,7 @@ it makes sense. It has been tested with SoftHSM.
A skeleton program would look somewhat like this (yes, pkcs#11 is verbose):

~~~ go
p := pkcs11.New("/usr/lib/softhsm/libsofthsm.so")
p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
err := p.Initialize()
if err != nil {
panic(err)
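The README hunk above is cut off by the diff context. Purely for orientation, and not as the vendored README's exact text, a hedged sketch of a complete SoftHSM session using this package's public API could look like this (module path and PIN are placeholders):

```go
package main

import (
	"fmt"

	"github.com/miekg/pkcs11"
)

func main() {
	p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
	if err := p.Initialize(); err != nil {
		panic(err)
	}
	defer p.Destroy()
	defer p.Finalize()

	slots, err := p.GetSlotList(true)
	if err != nil {
		panic(err)
	}

	session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
	if err != nil {
		panic(err)
	}
	defer p.CloseSession(session)

	if err := p.Login(session, pkcs11.CKU_USER, "1234"); err != nil {
		panic(err)
	}
	defer p.Logout(session)

	// Hash a short message with the token.
	if err := p.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA_1, nil)}); err != nil {
		panic(err)
	}
	hash, err := p.Digest(session, []byte("this is a string"))
	if err != nil {
		panic(err)
	}
	fmt.Printf("%x\n", hash)
}
```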
11
vendor/github.com/miekg/pkcs11/pkcs11.go
generated
vendored
@ -2,6 +2,8 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:generate go run const_generate.go
|
||||
|
||||
// Package pkcs11 is a wrapper around the PKCS#11 cryptographic library.
|
||||
package pkcs11
|
||||
|
||||
@ -14,7 +16,7 @@ package pkcs11
|
||||
#cgo windows CFLAGS: -DPACKED_STRUCTURES
|
||||
#cgo linux LDFLAGS: -ldl
|
||||
#cgo darwin LDFLAGS: -ldl
|
||||
#cgo openbsd LDFLAGS: -ldl
|
||||
#cgo openbsd LDFLAGS:
|
||||
#cgo freebsd LDFLAGS: -ldl
|
||||
|
||||
#include <stdlib.h>
|
||||
@ -770,9 +772,10 @@ static inline CK_VOID_PTR getAttributePval(CK_ATTRIBUTE_PTR a)
|
||||
|
||||
*/
|
||||
import "C"
|
||||
import "strings"
|
||||
|
||||
import "unsafe"
|
||||
import (
|
||||
"strings"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Ctx contains the current pkcs11 context.
|
||||
type Ctx struct {
|
||||
|
3
vendor/github.com/miekg/pkcs11/release.go
generated
vendored
@ -1,3 +1,4 @@
//go:build release
// +build release

package pkcs11
@ -5,7 +6,7 @@ package pkcs11
import "fmt"

// Release is current version of the pkcs11 library.
var Release = R{1, 0, 3}
var Release = R{1, 1, 1}

// R holds the version of this library.
type R struct {
12
vendor/github.com/miekg/pkcs11/types.go
generated
vendored
@ -182,8 +182,20 @@ func NewAttribute(typ uint, x interface{}) *Attribute {
|
||||
}
|
||||
case int:
|
||||
a.Value = uintToBytes(uint64(v))
|
||||
case int16:
|
||||
a.Value = uintToBytes(uint64(v))
|
||||
case int32:
|
||||
a.Value = uintToBytes(uint64(v))
|
||||
case int64:
|
||||
a.Value = uintToBytes(uint64(v))
|
||||
case uint:
|
||||
a.Value = uintToBytes(uint64(v))
|
||||
case uint16:
|
||||
a.Value = uintToBytes(uint64(v))
|
||||
case uint32:
|
||||
a.Value = uintToBytes(uint64(v))
|
||||
case uint64:
|
||||
a.Value = uintToBytes(uint64(v))
|
||||
case string:
|
||||
a.Value = []byte(v)
|
||||
case []byte:
|
||||
|
196
vendor/github.com/miekg/pkcs11/const.go → vendor/github.com/miekg/pkcs11/zconst.go
generated
vendored
@ -2,48 +2,18 @@
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// Code generated by "go run const_generate.go"; DO NOT EDIT.
|
||||
|
||||
package pkcs11
|
||||
|
||||
const (
|
||||
CKU_SO uint = 0
|
||||
CKU_USER uint = 1
|
||||
CKU_CONTEXT_SPECIFIC uint = 2
|
||||
)
|
||||
|
||||
const (
|
||||
CKO_DATA uint = 0x00000000
|
||||
CKO_CERTIFICATE uint = 0x00000001
|
||||
CKO_PUBLIC_KEY uint = 0x00000002
|
||||
CKO_PRIVATE_KEY uint = 0x00000003
|
||||
CKO_SECRET_KEY uint = 0x00000004
|
||||
CKO_HW_FEATURE uint = 0x00000005
|
||||
CKO_DOMAIN_PARAMETERS uint = 0x00000006
|
||||
CKO_MECHANISM uint = 0x00000007
|
||||
CKO_OTP_KEY uint = 0x00000008
|
||||
CKO_VENDOR_DEFINED uint = 0x80000000
|
||||
)
|
||||
|
||||
const (
|
||||
CKG_MGF1_SHA1 uint = 0x00000001
|
||||
CKG_MGF1_SHA224 uint = 0x00000005
|
||||
CKG_MGF1_SHA256 uint = 0x00000002
|
||||
CKG_MGF1_SHA384 uint = 0x00000003
|
||||
CKG_MGF1_SHA512 uint = 0x00000004
|
||||
CKG_MGF1_SHA3_224 uint = 0x00000006
|
||||
CKG_MGF1_SHA3_256 uint = 0x00000007
|
||||
CKG_MGF1_SHA3_384 uint = 0x00000008
|
||||
CKG_MGF1_SHA3_512 uint = 0x00000009
|
||||
)
|
||||
|
||||
const (
|
||||
CKZ_DATA_SPECIFIED uint = 0x00000001
|
||||
)
|
||||
|
||||
// Generated with: awk '/#define CK[AFKMRC]/{ print $2 " = " $3 }' pkcs11t.h | sed -e 's/UL$//g' -e 's/UL)$/)/g'
|
||||
|
||||
// All the flag (CKF_), attribute (CKA_), error code (CKR_), key type (CKK_), certificate type (CKC_) and
|
||||
// mechanism (CKM_) constants as defined in PKCS#11.
|
||||
const (
|
||||
CK_TRUE = 1
|
||||
CK_FALSE = 0
|
||||
CK_UNAVAILABLE_INFORMATION = ^uint(0)
|
||||
CK_EFFECTIVELY_INFINITE = 0
|
||||
CK_INVALID_HANDLE = 0
|
||||
CKN_SURRENDER = 0
|
||||
CKN_OTP_CHANGED = 1
|
||||
CKF_TOKEN_PRESENT = 0x00000001
|
||||
CKF_REMOVABLE_DEVICE = 0x00000002
|
||||
CKF_HW_SLOT = 0x00000004
|
||||
@ -66,12 +36,34 @@ const (
|
||||
CKF_SO_PIN_LOCKED = 0x00400000
|
||||
CKF_SO_PIN_TO_BE_CHANGED = 0x00800000
|
||||
CKF_ERROR_STATE = 0x01000000
|
||||
CKU_SO = 0
|
||||
CKU_USER = 1
|
||||
CKU_CONTEXT_SPECIFIC = 2
|
||||
CKS_RO_PUBLIC_SESSION = 0
|
||||
CKS_RO_USER_FUNCTIONS = 1
|
||||
CKS_RW_PUBLIC_SESSION = 2
|
||||
CKS_RW_USER_FUNCTIONS = 3
|
||||
CKS_RW_SO_FUNCTIONS = 4
|
||||
CKF_RW_SESSION = 0x00000002
|
||||
CKF_SERIAL_SESSION = 0x00000004
|
||||
CKO_DATA = 0x00000000
|
||||
CKO_CERTIFICATE = 0x00000001
|
||||
CKO_PUBLIC_KEY = 0x00000002
|
||||
CKO_PRIVATE_KEY = 0x00000003
|
||||
CKO_SECRET_KEY = 0x00000004
|
||||
CKO_HW_FEATURE = 0x00000005
|
||||
CKO_DOMAIN_PARAMETERS = 0x00000006
|
||||
CKO_MECHANISM = 0x00000007
|
||||
CKO_OTP_KEY = 0x00000008
|
||||
CKO_VENDOR_DEFINED = 0x80000000
|
||||
CKH_MONOTONIC_COUNTER = 0x00000001
|
||||
CKH_CLOCK = 0x00000002
|
||||
CKH_USER_INTERFACE = 0x00000003
|
||||
CKH_VENDOR_DEFINED = 0x80000000
|
||||
CKK_RSA = 0x00000000
|
||||
CKK_DSA = 0x00000001
|
||||
CKK_DH = 0x00000002
|
||||
CKK_ECDSA = 0x00000003
|
||||
CKK_ECDSA = 0x00000003 // Deprecated
|
||||
CKK_EC = 0x00000003
|
||||
CKK_X9_42_DH = 0x00000004
|
||||
CKK_KEA = 0x00000005
|
||||
@ -83,7 +75,7 @@ const (
|
||||
CKK_DES3 = 0x00000015
|
||||
CKK_CAST = 0x00000016
|
||||
CKK_CAST3 = 0x00000017
|
||||
CKK_CAST5 = 0x00000018
|
||||
CKK_CAST5 = 0x00000018 // Deprecated
|
||||
CKK_CAST128 = 0x00000018
|
||||
CKK_RC5 = 0x00000019
|
||||
CKK_IDEA = 0x0000001A
|
||||
@ -99,14 +91,14 @@ const (
|
||||
CKK_ACTI = 0x00000024
|
||||
CKK_CAMELLIA = 0x00000025
|
||||
CKK_ARIA = 0x00000026
|
||||
CKK_SHA512_224_HMAC = 0x00000027
|
||||
CKK_SHA512_256_HMAC = 0x00000028
|
||||
CKK_SHA512_T_HMAC = 0x00000029
|
||||
CKK_MD5_HMAC = 0x00000027
|
||||
CKK_SHA_1_HMAC = 0x00000028
|
||||
CKK_SHA224_HMAC = 0x0000002E
|
||||
CKK_RIPEMD128_HMAC = 0x00000029
|
||||
CKK_RIPEMD160_HMAC = 0x0000002A
|
||||
CKK_SHA256_HMAC = 0x0000002B
|
||||
CKK_SHA384_HMAC = 0x0000002C
|
||||
CKK_SHA512_HMAC = 0x0000002D
|
||||
CKK_SHA224_HMAC = 0x0000002E
|
||||
CKK_SEED = 0x0000002F
|
||||
CKK_GOSTR3410 = 0x00000030
|
||||
CKK_GOSTR3411 = 0x00000031
|
||||
@ -116,11 +108,26 @@ const (
|
||||
CKK_SHA3_384_HMAC = 0x00000035
|
||||
CKK_SHA3_512_HMAC = 0x00000036
|
||||
CKK_VENDOR_DEFINED = 0x80000000
|
||||
CK_CERTIFICATE_CATEGORY_UNSPECIFIED = 0
|
||||
CK_CERTIFICATE_CATEGORY_TOKEN_USER = 1
|
||||
CK_CERTIFICATE_CATEGORY_AUTHORITY = 2
|
||||
CK_CERTIFICATE_CATEGORY_OTHER_ENTITY = 3
|
||||
CK_SECURITY_DOMAIN_UNSPECIFIED = 0
|
||||
CK_SECURITY_DOMAIN_MANUFACTURER = 1
|
||||
CK_SECURITY_DOMAIN_OPERATOR = 2
|
||||
CK_SECURITY_DOMAIN_THIRD_PARTY = 3
|
||||
CKC_X_509 = 0x00000000
|
||||
CKC_X_509_ATTR_CERT = 0x00000001
|
||||
CKC_WTLS = 0x00000002
|
||||
CKC_VENDOR_DEFINED = 0x80000000
|
||||
CKF_ARRAY_ATTRIBUTE = 0x40000000
|
||||
CK_OTP_FORMAT_DECIMAL = 0
|
||||
CK_OTP_FORMAT_HEXADECIMAL = 1
|
||||
CK_OTP_FORMAT_ALPHANUMERIC = 2
|
||||
CK_OTP_FORMAT_BINARY = 3
|
||||
CK_OTP_PARAM_IGNORED = 0
|
||||
CK_OTP_PARAM_OPTIONAL = 1
|
||||
CK_OTP_PARAM_MANDATORY = 2
|
||||
CKA_CLASS = 0x00000000
|
||||
CKA_TOKEN = 0x00000001
|
||||
CKA_PRIVATE = 0x00000002
|
||||
@ -183,15 +190,16 @@ const (
|
||||
CKA_MODIFIABLE = 0x00000170
|
||||
CKA_COPYABLE = 0x00000171
|
||||
CKA_DESTROYABLE = 0x00000172
|
||||
CKA_ECDSA_PARAMS = 0x00000180
|
||||
CKA_ECDSA_PARAMS = 0x00000180 // Deprecated
|
||||
CKA_EC_PARAMS = 0x00000180
|
||||
CKA_EC_POINT = 0x00000181
|
||||
CKA_SECONDARY_AUTH = 0x00000200
|
||||
CKA_AUTH_PIN_FLAGS = 0x00000201
|
||||
CKA_SECONDARY_AUTH = 0x00000200 // Deprecated
|
||||
CKA_AUTH_PIN_FLAGS = 0x00000201 // Deprecated
|
||||
CKA_ALWAYS_AUTHENTICATE = 0x00000202
|
||||
CKA_WRAP_WITH_TRUSTED = 0x00000210
|
||||
CKA_WRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000211
|
||||
CKA_UNWRAP_TEMPLATE = CKF_ARRAY_ATTRIBUTE | 0x00000212
|
||||
CKA_WRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000211)
|
||||
CKA_UNWRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000212)
|
||||
CKA_DERIVE_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000213)
|
||||
CKA_OTP_FORMAT = 0x00000220
|
||||
CKA_OTP_LENGTH = 0x00000221
|
||||
CKA_OTP_TIME_INTERVAL = 0x00000222
|
||||
@ -226,7 +234,7 @@ const (
|
||||
CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501
|
||||
CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502
|
||||
CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503
|
||||
CKA_ALLOWED_MECHANISMS = CKF_ARRAY_ATTRIBUTE | 0x00000600
|
||||
CKA_ALLOWED_MECHANISMS = (CKF_ARRAY_ATTRIBUTE | 0x00000600)
|
||||
CKA_VENDOR_DEFINED = 0x80000000
|
||||
CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000
|
||||
CKM_RSA_PKCS = 0x00000001
|
||||
@ -246,11 +254,10 @@ const (
|
||||
CKM_DSA_KEY_PAIR_GEN = 0x00000010
|
||||
CKM_DSA = 0x00000011
|
||||
CKM_DSA_SHA1 = 0x00000012
|
||||
CKM_DSA_FIPS_G_GEN = 0x00000013
|
||||
CKM_DSA_SHA224 = 0x00000014
|
||||
CKM_DSA_SHA256 = 0x00000015
|
||||
CKM_DSA_SHA384 = 0x00000016
|
||||
CKM_DSA_SHA512 = 0x00000017
|
||||
CKM_DSA_SHA224 = 0x00000013
|
||||
CKM_DSA_SHA256 = 0x00000014
|
||||
CKM_DSA_SHA384 = 0x00000015
|
||||
CKM_DSA_SHA512 = 0x00000016
|
||||
CKM_DSA_SHA3_224 = 0x00000018
|
||||
CKM_DSA_SHA3_256 = 0x00000019
|
||||
CKM_DSA_SHA3_384 = 0x0000001A
|
||||
@ -387,13 +394,13 @@ const (
|
||||
CKM_CAST128_KEY_GEN = 0x00000320
|
||||
CKM_CAST5_ECB = 0x00000321
|
||||
CKM_CAST128_ECB = 0x00000321
|
||||
CKM_CAST5_CBC = 0x00000322
|
||||
CKM_CAST5_CBC = 0x00000322 // Deprecated
|
||||
CKM_CAST128_CBC = 0x00000322
|
||||
CKM_CAST5_MAC = 0x00000323
|
||||
CKM_CAST5_MAC = 0x00000323 // Deprecated
|
||||
CKM_CAST128_MAC = 0x00000323
|
||||
CKM_CAST5_MAC_GENERAL = 0x00000324
|
||||
CKM_CAST5_MAC_GENERAL = 0x00000324 // Deprecated
|
||||
CKM_CAST128_MAC_GENERAL = 0x00000324
|
||||
CKM_CAST5_CBC_PAD = 0x00000325
|
||||
CKM_CAST5_CBC_PAD = 0x00000325 // Deprecated
|
||||
CKM_CAST128_CBC_PAD = 0x00000325
|
||||
CKM_RC5_KEY_GEN = 0x00000330
|
||||
CKM_RC5_ECB = 0x00000331
|
||||
@ -441,9 +448,9 @@ const (
|
||||
CKM_PBE_MD5_DES_CBC = 0x000003A1
|
||||
CKM_PBE_MD5_CAST_CBC = 0x000003A2
|
||||
CKM_PBE_MD5_CAST3_CBC = 0x000003A3
|
||||
CKM_PBE_MD5_CAST5_CBC = 0x000003A4
|
||||
CKM_PBE_MD5_CAST5_CBC = 0x000003A4 // Deprecated
|
||||
CKM_PBE_MD5_CAST128_CBC = 0x000003A4
|
||||
CKM_PBE_SHA1_CAST5_CBC = 0x000003A5
|
||||
CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 // Deprecated
|
||||
CKM_PBE_SHA1_CAST128_CBC = 0x000003A5
|
||||
CKM_PBE_SHA1_RC4_128 = 0x000003A6
|
||||
CKM_PBE_SHA1_RC4_40 = 0x000003A7
|
||||
@ -522,7 +529,7 @@ const (
|
||||
CKM_BATON_COUNTER = 0x00001034
|
||||
CKM_BATON_SHUFFLE = 0x00001035
|
||||
CKM_BATON_WRAP = 0x00001036
|
||||
CKM_ECDSA_KEY_PAIR_GEN = 0x00001040
|
||||
CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 // Deprecated
|
||||
CKM_EC_KEY_PAIR_GEN = 0x00001040
|
||||
CKM_ECDSA = 0x00001041
|
||||
CKM_ECDSA_SHA1 = 0x00001042
|
||||
@ -551,9 +558,9 @@ const (
|
||||
CKM_AES_CTR = 0x00001086
|
||||
CKM_AES_GCM = 0x00001087
|
||||
CKM_AES_CCM = 0x00001088
|
||||
CKM_AES_CMAC_GENERAL = 0x00001089
|
||||
CKM_AES_CTS = 0x00001089
|
||||
CKM_AES_CMAC = 0x0000108A
|
||||
CKM_AES_CTS = 0x0000108B
|
||||
CKM_AES_CMAC_GENERAL = 0x0000108B
|
||||
CKM_AES_XCBC_MAC = 0x0000108C
|
||||
CKM_AES_XCBC_MAC_96 = 0x0000108D
|
||||
CKM_AES_GMAC = 0x0000108E
|
||||
@ -704,33 +711,56 @@ const (
|
||||
CKR_MUTEX_NOT_LOCKED = 0x000001A1
|
||||
CKR_NEW_PIN_MODE = 0x000001B0
|
||||
CKR_NEXT_OTP = 0x000001B1
|
||||
CKR_EXCEEDED_MAX_ITERATIONS = 0x000001C0
|
||||
CKR_FIPS_SELF_TEST_FAILED = 0x000001C1
|
||||
CKR_LIBRARY_LOAD_FAILED = 0x000001C2
|
||||
CKR_PIN_TOO_WEAK = 0x000001C3
|
||||
CKR_PUBLIC_KEY_INVALID = 0x000001C4
|
||||
CKR_EXCEEDED_MAX_ITERATIONS = 0x000001B5
|
||||
CKR_FIPS_SELF_TEST_FAILED = 0x000001B6
|
||||
CKR_LIBRARY_LOAD_FAILED = 0x000001B7
|
||||
CKR_PIN_TOO_WEAK = 0x000001B8
|
||||
CKR_PUBLIC_KEY_INVALID = 0x000001B9
|
||||
CKR_FUNCTION_REJECTED = 0x00000200
|
||||
CKR_VENDOR_DEFINED = 0x80000000
|
||||
CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001
|
||||
CKF_OS_LOCKING_OK = 0x00000002
|
||||
CKF_DONT_BLOCK = 1
|
||||
CKG_MGF1_SHA1 = 0x00000001
|
||||
CKG_MGF1_SHA256 = 0x00000002
|
||||
CKG_MGF1_SHA384 = 0x00000003
|
||||
CKG_MGF1_SHA512 = 0x00000004
|
||||
CKG_MGF1_SHA224 = 0x00000005
|
||||
CKZ_DATA_SPECIFIED = 0x00000001
|
||||
CKD_NULL = 0x00000001
|
||||
CKD_SHA1_KDF = 0x00000002
|
||||
CKD_SHA1_KDF_ASN1 = 0x00000003
|
||||
CKD_SHA1_KDF_CONCATENATE = 0x00000004
|
||||
CKD_SHA224_KDF = 0x00000005
|
||||
CKD_SHA256_KDF = 0x00000006
|
||||
CKD_SHA384_KDF = 0x00000007
|
||||
CKD_SHA512_KDF = 0x00000008
|
||||
CKD_CPDIVERSIFY_KDF = 0x00000009
|
||||
CKD_SHA3_224_KDF = 0x0000000A
|
||||
CKD_SHA3_256_KDF = 0x0000000B
|
||||
CKD_SHA3_384_KDF = 0x0000000C
|
||||
CKD_SHA3_512_KDF = 0x0000000D
|
||||
CKP_PKCS5_PBKD2_HMAC_SHA1 = 0x00000001
|
||||
CKP_PKCS5_PBKD2_HMAC_GOSTR3411 = 0x00000002
|
||||
CKP_PKCS5_PBKD2_HMAC_SHA224 = 0x00000003
|
||||
CKP_PKCS5_PBKD2_HMAC_SHA256 = 0x00000004
|
||||
CKP_PKCS5_PBKD2_HMAC_SHA384 = 0x00000005
|
||||
CKP_PKCS5_PBKD2_HMAC_SHA512 = 0x00000006
|
||||
CKP_PKCS5_PBKD2_HMAC_SHA512_224 = 0x00000007
|
||||
CKP_PKCS5_PBKD2_HMAC_SHA512_256 = 0x00000008
|
||||
CKZ_SALT_SPECIFIED = 0x00000001
|
||||
CK_OTP_VALUE = 0
|
||||
CK_OTP_PIN = 1
|
||||
CK_OTP_CHALLENGE = 2
|
||||
CK_OTP_TIME = 3
|
||||
CK_OTP_COUNTER = 4
|
||||
CK_OTP_FLAGS = 5
|
||||
CK_OTP_OUTPUT_LENGTH = 6
|
||||
CK_OTP_OUTPUT_FORMAT = 7
|
||||
CKF_NEXT_OTP = 0x00000001
|
||||
CKF_EXCLUDE_TIME = 0x00000002
|
||||
CKF_EXCLUDE_COUNTER = 0x00000004
|
||||
CKF_EXCLUDE_CHALLENGE = 0x00000008
|
||||
CKF_EXCLUDE_PIN = 0x00000010
|
||||
CKF_USER_FRIENDLY_OTP = 0x00000020
|
||||
CKD_NULL = 0x00000001
|
||||
CKD_SHA1_KDF = 0x00000002
|
||||
)
|
||||
|
||||
// Special return values defined in PKCS#11 v2.40 section 3.2.
|
||||
const (
|
||||
// CK_EFFECTIVELY_INFINITE may be returned in the CK_TOKEN_INFO fields ulMaxSessionCount and ulMaxRwSessionCount.
|
||||
// It indicates there is no practical limit on the number of sessions.
|
||||
CK_EFFECTIVELY_INFINITE = 0
|
||||
|
||||
// CK_UNAVAILABLE_INFORMATION may be returned for several fields within CK_TOKEN_INFO. It indicates
|
||||
// the token is unable or unwilling to provide the requested information.
|
||||
CK_UNAVAILABLE_INFORMATION = ^uint(0)
|
||||
)
|
4
vendor/github.com/moby/buildkit/client/client.go
generated
vendored
@ -4,9 +4,9 @@ import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"crypto/x509"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/containerd/containerd/defaults"
|
||||
@ -212,7 +212,7 @@ func WithCredentials(serverName, ca, cert, key string) ClientOpt {
|
||||
}
|
||||
|
||||
func loadCredentials(opts *withCredentials) (grpc.DialOption, error) {
|
||||
ca, err := ioutil.ReadFile(opts.CACert)
|
||||
ca, err := os.ReadFile(opts.CACert)
|
||||
if err != nil {
|
||||
return nil, errors.Wrap(err, "could not read ca certificate")
|
||||
}
|
||||
|
3
vendor/github.com/moby/buildkit/client/llb/marshal.go
generated
vendored
@ -2,7 +2,6 @@ package llb

import (
"io"
"io/ioutil"

"github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/solver/pb"
@ -67,7 +66,7 @@ func WriteTo(def *Definition, w io.Writer) error {
}

func ReadFrom(r io.Reader) (*Definition, error) {
b, err := ioutil.ReadAll(r)
b, err := io.ReadAll(r)
if err != nil {
return nil, err
}
15
vendor/github.com/moby/buildkit/client/llb/state.go
generated
vendored
@ -230,13 +230,7 @@ func (s State) WithOutput(o Output) State {
|
||||
}
|
||||
|
||||
func (s State) WithImageConfig(c []byte) (State, error) {
|
||||
var img struct {
|
||||
Config struct {
|
||||
Env []string `json:"Env,omitempty"`
|
||||
WorkingDir string `json:"WorkingDir,omitempty"`
|
||||
User string `json:"User,omitempty"`
|
||||
} `json:"config,omitempty"`
|
||||
}
|
||||
var img ocispecs.Image
|
||||
if err := json.Unmarshal(c, &img); err != nil {
|
||||
return State{}, err
|
||||
}
|
||||
@ -251,6 +245,13 @@ func (s State) WithImageConfig(c []byte) (State, error) {
|
||||
}
|
||||
}
|
||||
s = s.Dir(img.Config.WorkingDir)
|
||||
if img.Architecture != "" && img.OS != "" {
|
||||
s = s.Platform(ocispecs.Platform{
|
||||
OS: img.OS,
|
||||
Architecture: img.Architecture,
|
||||
Variant: img.Variant,
|
||||
})
|
||||
}
|
||||
return s, nil
|
||||
}
|
||||
|
||||
|
6
vendor/github.com/moby/buildkit/client/ociindex/ociindex.go
generated
vendored
@ -2,7 +2,7 @@ package ociindex
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"os"
|
||||
|
||||
"github.com/gofrs/flock"
|
||||
@ -62,7 +62,7 @@ func PutDescToIndexJSONFileLocked(indexJSONPath string, desc ocispecs.Descriptor
|
||||
}
|
||||
defer f.Close()
|
||||
var idx ocispecs.Index
|
||||
b, err := ioutil.ReadAll(f)
|
||||
b, err := io.ReadAll(f)
|
||||
if err != nil {
|
||||
return errors.Wrapf(err, "could not read %s", indexJSONPath)
|
||||
}
|
||||
@ -101,7 +101,7 @@ func ReadIndexJSONFileLocked(indexJSONPath string) (*ocispecs.Index, error) {
|
||||
lock.Unlock()
|
||||
os.RemoveAll(lockPath)
|
||||
}()
|
||||
b, err := ioutil.ReadFile(indexJSONPath)
|
||||
b, err := os.ReadFile(indexJSONPath)
|
||||
if err != nil {
|
||||
return nil, errors.Wrapf(err, "could not read %s", indexJSONPath)
|
||||
}
|
||||
|
5
vendor/github.com/moby/buildkit/session/auth/authprovider/tokenseed.go
generated
vendored
@ -3,7 +3,6 @@ package authprovider
|
||||
import (
|
||||
"crypto/rand"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"sync"
|
||||
@ -47,7 +46,7 @@ func (ts *tokenSeeds) getSeed(host string) ([]byte, error) {
|
||||
fp := filepath.Join(ts.dir, ".token_seed")
|
||||
|
||||
// we include client side randomness to avoid chosen plaintext attack from the daemon side
|
||||
dt, err := ioutil.ReadFile(fp)
|
||||
dt, err := os.ReadFile(fp)
|
||||
if err != nil {
|
||||
if !errors.Is(err, os.ErrNotExist) && !errors.Is(err, syscall.ENOTDIR) && !errors.Is(err, os.ErrPermission) {
|
||||
return nil, err
|
||||
@ -68,7 +67,7 @@ func (ts *tokenSeeds) getSeed(host string) ([]byte, error) {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := ioutil.WriteFile(fp, dt, 0600); err != nil {
|
||||
if err := os.WriteFile(fp, dt, 0600); err != nil {
|
||||
if !errors.Is(err, syscall.EROFS) && !errors.Is(err, os.ErrPermission) {
|
||||
return nil, err
|
||||
}
|
||||
|
3
vendor/github.com/moby/buildkit/session/secrets/secretsprovider/store.go
generated
vendored
@ -2,7 +2,6 @@ package secretsprovider

import (
"context"
"io/ioutil"
"os"

"github.com/moby/buildkit/session/secrets"
@ -57,7 +56,7 @@ func (fs *fileStore) GetSecret(ctx context.Context, id string) ([]byte, error) {
if v.Env != "" {
return []byte(os.Getenv(v.Env)), nil
}
dt, err := ioutil.ReadFile(v.FilePath)
dt, err := os.ReadFile(v.FilePath)
if err != nil {
return nil, err
}
3
vendor/github.com/moby/buildkit/session/sshforward/ssh.go
generated
vendored
@ -1,7 +1,6 @@
|
||||
package sshforward
|
||||
|
||||
import (
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"path/filepath"
|
||||
@ -64,7 +63,7 @@ type SocketOpt struct {
|
||||
}
|
||||
|
||||
func MountSSHSocket(ctx context.Context, c session.Caller, opt SocketOpt) (sockPath string, closer func() error, err error) {
|
||||
dir, err := ioutil.TempDir("", ".buildkit-ssh-sock")
|
||||
dir, err := os.MkdirTemp("", ".buildkit-ssh-sock")
|
||||
if err != nil {
|
||||
return "", nil, errors.WithStack(err)
|
||||
}
|
||||
|
3
vendor/github.com/moby/buildkit/session/sshforward/sshprovider/agentprovider.go
generated
vendored
@ -3,7 +3,6 @@ package sshprovider
|
||||
import (
|
||||
"context"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"net"
|
||||
"os"
|
||||
"runtime"
|
||||
@ -166,7 +165,7 @@ func toAgentSource(paths []string) (source, error) {
|
||||
if err != nil {
|
||||
return source{}, errors.Wrapf(err, "failed to open %s", p)
|
||||
}
|
||||
dt, err := ioutil.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
|
||||
dt, err := io.ReadAll(&io.LimitedReader{R: f, N: 100 * 1024})
|
||||
if err != nil {
|
||||
return source{}, errors.Wrapf(err, "failed to read %s", p)
|
||||
}
|
||||
|
2
vendor/github.com/moby/buildkit/solver/pb/ops.pb.go
generated
vendored
@ -1038,7 +1038,7 @@ func (m *SecretOpt) GetOptional() bool {
return false
}

// SSHOpt defines options describing secret mounts
// SSHOpt defines options describing ssh mounts
type SSHOpt struct {
// ID of exposed ssh rule. Used for quering the value.
ID string `protobuf:"bytes,1,opt,name=ID,proto3" json:"ID,omitempty"`
2
vendor/github.com/moby/buildkit/solver/pb/ops.proto
generated
vendored
@ -157,7 +157,7 @@ message SecretOpt {
bool optional = 5;
}

// SSHOpt defines options describing secret mounts
// SSHOpt defines options describing ssh mounts
message SSHOpt {
// ID of exposed ssh rule. Used for quering the value.
string ID = 1;
4
vendor/github.com/moby/buildkit/util/contentutil/buffer.go
generated
vendored
@ -3,7 +3,7 @@ package contentutil
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
@ -64,7 +64,7 @@ func (b *buffer) ReaderAt(ctx context.Context, desc ocispecs.Descriptor) (conten
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &readerAt{Reader: r, Closer: ioutil.NopCloser(r), size: int64(r.Len())}, nil
|
||||
return &readerAt{Reader: r, Closer: io.NopCloser(r), size: int64(r.Len())}, nil
|
||||
}
|
||||
|
||||
func (b *buffer) getBytesReader(ctx context.Context, dgst digest.Digest) (*bytes.Reader, error) {
|
||||
|
4
vendor/github.com/moby/buildkit/util/imageutil/schema1.go
generated
vendored
@ -3,7 +3,7 @@ package imageutil
|
||||
import (
|
||||
"context"
|
||||
"encoding/json"
|
||||
"io/ioutil"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@ -19,7 +19,7 @@ func readSchema1Config(ctx context.Context, ref string, desc ocispecs.Descriptor
|
||||
return "", nil, err
|
||||
}
|
||||
defer rc.Close()
|
||||
dt, err := ioutil.ReadAll(rc)
|
||||
dt, err := io.ReadAll(rc)
|
||||
if err != nil {
|
||||
return "", nil, errors.Wrap(err, "failed to fetch schema1 manifest")
|
||||
}
|
||||
|
6
vendor/github.com/moby/buildkit/util/progress/progressui/printer.go
generated
vendored
@ -256,7 +256,7 @@ func (p *textMux) print(t *trace) {
|
||||
}
|
||||
// make any open vertex active
|
||||
for dgst, v := range t.byDigest {
|
||||
if v.isStarted() && !v.isCompleted() {
|
||||
if v.isStarted() && !v.isCompleted() && v.ProgressGroup == nil && !v.hidden {
|
||||
p.printVtx(t, dgst)
|
||||
return
|
||||
}
|
||||
@ -281,6 +281,10 @@ func (p *textMux) print(t *trace) {
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
if v.lastBlockTime == nil {
|
||||
// shouldn't happen, but not worth crashing over
|
||||
continue
|
||||
}
|
||||
tm := now.Sub(*v.lastBlockTime)
|
||||
speed := float64(v.count) / tm.Seconds()
|
||||
overLimit := tm > maxDelay && dgst != current
|
||||
|
2
vendor/github.com/moby/buildkit/util/resolver/retryhandler/retry.go
generated
vendored
@ -60,7 +60,7 @@ func retryError(err error) bool {
return true
}
// catches TLS timeout or other network-related temporary errors
if ne, ok := errors.Cause(err).(net.Error); ok && ne.Temporary() {
if ne, ok := errors.Cause(err).(net.Error); ok && ne.Temporary() { //nolint:staticcheck // ignoring "SA1019: Temporary is deprecated", continue to propagate net.Error through the "temporary" status
return true
}
// https://github.com/containerd/containerd/pull/4724
188
vendor/golang.org/x/crypto/ed25519/ed25519.go
generated
vendored
@ -1,13 +1,7 @@
|
||||
// Copyright 2016 The Go Authors. All rights reserved.
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
// In Go 1.13, the ed25519 package was promoted to the standard library as
|
||||
// crypto/ed25519, and this package became a wrapper for the standard library one.
|
||||
//
|
||||
//go:build !go1.13
|
||||
// +build !go1.13
|
||||
|
||||
// Package ed25519 implements the Ed25519 signature algorithm. See
|
||||
// https://ed25519.cr.yp.to/.
|
||||
//
|
||||
@ -16,21 +10,15 @@
|
||||
// representation includes a public key suffix to make multiple signing
|
||||
// operations with the same key more efficient. This package refers to the RFC
|
||||
// 8032 private key as the “seed”.
|
||||
//
|
||||
// Beginning with Go 1.13, the functionality of this package was moved to the
|
||||
// standard library as crypto/ed25519. This package only acts as a compatibility
|
||||
// wrapper.
|
||||
package ed25519
|
||||
|
||||
// This code is a port of the public domain, “ref10” implementation of ed25519
|
||||
// from SUPERCOP.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"crypto"
|
||||
cryptorand "crypto/rand"
|
||||
"crypto/sha512"
|
||||
"errors"
|
||||
"crypto/ed25519"
|
||||
"io"
|
||||
"strconv"
|
||||
|
||||
"golang.org/x/crypto/ed25519/internal/edwards25519"
|
||||
)
|
||||
|
||||
const (
|
||||
@ -45,57 +33,21 @@ const (
|
||||
)
|
||||
|
||||
// PublicKey is the type of Ed25519 public keys.
|
||||
type PublicKey []byte
|
||||
//
|
||||
// This type is an alias for crypto/ed25519's PublicKey type.
|
||||
// See the crypto/ed25519 package for the methods on this type.
|
||||
type PublicKey = ed25519.PublicKey
|
||||
|
||||
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
|
||||
type PrivateKey []byte
|
||||
|
||||
// Public returns the PublicKey corresponding to priv.
|
||||
func (priv PrivateKey) Public() crypto.PublicKey {
|
||||
publicKey := make([]byte, PublicKeySize)
|
||||
copy(publicKey, priv[32:])
|
||||
return PublicKey(publicKey)
|
||||
}
|
||||
|
||||
// Seed returns the private key seed corresponding to priv. It is provided for
|
||||
// interoperability with RFC 8032. RFC 8032's private keys correspond to seeds
|
||||
// in this package.
|
||||
func (priv PrivateKey) Seed() []byte {
|
||||
seed := make([]byte, SeedSize)
|
||||
copy(seed, priv[:32])
|
||||
return seed
|
||||
}
|
||||
|
||||
// Sign signs the given message with priv.
|
||||
// Ed25519 performs two passes over messages to be signed and therefore cannot
|
||||
// handle pre-hashed messages. Thus opts.HashFunc() must return zero to
|
||||
// indicate the message hasn't been hashed. This can be achieved by passing
|
||||
// crypto.Hash(0) as the value for opts.
|
||||
func (priv PrivateKey) Sign(rand io.Reader, message []byte, opts crypto.SignerOpts) (signature []byte, err error) {
|
||||
if opts.HashFunc() != crypto.Hash(0) {
|
||||
return nil, errors.New("ed25519: cannot sign hashed message")
|
||||
}
|
||||
|
||||
return Sign(priv, message), nil
|
||||
}
|
||||
//
|
||||
// This type is an alias for crypto/ed25519's PrivateKey type.
|
||||
// See the crypto/ed25519 package for the methods on this type.
|
||||
type PrivateKey = ed25519.PrivateKey
|
||||
|
||||
// GenerateKey generates a public/private key pair using entropy from rand.
|
||||
// If rand is nil, crypto/rand.Reader will be used.
|
||||
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
|
||||
if rand == nil {
|
||||
rand = cryptorand.Reader
|
||||
}
|
||||
|
||||
seed := make([]byte, SeedSize)
|
||||
if _, err := io.ReadFull(rand, seed); err != nil {
|
||||
return nil, nil, err
|
||||
}
|
||||
|
||||
privateKey := NewKeyFromSeed(seed)
|
||||
publicKey := make([]byte, PublicKeySize)
|
||||
copy(publicKey, privateKey[32:])
|
||||
|
||||
return publicKey, privateKey, nil
|
||||
return ed25519.GenerateKey(rand)
|
||||
}
|
||||
|
||||
// NewKeyFromSeed calculates a private key from a seed. It will panic if
|
||||
@ -103,121 +55,17 @@ func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
|
||||
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
|
||||
// package.
|
||||
func NewKeyFromSeed(seed []byte) PrivateKey {
|
||||
if l := len(seed); l != SeedSize {
|
||||
panic("ed25519: bad seed length: " + strconv.Itoa(l))
|
||||
}
|
||||
|
||||
digest := sha512.Sum512(seed)
|
||||
digest[0] &= 248
|
||||
digest[31] &= 127
|
||||
digest[31] |= 64
|
||||
|
||||
var A edwards25519.ExtendedGroupElement
|
||||
var hBytes [32]byte
|
||||
copy(hBytes[:], digest[:])
|
||||
edwards25519.GeScalarMultBase(&A, &hBytes)
|
||||
var publicKeyBytes [32]byte
|
||||
A.ToBytes(&publicKeyBytes)
|
||||
|
||||
privateKey := make([]byte, PrivateKeySize)
|
||||
copy(privateKey, seed)
|
||||
copy(privateKey[32:], publicKeyBytes[:])
|
||||
|
||||
return privateKey
|
||||
return ed25519.NewKeyFromSeed(seed)
|
||||
}
|
||||
|
||||
// Sign signs the message with privateKey and returns a signature. It will
|
||||
// panic if len(privateKey) is not PrivateKeySize.
|
||||
func Sign(privateKey PrivateKey, message []byte) []byte {
|
||||
if l := len(privateKey); l != PrivateKeySize {
|
||||
panic("ed25519: bad private key length: " + strconv.Itoa(l))
|
||||
}
|
||||
|
||||
h := sha512.New()
|
||||
h.Write(privateKey[:32])
|
||||
|
||||
var digest1, messageDigest, hramDigest [64]byte
|
||||
var expandedSecretKey [32]byte
|
||||
h.Sum(digest1[:0])
|
||||
copy(expandedSecretKey[:], digest1[:])
|
||||
expandedSecretKey[0] &= 248
|
||||
expandedSecretKey[31] &= 63
|
||||
expandedSecretKey[31] |= 64
|
||||
|
||||
h.Reset()
|
||||
h.Write(digest1[32:])
|
||||
h.Write(message)
|
||||
h.Sum(messageDigest[:0])
|
||||
|
||||
var messageDigestReduced [32]byte
|
||||
edwards25519.ScReduce(&messageDigestReduced, &messageDigest)
|
||||
var R edwards25519.ExtendedGroupElement
|
||||
edwards25519.GeScalarMultBase(&R, &messageDigestReduced)
|
||||
|
||||
var encodedR [32]byte
|
||||
R.ToBytes(&encodedR)
|
||||
|
||||
h.Reset()
|
||||
h.Write(encodedR[:])
|
||||
h.Write(privateKey[32:])
|
||||
h.Write(message)
|
||||
h.Sum(hramDigest[:0])
|
||||
var hramDigestReduced [32]byte
|
||||
edwards25519.ScReduce(&hramDigestReduced, &hramDigest)
|
||||
|
||||
var s [32]byte
|
||||
edwards25519.ScMulAdd(&s, &hramDigestReduced, &expandedSecretKey, &messageDigestReduced)
|
||||
|
||||
signature := make([]byte, SignatureSize)
|
||||
copy(signature[:], encodedR[:])
|
||||
copy(signature[32:], s[:])
|
||||
|
||||
return signature
|
||||
return ed25519.Sign(privateKey, message)
|
||||
}
|
||||
|
||||
// Verify reports whether sig is a valid signature of message by publicKey. It
|
||||
// will panic if len(publicKey) is not PublicKeySize.
|
||||
func Verify(publicKey PublicKey, message, sig []byte) bool {
|
||||
if l := len(publicKey); l != PublicKeySize {
|
||||
panic("ed25519: bad public key length: " + strconv.Itoa(l))
|
||||
}
|
||||
|
||||
if len(sig) != SignatureSize || sig[63]&224 != 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var A edwards25519.ExtendedGroupElement
|
||||
var publicKeyBytes [32]byte
|
||||
copy(publicKeyBytes[:], publicKey)
|
||||
if !A.FromBytes(&publicKeyBytes) {
|
||||
return false
|
||||
}
|
||||
edwards25519.FeNeg(&A.X, &A.X)
|
||||
edwards25519.FeNeg(&A.T, &A.T)
|
||||
|
||||
h := sha512.New()
|
||||
h.Write(sig[:32])
|
||||
h.Write(publicKey[:])
|
||||
h.Write(message)
|
||||
var digest [64]byte
|
||||
h.Sum(digest[:0])
|
||||
|
||||
var hReduced [32]byte
|
||||
edwards25519.ScReduce(&hReduced, &digest)
|
||||
|
||||
var R edwards25519.ProjectiveGroupElement
|
||||
var s [32]byte
|
||||
copy(s[:], sig[32:])
|
||||
|
||||
// https://tools.ietf.org/html/rfc8032#section-5.1.7 requires that s be in
|
||||
// the range [0, order) in order to prevent signature malleability.
|
||||
if !edwards25519.ScMinimal(&s) {
|
||||
return false
|
||||
}
|
||||
|
||||
edwards25519.GeDoubleScalarMultVartime(&R, &hReduced, &A, &s)
|
||||
|
||||
var checkR [32]byte
|
||||
R.ToBytes(&checkR)
|
||||
return bytes.Equal(sig[:32], checkR[:])
|
||||
return ed25519.Verify(publicKey, message, sig)
|
||||
}
|
||||
|
74
vendor/golang.org/x/crypto/ed25519/ed25519_go113.go
generated
vendored
@ -1,74 +0,0 @@
|
||||
// Copyright 2019 The Go Authors. All rights reserved.
|
||||
// Use of this source code is governed by a BSD-style
|
||||
// license that can be found in the LICENSE file.
|
||||
|
||||
//go:build go1.13
|
||||
// +build go1.13
|
||||
|
||||
// Package ed25519 implements the Ed25519 signature algorithm. See
|
||||
// https://ed25519.cr.yp.to/.
|
||||
//
|
||||
// These functions are also compatible with the “Ed25519” function defined in
|
||||
// RFC 8032. However, unlike RFC 8032's formulation, this package's private key
|
||||
// representation includes a public key suffix to make multiple signing
|
||||
// operations with the same key more efficient. This package refers to the RFC
|
||||
// 8032 private key as the “seed”.
|
||||
//
|
||||
// Beginning with Go 1.13, the functionality of this package was moved to the
|
||||
// standard library as crypto/ed25519. This package only acts as a compatibility
|
||||
// wrapper.
|
||||
package ed25519
|
||||
|
||||
import (
|
||||
"crypto/ed25519"
|
||||
"io"
|
||||
)
|
||||
|
||||
const (
|
||||
// PublicKeySize is the size, in bytes, of public keys as used in this package.
|
||||
PublicKeySize = 32
|
||||
// PrivateKeySize is the size, in bytes, of private keys as used in this package.
|
||||
PrivateKeySize = 64
|
||||
// SignatureSize is the size, in bytes, of signatures generated and verified by this package.
|
||||
SignatureSize = 64
|
||||
// SeedSize is the size, in bytes, of private key seeds. These are the private key representations used by RFC 8032.
|
||||
SeedSize = 32
|
||||
)
|
||||
|
||||
// PublicKey is the type of Ed25519 public keys.
|
||||
//
|
||||
// This type is an alias for crypto/ed25519's PublicKey type.
|
||||
// See the crypto/ed25519 package for the methods on this type.
|
||||
type PublicKey = ed25519.PublicKey
|
||||
|
||||
// PrivateKey is the type of Ed25519 private keys. It implements crypto.Signer.
|
||||
//
|
||||
// This type is an alias for crypto/ed25519's PrivateKey type.
|
||||
// See the crypto/ed25519 package for the methods on this type.
|
||||
type PrivateKey = ed25519.PrivateKey
|
||||
|
||||
// GenerateKey generates a public/private key pair using entropy from rand.
|
||||
// If rand is nil, crypto/rand.Reader will be used.
|
||||
func GenerateKey(rand io.Reader) (PublicKey, PrivateKey, error) {
|
||||
return ed25519.GenerateKey(rand)
|
||||
}
|
||||
|
||||
// NewKeyFromSeed calculates a private key from a seed. It will panic if
|
||||
// len(seed) is not SeedSize. This function is provided for interoperability
|
||||
// with RFC 8032. RFC 8032's private keys correspond to seeds in this
|
||||
// package.
|
||||
func NewKeyFromSeed(seed []byte) PrivateKey {
|
||||
return ed25519.NewKeyFromSeed(seed)
|
||||
}
|
||||
|
||||
// Sign signs the message with privateKey and returns a signature. It will
|
||||
// panic if len(privateKey) is not PrivateKeySize.
|
||||
func Sign(privateKey PrivateKey, message []byte) []byte {
|
||||
return ed25519.Sign(privateKey, message)
|
||||
}
|
||||
|
||||
// Verify reports whether sig is a valid signature of message by publicKey. It
|
||||
// will panic if len(publicKey) is not PublicKeySize.
|
||||
func Verify(publicKey PublicKey, message, sig []byte) bool {
|
||||
return ed25519.Verify(publicKey, message, sig)
|
||||
}
|
1422
vendor/golang.org/x/crypto/ed25519/internal/edwards25519/const.go
generated
vendored
File diff suppressed because it is too large
1793
vendor/golang.org/x/crypto/ed25519/internal/edwards25519/edwards25519.go
generated
vendored
File diff suppressed because it is too large
26
vendor/golang.org/x/crypto/ssh/agent/client.go
generated
vendored
@ -25,7 +25,6 @@ import (
|
||||
"math/big"
|
||||
"sync"
|
||||
|
||||
"crypto"
|
||||
"golang.org/x/crypto/ed25519"
|
||||
"golang.org/x/crypto/ssh"
|
||||
)
|
||||
@ -771,19 +770,26 @@ func (s *agentKeyringSigner) Sign(rand io.Reader, data []byte) (*ssh.Signature,
|
||||
return s.agent.Sign(s.pub, data)
|
||||
}
|
||||
|
||||
func (s *agentKeyringSigner) SignWithOpts(rand io.Reader, data []byte, opts crypto.SignerOpts) (*ssh.Signature, error) {
|
||||
var flags SignatureFlags
|
||||
if opts != nil {
|
||||
switch opts.HashFunc() {
|
||||
case crypto.SHA256:
|
||||
flags = SignatureFlagRsaSha256
|
||||
case crypto.SHA512:
|
||||
flags = SignatureFlagRsaSha512
|
||||
}
|
||||
func (s *agentKeyringSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*ssh.Signature, error) {
|
||||
if algorithm == "" || algorithm == s.pub.Type() {
|
||||
return s.Sign(rand, data)
|
||||
}
|
||||
|
||||
var flags SignatureFlags
|
||||
switch algorithm {
|
||||
case ssh.KeyAlgoRSASHA256:
|
||||
flags = SignatureFlagRsaSha256
|
||||
case ssh.KeyAlgoRSASHA512:
|
||||
flags = SignatureFlagRsaSha512
|
||||
default:
|
||||
return nil, fmt.Errorf("agent: unsupported algorithm %q", algorithm)
|
||||
}
|
||||
|
||||
return s.agent.SignWithFlags(s.pub, data, flags)
|
||||
}
|
||||
|
||||
var _ ssh.AlgorithmSigner = &agentKeyringSigner{}
|
||||
|
||||
// Calls an extension method. It is up to the agent implementation as to whether or not
|
||||
// any particular extension is supported and may always return an error. Because the
|
||||
// type of the response is up to the implementation, this returns the bytes of the
|
||||
|
6
vendor/golang.org/x/crypto/ssh/agent/keyring.go
generated
vendored
@ -113,7 +113,7 @@ func (r *keyring) Unlock(passphrase []byte) error {
|
||||
|
||||
// expireKeysLocked removes expired keys from the keyring. If a key was added
|
||||
// with a lifetimesecs contraint and seconds >= lifetimesecs seconds have
|
||||
// ellapsed, it is removed. The caller *must* be holding the keyring mutex.
|
||||
// elapsed, it is removed. The caller *must* be holding the keyring mutex.
|
||||
func (r *keyring) expireKeysLocked() {
|
||||
for _, k := range r.keys {
|
||||
if k.expire != nil && time.Now().After(*k.expire) {
|
||||
@ -205,9 +205,9 @@ func (r *keyring) SignWithFlags(key ssh.PublicKey, data []byte, flags SignatureF
|
||||
var algorithm string
|
||||
switch flags {
|
||||
case SignatureFlagRsaSha256:
|
||||
algorithm = ssh.SigAlgoRSASHA2256
|
||||
algorithm = ssh.KeyAlgoRSASHA256
|
||||
case SignatureFlagRsaSha512:
|
||||
algorithm = ssh.SigAlgoRSASHA2512
|
||||
algorithm = ssh.KeyAlgoRSASHA512
|
||||
default:
|
||||
return nil, fmt.Errorf("agent: unsupported signature flags: %d", flags)
|
||||
}
|
||||
|
93
vendor/golang.org/x/crypto/ssh/certs.go
generated
vendored
@@ -14,8 +14,10 @@ import (
	"time"
)

// These constants from [PROTOCOL.certkeys] represent the key algorithm names
// for certificate types supported by this package.
// Certificate algorithm names from [PROTOCOL.certkeys]. These values can appear
// in Certificate.Type, PublicKey.Type, and ClientConfig.HostKeyAlgorithms.
// Unlike key algorithm names, these are not passed to AlgorithmSigner and don't
// appear in the Signature.Format field.
const (
	CertAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
	CertAlgoDSAv01 = "ssh-dss-cert-v01@openssh.com"
@@ -25,14 +27,21 @@ const (
	CertAlgoSKECDSA256v01 = "sk-ecdsa-sha2-nistp256-cert-v01@openssh.com"
	CertAlgoED25519v01 = "ssh-ed25519-cert-v01@openssh.com"
	CertAlgoSKED25519v01 = "sk-ssh-ed25519-cert-v01@openssh.com"

	// CertAlgoRSASHA256v01 and CertAlgoRSASHA512v01 can't appear as a
	// Certificate.Type (or PublicKey.Type), but only in
	// ClientConfig.HostKeyAlgorithms.
	CertAlgoRSASHA256v01 = "rsa-sha2-256-cert-v01@openssh.com"
	CertAlgoRSASHA512v01 = "rsa-sha2-512-cert-v01@openssh.com"
)

// These constants from [PROTOCOL.certkeys] represent additional signature
// algorithm names for certificate types supported by this package.
const (
	CertSigAlgoRSAv01 = "ssh-rsa-cert-v01@openssh.com"
	CertSigAlgoRSASHA2256v01 = "rsa-sha2-256-cert-v01@openssh.com"
	CertSigAlgoRSASHA2512v01 = "rsa-sha2-512-cert-v01@openssh.com"
	// Deprecated: use CertAlgoRSAv01.
	CertSigAlgoRSAv01 = CertAlgoRSAv01
	// Deprecated: use CertAlgoRSASHA256v01.
	CertSigAlgoRSASHA2256v01 = CertAlgoRSASHA256v01
	// Deprecated: use CertAlgoRSASHA512v01.
	CertSigAlgoRSASHA2512v01 = CertAlgoRSASHA512v01
)

// Certificate types distinguish between host and user
@@ -431,10 +440,14 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
	}
	c.SignatureKey = authority.PublicKey()

	if v, ok := authority.(AlgorithmSigner); ok {
		if v.PublicKey().Type() == KeyAlgoRSA {
			authority = &rsaSigner{v, SigAlgoRSASHA2512}
	// Default to KeyAlgoRSASHA512 for ssh-rsa signers.
	if v, ok := authority.(AlgorithmSigner); ok && v.PublicKey().Type() == KeyAlgoRSA {
		sig, err := v.SignWithAlgorithm(rand, c.bytesForSigning(), KeyAlgoRSASHA512)
		if err != nil {
			return err
		}
		c.Signature = sig
		return nil
	}

	sig, err := authority.Sign(rand, c.bytesForSigning())
@@ -445,32 +458,40 @@ func (c *Certificate) SignCert(rand io.Reader, authority Signer) error {
	return nil
}

// certAlgoNames includes a mapping from signature algorithms to the
// corresponding certificate signature algorithm. When a key type (such
// as ED25516) is associated with only one algorithm, the KeyAlgo
// constant is used instead of the SigAlgo.
var certAlgoNames = map[string]string{
	SigAlgoRSA: CertSigAlgoRSAv01,
	SigAlgoRSASHA2256: CertSigAlgoRSASHA2256v01,
	SigAlgoRSASHA2512: CertSigAlgoRSASHA2512v01,
	KeyAlgoDSA: CertAlgoDSAv01,
	KeyAlgoECDSA256: CertAlgoECDSA256v01,
	KeyAlgoECDSA384: CertAlgoECDSA384v01,
	KeyAlgoECDSA521: CertAlgoECDSA521v01,
	KeyAlgoSKECDSA256: CertAlgoSKECDSA256v01,
	KeyAlgoED25519: CertAlgoED25519v01,
	KeyAlgoSKED25519: CertAlgoSKED25519v01,
// certKeyAlgoNames is a mapping from known certificate algorithm names to the
// corresponding public key signature algorithm.
var certKeyAlgoNames = map[string]string{
	CertAlgoRSAv01: KeyAlgoRSA,
	CertAlgoRSASHA256v01: KeyAlgoRSASHA256,
	CertAlgoRSASHA512v01: KeyAlgoRSASHA512,
	CertAlgoDSAv01: KeyAlgoDSA,
	CertAlgoECDSA256v01: KeyAlgoECDSA256,
	CertAlgoECDSA384v01: KeyAlgoECDSA384,
	CertAlgoECDSA521v01: KeyAlgoECDSA521,
	CertAlgoSKECDSA256v01: KeyAlgoSKECDSA256,
	CertAlgoED25519v01: KeyAlgoED25519,
	CertAlgoSKED25519v01: KeyAlgoSKED25519,
}

// certToPrivAlgo returns the underlying algorithm for a certificate algorithm.
// Panics if a non-certificate algorithm is passed.
func certToPrivAlgo(algo string) string {
	for privAlgo, pubAlgo := range certAlgoNames {
		if pubAlgo == algo {
			return privAlgo
// underlyingAlgo returns the signature algorithm associated with algo (which is
// an advertised or negotiated public key or host key algorithm). These are
// usually the same, except for certificate algorithms.
func underlyingAlgo(algo string) string {
	if a, ok := certKeyAlgoNames[algo]; ok {
		return a
	}
	return algo
}

// certificateAlgo returns the certificate algorithms that uses the provided
// underlying signature algorithm.
func certificateAlgo(algo string) (certAlgo string, ok bool) {
	for certName, algoName := range certKeyAlgoNames {
		if algoName == algo {
			return certName, true
		}
	}
	panic("unknown cert algorithm")
	return "", false
}

func (cert *Certificate) bytesForSigning() []byte {
@@ -514,13 +535,13 @@ func (c *Certificate) Marshal() []byte {
	return result
}

// Type returns the key name. It is part of the PublicKey interface.
// Type returns the certificate algorithm name. It is part of the PublicKey interface.
func (c *Certificate) Type() string {
	algo, ok := certAlgoNames[c.Key.Type()]
	certName, ok := certificateAlgo(c.Key.Type())
	if !ok {
		panic("unknown cert key type " + c.Key.Type())
		panic("unknown certificate type for key type " + c.Key.Type())
	}
	return algo
	return certName
}

// Verify verifies a signature against the certificate's public
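The SignCert change above makes an RSA certificate authority default to rsa-sha2-512 instead of the legacy SHA-1 ssh-rsa signature. A small sketch with throwaway in-memory keys showing the resulting signature format; key sizes, key IDs, and principals are illustrative only.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Throwaway CA and user keys, generated in memory for illustration.
	caKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	caSigner, err := ssh.NewSignerFromKey(caKey)
	if err != nil {
		log.Fatal(err)
	}

	userKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	userPub, err := ssh.NewPublicKey(&userKey.PublicKey)
	if err != nil {
		log.Fatal(err)
	}

	cert := &ssh.Certificate{
		Key:             userPub,
		CertType:        ssh.UserCert,
		KeyId:           "example-user",
		ValidPrincipals: []string{"example"},
		ValidBefore:     ssh.CertTimeInfinity,
	}
	// With the hunk above, an RSA CA signer that supports algorithm
	// selection signs with rsa-sha2-512 rather than ssh-rsa (SHA-1).
	if err := cert.SignCert(rand.Reader, caSigner); err != nil {
		log.Fatal(err)
	}
	fmt.Println("cert type:", cert.Type())                  // ssh-rsa-cert-v01@openssh.com
	fmt.Println("signature format:", cert.Signature.Format) // expected: rsa-sha2-512
}
```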
25 vendor/golang.org/x/crypto/ssh/client.go (generated, vendored)
@@ -113,25 +113,16 @@ func (c *connection) clientHandshake(dialAddress string, config *ClientConfig) e
	return c.clientAuthenticate(config)
}

// verifyHostKeySignature verifies the host key obtained in the key
// exchange.
// verifyHostKeySignature verifies the host key obtained in the key exchange.
// algo is the negotiated algorithm, and may be a certificate type.
func verifyHostKeySignature(hostKey PublicKey, algo string, result *kexResult) error {
	sig, rest, ok := parseSignatureBody(result.Signature)
	if len(rest) > 0 || !ok {
		return errors.New("ssh: signature parse error")
	}

	// For keys, underlyingAlgo is exactly algo. For certificates,
	// we have to look up the underlying key algorithm that SSH
	// uses to evaluate signatures.
	underlyingAlgo := algo
	for sigAlgo, certAlgo := range certAlgoNames {
		if certAlgo == algo {
			underlyingAlgo = sigAlgo
		}
	}
	if sig.Format != underlyingAlgo {
		return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, underlyingAlgo)
	if a := underlyingAlgo(algo); sig.Format != a {
		return fmt.Errorf("ssh: invalid signature algorithm %q, expected %q", sig.Format, a)
	}

	return hostKey.Verify(result.H, sig)
@@ -237,11 +228,11 @@ type ClientConfig struct {
	// be used for the connection. If empty, a reasonable default is used.
	ClientVersion string

	// HostKeyAlgorithms lists the key types that the client will
	// accept from the server as host key, in order of
	// HostKeyAlgorithms lists the public key algorithms that the client will
	// accept from the server for host key authentication, in order of
	// preference. If empty, a reasonable default is used. Any
	// string returned from PublicKey.Type method may be used, or
	// any of the CertAlgoXxxx and KeyAlgoXxxx constants.
	// string returned from a PublicKey.Type method may be used, or
	// any of the CertAlgo and KeyAlgo constants.
	HostKeyAlgorithms []string

	// Timeout is the maximum amount of time for the TCP connection to establish.
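Per the updated ClientConfig documentation above, HostKeyAlgorithms now accepts any of the CertAlgo and KeyAlgo constants. A hedged sketch of a client preferring the new SHA-2 RSA host key algorithms; the address, user, and password are placeholders.

```go
package main

import (
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Hypothetical address and credentials, purely for illustration.
	config := &ssh.ClientConfig{
		User: "example",
		Auth: []ssh.AuthMethod{ssh.Password("secret")},
		// Any CertAlgo or KeyAlgo constant may be listed here; this
		// ordering prefers SHA-2 RSA host keys over legacy ssh-rsa.
		HostKeyAlgorithms: []string{
			ssh.CertAlgoRSASHA512v01,
			ssh.CertAlgoRSASHA256v01,
			ssh.KeyAlgoRSASHA512,
			ssh.KeyAlgoRSASHA256,
			ssh.KeyAlgoED25519,
		},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not use in production
	}

	client, err := ssh.Dial("tcp", "server.example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```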
132 vendor/golang.org/x/crypto/ssh/client_auth.go (generated, vendored)
@ -9,6 +9,7 @@ import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
)
|
||||
|
||||
type authResult int
|
||||
@ -29,6 +30,33 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// The server may choose to send a SSH_MSG_EXT_INFO at this point (if we
|
||||
// advertised willingness to receive one, which we always do) or not. See
|
||||
// RFC 8308, Section 2.4.
|
||||
extensions := make(map[string][]byte)
|
||||
if len(packet) > 0 && packet[0] == msgExtInfo {
|
||||
var extInfo extInfoMsg
|
||||
if err := Unmarshal(packet, &extInfo); err != nil {
|
||||
return err
|
||||
}
|
||||
payload := extInfo.Payload
|
||||
for i := uint32(0); i < extInfo.NumExtensions; i++ {
|
||||
name, rest, ok := parseString(payload)
|
||||
if !ok {
|
||||
return parseError(msgExtInfo)
|
||||
}
|
||||
value, rest, ok := parseString(rest)
|
||||
if !ok {
|
||||
return parseError(msgExtInfo)
|
||||
}
|
||||
extensions[string(name)] = value
|
||||
payload = rest
|
||||
}
|
||||
packet, err = c.transport.readPacket()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
var serviceAccept serviceAcceptMsg
|
||||
if err := Unmarshal(packet, &serviceAccept); err != nil {
|
||||
return err
|
||||
@ -41,7 +69,7 @@ func (c *connection) clientAuthenticate(config *ClientConfig) error {
|
||||
|
||||
sessionID := c.transport.getSessionID()
|
||||
for auth := AuthMethod(new(noneAuth)); auth != nil; {
|
||||
ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand)
|
||||
ok, methods, err := auth.auth(sessionID, config.User, c.transport, config.Rand, extensions)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
@ -93,7 +121,7 @@ type AuthMethod interface {
|
||||
// If authentication is not successful, a []string of alternative
|
||||
// method names is returned. If the slice is nil, it will be ignored
|
||||
// and the previous set of possible methods will be reused.
|
||||
auth(session []byte, user string, p packetConn, rand io.Reader) (authResult, []string, error)
|
||||
auth(session []byte, user string, p packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error)
|
||||
|
||||
// method returns the RFC 4252 method name.
|
||||
method() string
|
||||
@ -102,7 +130,7 @@ type AuthMethod interface {
|
||||
// "none" authentication, RFC 4252 section 5.2.
|
||||
type noneAuth int
|
||||
|
||||
func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
|
||||
func (n *noneAuth) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
|
||||
if err := c.writePacket(Marshal(&userAuthRequestMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
@ -122,7 +150,7 @@ func (n *noneAuth) method() string {
|
||||
// a function call, e.g. by prompting the user.
|
||||
type passwordCallback func() (password string, err error)
|
||||
|
||||
func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
|
||||
func (cb passwordCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
|
||||
type passwordAuthMsg struct {
|
||||
User string `sshtype:"50"`
|
||||
Service string
|
||||
@ -189,7 +217,46 @@ func (cb publicKeyCallback) method() string {
|
||||
return "publickey"
|
||||
}
|
||||
|
||||
func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
|
||||
func pickSignatureAlgorithm(signer Signer, extensions map[string][]byte) (as AlgorithmSigner, algo string) {
|
||||
keyFormat := signer.PublicKey().Type()
|
||||
|
||||
// Like in sendKexInit, if the public key implements AlgorithmSigner we
|
||||
// assume it supports all algorithms, otherwise only the key format one.
|
||||
as, ok := signer.(AlgorithmSigner)
|
||||
if !ok {
|
||||
return algorithmSignerWrapper{signer}, keyFormat
|
||||
}
|
||||
|
||||
extPayload, ok := extensions["server-sig-algs"]
|
||||
if !ok {
|
||||
// If there is no "server-sig-algs" extension, fall back to the key
|
||||
// format algorithm.
|
||||
return as, keyFormat
|
||||
}
|
||||
|
||||
// The server-sig-algs extension only carries underlying signature
|
||||
// algorithm, but we are trying to select a protocol-level public key
|
||||
// algorithm, which might be a certificate type. Extend the list of server
|
||||
// supported algorithms to include the corresponding certificate algorithms.
|
||||
serverAlgos := strings.Split(string(extPayload), ",")
|
||||
for _, algo := range serverAlgos {
|
||||
if certAlgo, ok := certificateAlgo(algo); ok {
|
||||
serverAlgos = append(serverAlgos, certAlgo)
|
||||
}
|
||||
}
|
||||
|
||||
keyAlgos := algorithmsForKeyFormat(keyFormat)
|
||||
algo, err := findCommon("public key signature algorithm", keyAlgos, serverAlgos)
|
||||
if err != nil {
|
||||
// If there is no overlap, try the key anyway with the key format
|
||||
// algorithm, to support servers that fail to list all supported
|
||||
// algorithms.
|
||||
return as, keyFormat
|
||||
}
|
||||
return as, algo
|
||||
}
|
||||
|
||||
func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (authResult, []string, error) {
|
||||
// Authentication is performed by sending an enquiry to test if a key is
|
||||
// acceptable to the remote. If the key is acceptable, the client will
|
||||
// attempt to authenticate with the valid key. If not the client will repeat
|
||||
@ -201,7 +268,10 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
|
||||
}
|
||||
var methods []string
|
||||
for _, signer := range signers {
|
||||
ok, err := validateKey(signer.PublicKey(), user, c)
|
||||
pub := signer.PublicKey()
|
||||
as, algo := pickSignatureAlgorithm(signer, extensions)
|
||||
|
||||
ok, err := validateKey(pub, algo, user, c)
|
||||
if err != nil {
|
||||
return authFailure, nil, err
|
||||
}
|
||||
@ -209,13 +279,13 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
|
||||
continue
|
||||
}
|
||||
|
||||
pub := signer.PublicKey()
|
||||
pubKey := pub.Marshal()
|
||||
sign, err := signer.Sign(rand, buildDataSignedForAuth(session, userAuthRequestMsg{
|
||||
data := buildDataSignedForAuth(session, userAuthRequestMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
Method: cb.method(),
|
||||
}, []byte(pub.Type()), pubKey))
|
||||
}, algo, pubKey)
|
||||
sign, err := as.SignWithAlgorithm(rand, data, underlyingAlgo(algo))
|
||||
if err != nil {
|
||||
return authFailure, nil, err
|
||||
}
|
||||
@ -229,7 +299,7 @@ func (cb publicKeyCallback) auth(session []byte, user string, c packetConn, rand
|
||||
Service: serviceSSH,
|
||||
Method: cb.method(),
|
||||
HasSig: true,
|
||||
Algoname: pub.Type(),
|
||||
Algoname: algo,
|
||||
PubKey: pubKey,
|
||||
Sig: sig,
|
||||
}
|
||||
@ -266,26 +336,25 @@ func containsMethod(methods []string, method string) bool {
|
||||
}
|
||||
|
||||
// validateKey validates the key provided is acceptable to the server.
|
||||
func validateKey(key PublicKey, user string, c packetConn) (bool, error) {
|
||||
func validateKey(key PublicKey, algo string, user string, c packetConn) (bool, error) {
|
||||
pubKey := key.Marshal()
|
||||
msg := publickeyAuthMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
Method: "publickey",
|
||||
HasSig: false,
|
||||
Algoname: key.Type(),
|
||||
Algoname: algo,
|
||||
PubKey: pubKey,
|
||||
}
|
||||
if err := c.writePacket(Marshal(&msg)); err != nil {
|
||||
return false, err
|
||||
}
|
||||
|
||||
return confirmKeyAck(key, c)
|
||||
return confirmKeyAck(key, algo, c)
|
||||
}
|
||||
|
||||
func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
|
||||
func confirmKeyAck(key PublicKey, algo string, c packetConn) (bool, error) {
|
||||
pubKey := key.Marshal()
|
||||
algoname := key.Type()
|
||||
|
||||
for {
|
||||
packet, err := c.readPacket()
|
||||
@ -302,14 +371,14 @@ func confirmKeyAck(key PublicKey, c packetConn) (bool, error) {
|
||||
if err := Unmarshal(packet, &msg); err != nil {
|
||||
return false, err
|
||||
}
|
||||
if msg.Algo != algoname || !bytes.Equal(msg.PubKey, pubKey) {
|
||||
if msg.Algo != algo || !bytes.Equal(msg.PubKey, pubKey) {
|
||||
return false, nil
|
||||
}
|
||||
return true, nil
|
||||
case msgUserAuthFailure:
|
||||
return false, nil
|
||||
default:
|
||||
return false, unexpectedMessageError(msgUserAuthSuccess, packet[0])
|
||||
return false, unexpectedMessageError(msgUserAuthPubKeyOk, packet[0])
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -330,6 +399,7 @@ func PublicKeysCallback(getSigners func() (signers []Signer, err error)) AuthMet
|
||||
// along with a list of remaining authentication methods to try next and
|
||||
// an error if an unexpected response was received.
|
||||
func handleAuthResponse(c packetConn) (authResult, []string, error) {
|
||||
gotMsgExtInfo := false
|
||||
for {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
@ -341,6 +411,12 @@ func handleAuthResponse(c packetConn) (authResult, []string, error) {
|
||||
if err := handleBannerResponse(c, packet); err != nil {
|
||||
return authFailure, nil, err
|
||||
}
|
||||
case msgExtInfo:
|
||||
// Ignore post-authentication RFC 8308 extensions, once.
|
||||
if gotMsgExtInfo {
|
||||
return authFailure, nil, unexpectedMessageError(msgUserAuthSuccess, packet[0])
|
||||
}
|
||||
gotMsgExtInfo = true
|
||||
case msgUserAuthFailure:
|
||||
var msg userAuthFailureMsg
|
||||
if err := Unmarshal(packet, &msg); err != nil {
|
||||
@ -380,10 +456,10 @@ func handleBannerResponse(c packetConn, packet []byte) error {
|
||||
// disabling echoing (e.g. for passwords), and return all the answers.
|
||||
// Challenge may be called multiple times in a single session. After
|
||||
// successful authentication, the server may send a challenge with no
|
||||
// questions, for which the user and instruction messages should be
|
||||
// questions, for which the name and instruction messages should be
|
||||
// printed. RFC 4256 section 3.3 details how the UI should behave for
|
||||
// both CLI and GUI environments.
|
||||
type KeyboardInteractiveChallenge func(user, instruction string, questions []string, echos []bool) (answers []string, err error)
|
||||
type KeyboardInteractiveChallenge func(name, instruction string, questions []string, echos []bool) (answers []string, err error)
|
||||
|
||||
// KeyboardInteractive returns an AuthMethod using a prompt/response
|
||||
// sequence controlled by the server.
|
||||
@ -395,7 +471,7 @@ func (cb KeyboardInteractiveChallenge) method() string {
|
||||
return "keyboard-interactive"
|
||||
}
|
||||
|
||||
func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
|
||||
func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
|
||||
type initiateMsg struct {
|
||||
User string `sshtype:"50"`
|
||||
Service string
|
||||
@ -412,6 +488,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
|
||||
return authFailure, nil, err
|
||||
}
|
||||
|
||||
gotMsgExtInfo := false
|
||||
for {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
@ -425,6 +502,13 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
|
||||
return authFailure, nil, err
|
||||
}
|
||||
continue
|
||||
case msgExtInfo:
|
||||
// Ignore post-authentication RFC 8308 extensions, once.
|
||||
if gotMsgExtInfo {
|
||||
return authFailure, nil, unexpectedMessageError(msgUserAuthInfoRequest, packet[0])
|
||||
}
|
||||
gotMsgExtInfo = true
|
||||
continue
|
||||
case msgUserAuthInfoRequest:
|
||||
// OK
|
||||
case msgUserAuthFailure:
|
||||
@ -465,7 +549,7 @@ func (cb KeyboardInteractiveChallenge) auth(session []byte, user string, c packe
|
||||
return authFailure, nil, errors.New("ssh: extra data following keyboard-interactive pairs")
|
||||
}
|
||||
|
||||
answers, err := cb(msg.User, msg.Instruction, prompts, echos)
|
||||
answers, err := cb(msg.Name, msg.Instruction, prompts, echos)
|
||||
if err != nil {
|
||||
return authFailure, nil, err
|
||||
}
|
||||
@ -497,9 +581,9 @@ type retryableAuthMethod struct {
|
||||
maxTries int
|
||||
}
|
||||
|
||||
func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader) (ok authResult, methods []string, err error) {
|
||||
func (r *retryableAuthMethod) auth(session []byte, user string, c packetConn, rand io.Reader, extensions map[string][]byte) (ok authResult, methods []string, err error) {
|
||||
for i := 0; r.maxTries <= 0 || i < r.maxTries; i++ {
|
||||
ok, methods, err = r.authMethod.auth(session, user, c, rand)
|
||||
ok, methods, err = r.authMethod.auth(session, user, c, rand, extensions)
|
||||
if ok != authFailure || err != nil { // either success, partial success or error terminate
|
||||
return ok, methods, err
|
||||
}
|
||||
@ -542,7 +626,7 @@ type gssAPIWithMICCallback struct {
|
||||
target string
|
||||
}
|
||||
|
||||
func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader) (authResult, []string, error) {
|
||||
func (g *gssAPIWithMICCallback) auth(session []byte, user string, c packetConn, rand io.Reader, _ map[string][]byte) (authResult, []string, error) {
|
||||
m := &userAuthRequestMsg{
|
||||
User: user,
|
||||
Service: serviceSSH,
|
||||
|
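The client_auth.go changes above read the RFC 8308 server-sig-algs extension and pick an rsa-sha2-* public key algorithm automatically during publickey authentication, falling back to ssh-rsa when the server advertises nothing. A minimal sketch, with a hypothetical key path and server address, showing that callers keep using ssh.PublicKeys unchanged.

```go
package main

import (
	"log"
	"os"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Hypothetical key path, for illustration only.
	pem, err := os.ReadFile("/path/to/id_rsa")
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.ParsePrivateKey(pem)
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ClientConfig{
		User: "example",
		// With the change above, the publickey method consults the
		// server-sig-algs extension and signs with rsa-sha2-256 or
		// rsa-sha2-512 when the server supports it; no caller-side
		// change is needed.
		Auth:            []ssh.AuthMethod{ssh.PublicKeys(signer)},
		HostKeyCallback: ssh.InsecureIgnoreHostKey(), // do not use in production
	}
	client, err := ssh.Dial("tcp", "server.example.com:22", config)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()
}
```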
86 vendor/golang.org/x/crypto/ssh/common.go (generated, vendored)
@ -44,11 +44,11 @@ var preferredCiphers = []string{
|
||||
// supportedKexAlgos specifies the supported key-exchange algorithms in
|
||||
// preference order.
|
||||
var supportedKexAlgos = []string{
|
||||
kexAlgoCurve25519SHA256,
|
||||
kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH,
|
||||
// P384 and P521 are not constant-time yet, but since we don't
|
||||
// reuse ephemeral keys, using them for ECDH should be OK.
|
||||
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
|
||||
kexAlgoDH14SHA1, kexAlgoDH1SHA1,
|
||||
kexAlgoDH14SHA256, kexAlgoDH14SHA1, kexAlgoDH1SHA1,
|
||||
}
|
||||
|
||||
// serverForbiddenKexAlgos contains key exchange algorithms, that are forbidden
|
||||
@ -61,21 +61,21 @@ var serverForbiddenKexAlgos = map[string]struct{}{
|
||||
// preferredKexAlgos specifies the default preference for key-exchange algorithms
|
||||
// in preference order.
|
||||
var preferredKexAlgos = []string{
|
||||
kexAlgoCurve25519SHA256,
|
||||
kexAlgoCurve25519SHA256, kexAlgoCurve25519SHA256LibSSH,
|
||||
kexAlgoECDH256, kexAlgoECDH384, kexAlgoECDH521,
|
||||
kexAlgoDH14SHA1,
|
||||
kexAlgoDH14SHA256, kexAlgoDH14SHA1,
|
||||
}
|
||||
|
||||
// supportedHostKeyAlgos specifies the supported host-key algorithms (i.e. methods
|
||||
// of authenticating servers) in preference order.
|
||||
var supportedHostKeyAlgos = []string{
|
||||
CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01,
|
||||
CertSigAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
|
||||
CertAlgoRSASHA512v01, CertAlgoRSASHA256v01,
|
||||
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01,
|
||||
CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01,
|
||||
|
||||
KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521,
|
||||
SigAlgoRSASHA2512, SigAlgoRSASHA2256,
|
||||
SigAlgoRSA, KeyAlgoDSA,
|
||||
KeyAlgoRSASHA512, KeyAlgoRSASHA256,
|
||||
KeyAlgoRSA, KeyAlgoDSA,
|
||||
|
||||
KeyAlgoED25519,
|
||||
}
|
||||
@ -89,23 +89,33 @@ var supportedMACs = []string{
|
||||
|
||||
var supportedCompressions = []string{compressionNone}
|
||||
|
||||
// hashFuncs keeps the mapping of supported algorithms to their respective
|
||||
// hashes needed for signature verification.
|
||||
// hashFuncs keeps the mapping of supported signature algorithms to their
|
||||
// respective hashes needed for signing and verification.
|
||||
var hashFuncs = map[string]crypto.Hash{
|
||||
SigAlgoRSA: crypto.SHA1,
|
||||
SigAlgoRSASHA2256: crypto.SHA256,
|
||||
SigAlgoRSASHA2512: crypto.SHA512,
|
||||
KeyAlgoDSA: crypto.SHA1,
|
||||
KeyAlgoECDSA256: crypto.SHA256,
|
||||
KeyAlgoECDSA384: crypto.SHA384,
|
||||
KeyAlgoECDSA521: crypto.SHA512,
|
||||
CertSigAlgoRSAv01: crypto.SHA1,
|
||||
CertSigAlgoRSASHA2256v01: crypto.SHA256,
|
||||
CertSigAlgoRSASHA2512v01: crypto.SHA512,
|
||||
CertAlgoDSAv01: crypto.SHA1,
|
||||
CertAlgoECDSA256v01: crypto.SHA256,
|
||||
CertAlgoECDSA384v01: crypto.SHA384,
|
||||
CertAlgoECDSA521v01: crypto.SHA512,
|
||||
KeyAlgoRSA: crypto.SHA1,
|
||||
KeyAlgoRSASHA256: crypto.SHA256,
|
||||
KeyAlgoRSASHA512: crypto.SHA512,
|
||||
KeyAlgoDSA: crypto.SHA1,
|
||||
KeyAlgoECDSA256: crypto.SHA256,
|
||||
KeyAlgoECDSA384: crypto.SHA384,
|
||||
KeyAlgoECDSA521: crypto.SHA512,
|
||||
// KeyAlgoED25519 doesn't pre-hash.
|
||||
KeyAlgoSKECDSA256: crypto.SHA256,
|
||||
KeyAlgoSKED25519: crypto.SHA256,
|
||||
}
|
||||
|
||||
// algorithmsForKeyFormat returns the supported signature algorithms for a given
|
||||
// public key format (PublicKey.Type), in order of preference. See RFC 8332,
|
||||
// Section 2. See also the note in sendKexInit on backwards compatibility.
|
||||
func algorithmsForKeyFormat(keyFormat string) []string {
|
||||
switch keyFormat {
|
||||
case KeyAlgoRSA:
|
||||
return []string{KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoRSA}
|
||||
case CertAlgoRSAv01:
|
||||
return []string{CertAlgoRSASHA256v01, CertAlgoRSASHA512v01, CertAlgoRSAv01}
|
||||
default:
|
||||
return []string{keyFormat}
|
||||
}
|
||||
}
|
||||
|
||||
// unexpectedMessageError results when the SSH message that we received didn't
|
||||
@ -152,6 +162,11 @@ func (a *directionAlgorithms) rekeyBytes() int64 {
|
||||
return 1 << 30
|
||||
}
|
||||
|
||||
var aeadCiphers = map[string]bool{
|
||||
gcmCipherID: true,
|
||||
chacha20Poly1305ID: true,
|
||||
}
|
||||
|
||||
type algorithms struct {
|
||||
kex string
|
||||
hostKey string
|
||||
@ -187,14 +202,18 @@ func findAgreedAlgorithms(isClient bool, clientKexInit, serverKexInit *kexInitMs
|
||||
return
|
||||
}
|
||||
|
||||
ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
|
||||
if err != nil {
|
||||
return
|
||||
if !aeadCiphers[ctos.Cipher] {
|
||||
ctos.MAC, err = findCommon("client to server MAC", clientKexInit.MACsClientServer, serverKexInit.MACsClientServer)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
|
||||
if err != nil {
|
||||
return
|
||||
if !aeadCiphers[stoc.Cipher] {
|
||||
stoc.MAC, err = findCommon("server to client MAC", clientKexInit.MACsServerClient, serverKexInit.MACsServerClient)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
ctos.Compression, err = findCommon("client to server compression", clientKexInit.CompressionClientServer, serverKexInit.CompressionClientServer)
|
||||
@ -278,8 +297,9 @@ func (c *Config) SetDefaults() {
|
||||
}
|
||||
|
||||
// buildDataSignedForAuth returns the data that is signed in order to prove
|
||||
// possession of a private key. See RFC 4252, section 7.
|
||||
func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubKey []byte) []byte {
|
||||
// possession of a private key. See RFC 4252, section 7. algo is the advertised
|
||||
// algorithm, and may be a certificate type.
|
||||
func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo string, pubKey []byte) []byte {
|
||||
data := struct {
|
||||
Session []byte
|
||||
Type byte
|
||||
@ -287,7 +307,7 @@ func buildDataSignedForAuth(sessionID []byte, req userAuthRequestMsg, algo, pubK
|
||||
Service string
|
||||
Method string
|
||||
Sign bool
|
||||
Algo []byte
|
||||
Algo string
|
||||
PubKey []byte
|
||||
}{
|
||||
sessionID,
|
||||
|
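The common.go hunk above adds an unexported algorithmsForKeyFormat helper that encodes the RFC 8332 preference order for ssh-rsa keys. A small standalone mirror of that mapping, written against the exported constants purely for illustration (the real helper is internal to the package).

```go
package main

import (
	"fmt"

	"golang.org/x/crypto/ssh"
)

// algorithmsForKeyFormat mirrors the unexported helper added above: the
// ssh-rsa key format maps to the RFC 8332 signature algorithms in preference
// order; every other format maps to itself.
func algorithmsForKeyFormat(keyFormat string) []string {
	switch keyFormat {
	case ssh.KeyAlgoRSA:
		return []string{ssh.KeyAlgoRSASHA256, ssh.KeyAlgoRSASHA512, ssh.KeyAlgoRSA}
	case ssh.CertAlgoRSAv01:
		return []string{ssh.CertAlgoRSASHA256v01, ssh.CertAlgoRSASHA512v01, ssh.CertAlgoRSAv01}
	default:
		return []string{keyFormat}
	}
}

func main() {
	fmt.Println(algorithmsForKeyFormat(ssh.KeyAlgoRSA))     // [rsa-sha2-256 rsa-sha2-512 ssh-rsa]
	fmt.Println(algorithmsForKeyFormat(ssh.KeyAlgoED25519)) // [ssh-ed25519]
}
```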
98 vendor/golang.org/x/crypto/ssh/handshake.go (generated, vendored)
@ -455,21 +455,36 @@ func (t *handshakeTransport) sendKexInit() error {
|
||||
}
|
||||
io.ReadFull(rand.Reader, msg.Cookie[:])
|
||||
|
||||
if len(t.hostKeys) > 0 {
|
||||
isServer := len(t.hostKeys) > 0
|
||||
if isServer {
|
||||
for _, k := range t.hostKeys {
|
||||
algo := k.PublicKey().Type()
|
||||
switch algo {
|
||||
case KeyAlgoRSA:
|
||||
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{SigAlgoRSASHA2512, SigAlgoRSASHA2256, SigAlgoRSA}...)
|
||||
case CertAlgoRSAv01:
|
||||
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, []string{CertSigAlgoRSASHA2512v01, CertSigAlgoRSASHA2256v01, CertSigAlgoRSAv01}...)
|
||||
default:
|
||||
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algo)
|
||||
// If k is an AlgorithmSigner, presume it supports all signature algorithms
|
||||
// associated with the key format. (Ideally AlgorithmSigner would have a
|
||||
// method to advertise supported algorithms, but it doesn't. This means that
|
||||
// adding support for a new algorithm is a breaking change, as we will
|
||||
// immediately negotiate it even if existing implementations don't support
|
||||
// it. If that ever happens, we'll have to figure something out.)
|
||||
// If k is not an AlgorithmSigner, we can only assume it only supports the
|
||||
// algorithms that matches the key format. (This means that Sign can't pick
|
||||
// a different default.)
|
||||
keyFormat := k.PublicKey().Type()
|
||||
if _, ok := k.(AlgorithmSigner); ok {
|
||||
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, algorithmsForKeyFormat(keyFormat)...)
|
||||
} else {
|
||||
msg.ServerHostKeyAlgos = append(msg.ServerHostKeyAlgos, keyFormat)
|
||||
}
|
||||
}
|
||||
} else {
|
||||
msg.ServerHostKeyAlgos = t.hostKeyAlgorithms
|
||||
|
||||
// As a client we opt in to receiving SSH_MSG_EXT_INFO so we know what
|
||||
// algorithms the server supports for public key authentication. See RFC
|
||||
// 8303, Section 2.1.
|
||||
msg.KexAlgos = make([]string, 0, len(t.config.KeyExchanges)+1)
|
||||
msg.KexAlgos = append(msg.KexAlgos, t.config.KeyExchanges...)
|
||||
msg.KexAlgos = append(msg.KexAlgos, "ext-info-c")
|
||||
}
|
||||
|
||||
packet := Marshal(msg)
|
||||
|
||||
// writePacket destroys the contents, so save a copy.
|
||||
@ -589,9 +604,9 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
|
||||
|
||||
var result *kexResult
|
||||
if len(t.hostKeys) > 0 {
|
||||
result, err = t.server(kex, t.algorithms, &magics)
|
||||
result, err = t.server(kex, &magics)
|
||||
} else {
|
||||
result, err = t.client(kex, t.algorithms, &magics)
|
||||
result, err = t.client(kex, &magics)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
@ -618,33 +633,52 @@ func (t *handshakeTransport) enterKeyExchange(otherInitPacket []byte) error {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) server(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
|
||||
var hostKey Signer
|
||||
for _, k := range t.hostKeys {
|
||||
kt := k.PublicKey().Type()
|
||||
if kt == algs.hostKey {
|
||||
hostKey = k
|
||||
} else if signer, ok := k.(AlgorithmSigner); ok {
|
||||
// Some signature algorithms don't show up as key types
|
||||
// so we have to manually check for a compatible host key.
|
||||
switch kt {
|
||||
case KeyAlgoRSA:
|
||||
if algs.hostKey == SigAlgoRSASHA2256 || algs.hostKey == SigAlgoRSASHA2512 {
|
||||
hostKey = &rsaSigner{signer, algs.hostKey}
|
||||
}
|
||||
case CertAlgoRSAv01:
|
||||
if algs.hostKey == CertSigAlgoRSASHA2256v01 || algs.hostKey == CertSigAlgoRSASHA2512v01 {
|
||||
hostKey = &rsaSigner{signer, certToPrivAlgo(algs.hostKey)}
|
||||
}
|
||||
// algorithmSignerWrapper is an AlgorithmSigner that only supports the default
|
||||
// key format algorithm.
|
||||
//
|
||||
// This is technically a violation of the AlgorithmSigner interface, but it
|
||||
// should be unreachable given where we use this. Anyway, at least it returns an
|
||||
// error instead of panicing or producing an incorrect signature.
|
||||
type algorithmSignerWrapper struct {
|
||||
Signer
|
||||
}
|
||||
|
||||
func (a algorithmSignerWrapper) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
|
||||
if algorithm != underlyingAlgo(a.PublicKey().Type()) {
|
||||
return nil, errors.New("ssh: internal error: algorithmSignerWrapper invoked with non-default algorithm")
|
||||
}
|
||||
return a.Sign(rand, data)
|
||||
}
|
||||
|
||||
func pickHostKey(hostKeys []Signer, algo string) AlgorithmSigner {
|
||||
for _, k := range hostKeys {
|
||||
if algo == k.PublicKey().Type() {
|
||||
return algorithmSignerWrapper{k}
|
||||
}
|
||||
k, ok := k.(AlgorithmSigner)
|
||||
if !ok {
|
||||
continue
|
||||
}
|
||||
for _, a := range algorithmsForKeyFormat(k.PublicKey().Type()) {
|
||||
if algo == a {
|
||||
return k
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey)
|
||||
func (t *handshakeTransport) server(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) {
|
||||
hostKey := pickHostKey(t.hostKeys, t.algorithms.hostKey)
|
||||
if hostKey == nil {
|
||||
return nil, errors.New("ssh: internal error: negotiated unsupported signature type")
|
||||
}
|
||||
|
||||
r, err := kex.Server(t.conn, t.config.Rand, magics, hostKey, t.algorithms.hostKey)
|
||||
return r, err
|
||||
}
|
||||
|
||||
func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *handshakeMagics) (*kexResult, error) {
|
||||
func (t *handshakeTransport) client(kex kexAlgorithm, magics *handshakeMagics) (*kexResult, error) {
|
||||
result, err := kex.Client(t.conn, t.config.Rand, magics)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -655,7 +689,7 @@ func (t *handshakeTransport) client(kex kexAlgorithm, algs *algorithms, magics *
|
||||
return nil, err
|
||||
}
|
||||
|
||||
if err := verifyHostKeySignature(hostKey, algs.hostKey, result); err != nil {
|
||||
if err := verifyHostKeySignature(hostKey, t.algorithms.hostKey, result); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
|
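With the handshake.go changes above, a server whose RSA host key implements ssh.AlgorithmSigner advertises rsa-sha2-256, rsa-sha2-512 and ssh-rsa for that key and signs the key exchange with whichever algorithm is negotiated. A minimal sketch of wiring such a host key into a ServerConfig; the in-memory key and NoClientAuth setting are for illustration only.

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"
	"log"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Throwaway in-memory host key, for illustration only.
	rsaKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	hostSigner, err := ssh.NewSignerFromKey(rsaKey)
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ServerConfig{NoClientAuth: true} // illustration only
	// hostSigner implements ssh.AlgorithmSigner, so the handshake code above
	// advertises rsa-sha2-256, rsa-sha2-512 and ssh-rsa for this key and
	// signs the exchange with the negotiated algorithm.
	config.AddHostKey(hostSigner)

	fmt.Println("host key format:", hostSigner.PublicKey().Type()) // ssh-rsa
}
```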
186 vendor/golang.org/x/crypto/ssh/kex.go (generated, vendored)
@ -20,12 +20,14 @@ import (
|
||||
)
|
||||
|
||||
const (
|
||||
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
|
||||
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
|
||||
kexAlgoECDH256 = "ecdh-sha2-nistp256"
|
||||
kexAlgoECDH384 = "ecdh-sha2-nistp384"
|
||||
kexAlgoECDH521 = "ecdh-sha2-nistp521"
|
||||
kexAlgoCurve25519SHA256 = "curve25519-sha256@libssh.org"
|
||||
kexAlgoDH1SHA1 = "diffie-hellman-group1-sha1"
|
||||
kexAlgoDH14SHA1 = "diffie-hellman-group14-sha1"
|
||||
kexAlgoDH14SHA256 = "diffie-hellman-group14-sha256"
|
||||
kexAlgoECDH256 = "ecdh-sha2-nistp256"
|
||||
kexAlgoECDH384 = "ecdh-sha2-nistp384"
|
||||
kexAlgoECDH521 = "ecdh-sha2-nistp521"
|
||||
kexAlgoCurve25519SHA256LibSSH = "curve25519-sha256@libssh.org"
|
||||
kexAlgoCurve25519SHA256 = "curve25519-sha256"
|
||||
|
||||
// For the following kex only the client half contains a production
|
||||
// ready implementation. The server half only consists of a minimal
|
||||
@ -75,8 +77,9 @@ func (m *handshakeMagics) write(w io.Writer) {
|
||||
// kexAlgorithm abstracts different key exchange algorithms.
|
||||
type kexAlgorithm interface {
|
||||
// Server runs server-side key agreement, signing the result
|
||||
// with a hostkey.
|
||||
Server(p packetConn, rand io.Reader, magics *handshakeMagics, s Signer) (*kexResult, error)
|
||||
// with a hostkey. algo is the negotiated algorithm, and may
|
||||
// be a certificate type.
|
||||
Server(p packetConn, rand io.Reader, magics *handshakeMagics, s AlgorithmSigner, algo string) (*kexResult, error)
|
||||
|
||||
// Client runs the client-side key agreement. Caller is
|
||||
// responsible for verifying the host key signature.
|
||||
@ -86,6 +89,7 @@ type kexAlgorithm interface {
|
||||
// dhGroup is a multiplicative group suitable for implementing Diffie-Hellman key agreement.
|
||||
type dhGroup struct {
|
||||
g, p, pMinus1 *big.Int
|
||||
hashFunc crypto.Hash
|
||||
}
|
||||
|
||||
func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
|
||||
@ -96,8 +100,6 @@ func (group *dhGroup) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int,
|
||||
}
|
||||
|
||||
func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
|
||||
hashFunc := crypto.SHA1
|
||||
|
||||
var x *big.Int
|
||||
for {
|
||||
var err error
|
||||
@ -132,7 +134,7 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha
|
||||
return nil, err
|
||||
}
|
||||
|
||||
h := hashFunc.New()
|
||||
h := group.hashFunc.New()
|
||||
magics.write(h)
|
||||
writeString(h, kexDHReply.HostKey)
|
||||
writeInt(h, X)
|
||||
@ -146,12 +148,11 @@ func (group *dhGroup) Client(c packetConn, randSource io.Reader, magics *handsha
|
||||
K: K,
|
||||
HostKey: kexDHReply.HostKey,
|
||||
Signature: kexDHReply.Signature,
|
||||
Hash: crypto.SHA1,
|
||||
Hash: group.hashFunc,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
|
||||
hashFunc := crypto.SHA1
|
||||
func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return
|
||||
@ -179,7 +180,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
|
||||
|
||||
hostKeyBytes := priv.PublicKey().Marshal()
|
||||
|
||||
h := hashFunc.New()
|
||||
h := group.hashFunc.New()
|
||||
magics.write(h)
|
||||
writeString(h, hostKeyBytes)
|
||||
writeInt(h, kexDHInit.X)
|
||||
@ -193,7 +194,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
|
||||
|
||||
// H is already a hash, but the hostkey signing will apply its
|
||||
// own key-specific hash algorithm.
|
||||
sig, err := signAndMarshal(priv, randSource, H)
|
||||
sig, err := signAndMarshal(priv, randSource, H, algo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -211,7 +212,7 @@ func (group *dhGroup) Server(c packetConn, randSource io.Reader, magics *handsha
|
||||
K: K,
|
||||
HostKey: hostKeyBytes,
|
||||
Signature: sig,
|
||||
Hash: crypto.SHA1,
|
||||
Hash: group.hashFunc,
|
||||
}, err
|
||||
}
|
||||
|
||||
@ -314,7 +315,7 @@ func validateECPublicKey(curve elliptic.Curve, x, y *big.Int) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
|
||||
func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@ -359,7 +360,7 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p
|
||||
|
||||
// H is already a hash, but the hostkey signing will apply its
|
||||
// own key-specific hash algorithm.
|
||||
sig, err := signAndMarshal(priv, rand, H)
|
||||
sig, err := signAndMarshal(priv, rand, H, algo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -384,39 +385,62 @@ func (kex *ecdh) Server(c packetConn, rand io.Reader, magics *handshakeMagics, p
|
||||
}, nil
|
||||
}
|
||||
|
||||
// ecHash returns the hash to match the given elliptic curve, see RFC
|
||||
// 5656, section 6.2.1
|
||||
func ecHash(curve elliptic.Curve) crypto.Hash {
|
||||
bitSize := curve.Params().BitSize
|
||||
switch {
|
||||
case bitSize <= 256:
|
||||
return crypto.SHA256
|
||||
case bitSize <= 384:
|
||||
return crypto.SHA384
|
||||
}
|
||||
return crypto.SHA512
|
||||
}
|
||||
|
||||
var kexAlgoMap = map[string]kexAlgorithm{}
|
||||
|
||||
func init() {
|
||||
// This is the group called diffie-hellman-group1-sha1 in RFC
|
||||
// 4253 and Oakley Group 2 in RFC 2409.
|
||||
// This is the group called diffie-hellman-group1-sha1 in
|
||||
// RFC 4253 and Oakley Group 2 in RFC 2409.
|
||||
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE65381FFFFFFFFFFFFFFFF", 16)
|
||||
kexAlgoMap[kexAlgoDH1SHA1] = &dhGroup{
|
||||
g: new(big.Int).SetInt64(2),
|
||||
p: p,
|
||||
pMinus1: new(big.Int).Sub(p, bigOne),
|
||||
hashFunc: crypto.SHA1,
|
||||
}
|
||||
|
||||
// This are the groups called diffie-hellman-group14-sha1 and
|
||||
// diffie-hellman-group14-sha256 in RFC 4253 and RFC 8268,
|
||||
// and Oakley Group 14 in RFC 3526.
|
||||
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
|
||||
group14 := &dhGroup{
|
||||
g: new(big.Int).SetInt64(2),
|
||||
p: p,
|
||||
pMinus1: new(big.Int).Sub(p, bigOne),
|
||||
}
|
||||
|
||||
// This is the group called diffie-hellman-group14-sha1 in RFC
|
||||
// 4253 and Oakley Group 14 in RFC 3526.
|
||||
p, _ = new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
|
||||
|
||||
kexAlgoMap[kexAlgoDH14SHA1] = &dhGroup{
|
||||
g: new(big.Int).SetInt64(2),
|
||||
p: p,
|
||||
pMinus1: new(big.Int).Sub(p, bigOne),
|
||||
g: group14.g, p: group14.p, pMinus1: group14.pMinus1,
|
||||
hashFunc: crypto.SHA1,
|
||||
}
|
||||
kexAlgoMap[kexAlgoDH14SHA256] = &dhGroup{
|
||||
g: group14.g, p: group14.p, pMinus1: group14.pMinus1,
|
||||
hashFunc: crypto.SHA256,
|
||||
}
|
||||
|
||||
kexAlgoMap[kexAlgoECDH521] = &ecdh{elliptic.P521()}
|
||||
kexAlgoMap[kexAlgoECDH384] = &ecdh{elliptic.P384()}
|
||||
kexAlgoMap[kexAlgoECDH256] = &ecdh{elliptic.P256()}
|
||||
kexAlgoMap[kexAlgoCurve25519SHA256] = &curve25519sha256{}
|
||||
kexAlgoMap[kexAlgoCurve25519SHA256LibSSH] = &curve25519sha256{}
|
||||
kexAlgoMap[kexAlgoDHGEXSHA1] = &dhGEXSHA{hashFunc: crypto.SHA1}
|
||||
kexAlgoMap[kexAlgoDHGEXSHA256] = &dhGEXSHA{hashFunc: crypto.SHA256}
|
||||
}
|
||||
|
||||
// curve25519sha256 implements the curve25519-sha256@libssh.org key
|
||||
// agreement protocol, as described in
|
||||
// https://git.libssh.org/projects/libssh.git/tree/doc/curve25519-sha256@libssh.org.txt
|
||||
// curve25519sha256 implements the curve25519-sha256 (formerly known as
|
||||
// curve25519-sha256@libssh.org) key exchange method, as described in RFC 8731.
|
||||
type curve25519sha256 struct{}
|
||||
|
||||
type curve25519KeyPair struct {
|
||||
@ -486,7 +510,7 @@ func (kex *curve25519sha256) Client(c packetConn, rand io.Reader, magics *handsh
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
|
||||
func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
return
|
||||
@ -527,7 +551,7 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
|
||||
|
||||
H := h.Sum(nil)
|
||||
|
||||
sig, err := signAndMarshal(priv, rand, H)
|
||||
sig, err := signAndMarshal(priv, rand, H, algo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
@ -553,7 +577,6 @@ func (kex *curve25519sha256) Server(c packetConn, rand io.Reader, magics *handsh
|
||||
// diffie-hellman-group-exchange-sha256 key agreement protocols,
|
||||
// as described in RFC 4419
|
||||
type dhGEXSHA struct {
|
||||
g, p *big.Int
|
||||
hashFunc crypto.Hash
|
||||
}
|
||||
|
||||
@ -563,14 +586,7 @@ const (
|
||||
dhGroupExchangeMaximumBits = 8192
|
||||
)
|
||||
|
||||
func (gex *dhGEXSHA) diffieHellman(theirPublic, myPrivate *big.Int) (*big.Int, error) {
|
||||
if theirPublic.Sign() <= 0 || theirPublic.Cmp(gex.p) >= 0 {
|
||||
return nil, fmt.Errorf("ssh: DH parameter out of bounds")
|
||||
}
|
||||
return new(big.Int).Exp(theirPublic, myPrivate, gex.p), nil
|
||||
}
|
||||
|
||||
func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
|
||||
func (gex *dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshakeMagics) (*kexResult, error) {
|
||||
// Send GexRequest
|
||||
kexDHGexRequest := kexDHGexRequestMsg{
|
||||
MinBits: dhGroupExchangeMinimumBits,
|
||||
@ -587,35 +603,29 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var kexDHGexGroup kexDHGexGroupMsg
|
||||
if err = Unmarshal(packet, &kexDHGexGroup); err != nil {
|
||||
var msg kexDHGexGroupMsg
|
||||
if err = Unmarshal(packet, &msg); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
// reject if p's bit length < dhGroupExchangeMinimumBits or > dhGroupExchangeMaximumBits
|
||||
if kexDHGexGroup.P.BitLen() < dhGroupExchangeMinimumBits || kexDHGexGroup.P.BitLen() > dhGroupExchangeMaximumBits {
|
||||
return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", kexDHGexGroup.P.BitLen())
|
||||
if msg.P.BitLen() < dhGroupExchangeMinimumBits || msg.P.BitLen() > dhGroupExchangeMaximumBits {
|
||||
return nil, fmt.Errorf("ssh: server-generated gex p is out of range (%d bits)", msg.P.BitLen())
|
||||
}
|
||||
|
||||
gex.p = kexDHGexGroup.P
|
||||
gex.g = kexDHGexGroup.G
|
||||
|
||||
// Check if g is safe by verifing that g > 1 and g < p - 1
|
||||
one := big.NewInt(1)
|
||||
var pMinusOne = &big.Int{}
|
||||
pMinusOne.Sub(gex.p, one)
|
||||
if gex.g.Cmp(one) != 1 && gex.g.Cmp(pMinusOne) != -1 {
|
||||
// Check if g is safe by verifying that 1 < g < p-1
|
||||
pMinusOne := new(big.Int).Sub(msg.P, bigOne)
|
||||
if msg.G.Cmp(bigOne) <= 0 || msg.G.Cmp(pMinusOne) >= 0 {
|
||||
return nil, fmt.Errorf("ssh: server provided gex g is not safe")
|
||||
}
|
||||
|
||||
// Send GexInit
|
||||
var pHalf = &big.Int{}
|
||||
pHalf.Rsh(gex.p, 1)
|
||||
pHalf := new(big.Int).Rsh(msg.P, 1)
|
||||
x, err := rand.Int(randSource, pHalf)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
X := new(big.Int).Exp(gex.g, x, gex.p)
|
||||
X := new(big.Int).Exp(msg.G, x, msg.P)
|
||||
kexDHGexInit := kexDHGexInitMsg{
|
||||
X: X,
|
||||
}
|
||||
@ -634,13 +644,13 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
|
||||
return nil, err
|
||||
}
|
||||
|
||||
kInt, err := gex.diffieHellman(kexDHGexReply.Y, x)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
if kexDHGexReply.Y.Cmp(bigOne) <= 0 || kexDHGexReply.Y.Cmp(pMinusOne) >= 0 {
|
||||
return nil, errors.New("ssh: DH parameter out of bounds")
|
||||
}
|
||||
kInt := new(big.Int).Exp(kexDHGexReply.Y, x, msg.P)
|
||||
|
||||
// Check if k is safe by verifing that k > 1 and k < p - 1
|
||||
if kInt.Cmp(one) != 1 && kInt.Cmp(pMinusOne) != -1 {
|
||||
// Check if k is safe by verifying that k > 1 and k < p - 1
|
||||
if kInt.Cmp(bigOne) <= 0 || kInt.Cmp(pMinusOne) >= 0 {
|
||||
return nil, fmt.Errorf("ssh: derived k is not safe")
|
||||
}
|
||||
|
||||
@ -650,8 +660,8 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
|
||||
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits))
|
||||
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits))
|
||||
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits))
|
||||
writeInt(h, gex.p)
|
||||
writeInt(h, gex.g)
|
||||
writeInt(h, msg.P)
|
||||
writeInt(h, msg.G)
|
||||
writeInt(h, X)
|
||||
writeInt(h, kexDHGexReply.Y)
|
||||
K := make([]byte, intLength(kInt))
|
||||
@ -670,7 +680,7 @@ func (gex dhGEXSHA) Client(c packetConn, randSource io.Reader, magics *handshake
|
||||
// Server half implementation of the Diffie Hellman Key Exchange with SHA1 and SHA256.
|
||||
//
|
||||
// This is a minimal implementation to satisfy the automated tests.
|
||||
func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv Signer) (result *kexResult, err error) {
|
||||
func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshakeMagics, priv AlgorithmSigner, algo string) (result *kexResult, err error) {
|
||||
// Receive GexRequest
|
||||
packet, err := c.readPacket()
|
||||
if err != nil {
|
||||
@ -681,35 +691,17 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
|
||||
return
|
||||
}
|
||||
|
||||
// smoosh the user's preferred size into our own limits
|
||||
if kexDHGexRequest.PreferedBits > dhGroupExchangeMaximumBits {
|
||||
kexDHGexRequest.PreferedBits = dhGroupExchangeMaximumBits
|
||||
}
|
||||
if kexDHGexRequest.PreferedBits < dhGroupExchangeMinimumBits {
|
||||
kexDHGexRequest.PreferedBits = dhGroupExchangeMinimumBits
|
||||
}
|
||||
// fix min/max if they're inconsistent. technically, we could just pout
|
||||
// and hang up, but there's no harm in giving them the benefit of the
|
||||
// doubt and just picking a bitsize for them.
|
||||
if kexDHGexRequest.MinBits > kexDHGexRequest.PreferedBits {
|
||||
kexDHGexRequest.MinBits = kexDHGexRequest.PreferedBits
|
||||
}
|
||||
if kexDHGexRequest.MaxBits < kexDHGexRequest.PreferedBits {
|
||||
kexDHGexRequest.MaxBits = kexDHGexRequest.PreferedBits
|
||||
}
|
||||
|
||||
// Send GexGroup
|
||||
// This is the group called diffie-hellman-group14-sha1 in RFC
|
||||
// 4253 and Oakley Group 14 in RFC 3526.
|
||||
p, _ := new(big.Int).SetString("FFFFFFFFFFFFFFFFC90FDAA22168C234C4C6628B80DC1CD129024E088A67CC74020BBEA63B139B22514A08798E3404DDEF9519B3CD3A431B302B0A6DF25F14374FE1356D6D51C245E485B576625E7EC6F44C42E9A637ED6B0BFF5CB6F406B7EDEE386BFB5A899FA5AE9F24117C4B1FE649286651ECE45B3DC2007CB8A163BF0598DA48361C55D39A69163FA8FD24CF5F83655D23DCA3AD961C62F356208552BB9ED529077096966D670C354E4ABC9804F1746C08CA18217C32905E462E36CE3BE39E772C180E86039B2783A2EC07A28FB5C55DF06F4C52C9DE2BCBF6955817183995497CEA956AE515D2261898FA051015728E5A8AACAA68FFFFFFFFFFFFFFFF", 16)
|
||||
gex.p = p
|
||||
gex.g = big.NewInt(2)
|
||||
g := big.NewInt(2)
|
||||
|
||||
kexDHGexGroup := kexDHGexGroupMsg{
|
||||
P: gex.p,
|
||||
G: gex.g,
|
||||
msg := &kexDHGexGroupMsg{
|
||||
P: p,
|
||||
G: g,
|
||||
}
|
||||
if err := c.writePacket(Marshal(&kexDHGexGroup)); err != nil {
|
||||
if err := c.writePacket(Marshal(msg)); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
@ -723,19 +715,19 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
|
||||
return
|
||||
}
|
||||
|
||||
var pHalf = &big.Int{}
|
||||
pHalf.Rsh(gex.p, 1)
|
||||
pHalf := new(big.Int).Rsh(p, 1)
|
||||
|
||||
y, err := rand.Int(randSource, pHalf)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
Y := new(big.Int).Exp(g, y, p)
|
||||
|
||||
Y := new(big.Int).Exp(gex.g, y, gex.p)
|
||||
kInt, err := gex.diffieHellman(kexDHGexInit.X, y)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
pMinusOne := new(big.Int).Sub(p, bigOne)
|
||||
if kexDHGexInit.X.Cmp(bigOne) <= 0 || kexDHGexInit.X.Cmp(pMinusOne) >= 0 {
|
||||
return nil, errors.New("ssh: DH parameter out of bounds")
|
||||
}
|
||||
kInt := new(big.Int).Exp(kexDHGexInit.X, y, p)
|
||||
|
||||
hostKeyBytes := priv.PublicKey().Marshal()
|
||||
|
||||
@ -745,8 +737,8 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
|
||||
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMinimumBits))
|
||||
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangePreferredBits))
|
||||
binary.Write(h, binary.BigEndian, uint32(dhGroupExchangeMaximumBits))
|
||||
writeInt(h, gex.p)
|
||||
writeInt(h, gex.g)
|
||||
writeInt(h, p)
|
||||
writeInt(h, g)
|
||||
writeInt(h, kexDHGexInit.X)
|
||||
writeInt(h, Y)
|
||||
|
||||
@ -758,7 +750,7 @@ func (gex dhGEXSHA) Server(c packetConn, randSource io.Reader, magics *handshake
|
||||
|
||||
// H is already a hash, but the hostkey signing will apply its
|
||||
// own key-specific hash algorithm.
|
||||
sig, err := signAndMarshal(priv, randSource, H)
|
||||
sig, err := signAndMarshal(priv, randSource, H, algo)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
158 vendor/golang.org/x/crypto/ssh/keys.go (generated, vendored)
@ -30,8 +30,9 @@ import (
"golang.org/x/crypto/ssh/internal/bcrypt_pbkdf"
)

// These constants represent the algorithm names for key types supported by this
// package.
// Public key algorithms names. These values can appear in PublicKey.Type,
// ClientConfig.HostKeyAlgorithms, Signature.Format, or as AlgorithmSigner
// arguments.
const (
KeyAlgoRSA = "ssh-rsa"
KeyAlgoDSA = "ssh-dss"
@ -41,16 +42,21 @@ const (
KeyAlgoECDSA521 = "ecdsa-sha2-nistp521"
KeyAlgoED25519 = "ssh-ed25519"
KeyAlgoSKED25519 = "sk-ssh-ed25519@openssh.com"

// KeyAlgoRSASHA256 and KeyAlgoRSASHA512 are only public key algorithms, not
// public key formats, so they can't appear as a PublicKey.Type. The
// corresponding PublicKey.Type is KeyAlgoRSA. See RFC 8332, Section 2.
KeyAlgoRSASHA256 = "rsa-sha2-256"
KeyAlgoRSASHA512 = "rsa-sha2-512"
)

// These constants represent non-default signature algorithms that are supported
// as algorithm parameters to AlgorithmSigner.SignWithAlgorithm methods. See
// [PROTOCOL.agent] section 4.5.1 and
// https://tools.ietf.org/html/draft-ietf-curdle-rsa-sha2-10
const (
SigAlgoRSA = "ssh-rsa"
SigAlgoRSASHA2256 = "rsa-sha2-256"
SigAlgoRSASHA2512 = "rsa-sha2-512"
// Deprecated: use KeyAlgoRSA.
SigAlgoRSA = KeyAlgoRSA
// Deprecated: use KeyAlgoRSASHA256.
SigAlgoRSASHA2256 = KeyAlgoRSASHA256
// Deprecated: use KeyAlgoRSASHA512.
SigAlgoRSASHA2512 = KeyAlgoRSASHA512
)

// parsePubKey parses a public key of the given algorithm.
@ -70,7 +76,7 @@ func parsePubKey(in []byte, algo string) (pubKey PublicKey, rest []byte, err err
case KeyAlgoSKED25519:
return parseSKEd25519(in)
case CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
cert, err := parseCert(in, certToPrivAlgo(algo))
cert, err := parseCert(in, certKeyAlgoNames[algo])
if err != nil {
return nil, nil, err
}
@ -289,18 +295,21 @@ func MarshalAuthorizedKey(key PublicKey) []byte {
return b.Bytes()
}

// PublicKey is an abstraction of different types of public keys.
// PublicKey represents a public key using an unspecified algorithm.
//
// Some PublicKeys provided by this package also implement CryptoPublicKey.
type PublicKey interface {
// Type returns the key's type, e.g. "ssh-rsa".
// Type returns the key format name, e.g. "ssh-rsa".
Type() string

// Marshal returns the serialized key data in SSH wire format,
// with the name prefix. To unmarshal the returned data, use
// the ParsePublicKey function.
// Marshal returns the serialized key data in SSH wire format, with the name
// prefix. To unmarshal the returned data, use the ParsePublicKey function.
Marshal() []byte

// Verify that sig is a signature on the given data using this
// key. This function will hash the data appropriately first.
// Verify that sig is a signature on the given data using this key. This
// method will hash the data appropriately first. sig.Format is allowed to
// be any signature algorithm compatible with the key type, the caller
// should check if it has more stringent requirements.
Verify(data []byte, sig *Signature) error
}

@ -311,25 +320,32 @@ type CryptoPublicKey interface {
}

// A Signer can create signatures that verify against a public key.
//
// Some Signers provided by this package also implement AlgorithmSigner.
type Signer interface {
// PublicKey returns an associated PublicKey instance.
// PublicKey returns the associated PublicKey.
PublicKey() PublicKey

// Sign returns raw signature for the given data. This method
// will apply the hash specified for the keytype to the data.
// Sign returns a signature for the given data. This method will hash the
// data appropriately first. The signature algorithm is expected to match
// the key format returned by the PublicKey.Type method (and not to be any
// alternative algorithm supported by the key format).
Sign(rand io.Reader, data []byte) (*Signature, error)
}

// A AlgorithmSigner is a Signer that also supports specifying a specific
// algorithm to use for signing.
// An AlgorithmSigner is a Signer that also supports specifying an algorithm to
// use for signing.
//
// An AlgorithmSigner can't advertise the algorithms it supports, so it should
// be prepared to be invoked with every algorithm supported by the public key
// format.
type AlgorithmSigner interface {
Signer

// SignWithAlgorithm is like Signer.Sign, but allows specification of a
// non-default signing algorithm. See the SigAlgo* constants in this
// package for signature algorithms supported by this package. Callers may
// pass an empty string for the algorithm in which case the AlgorithmSigner
// will use its default algorithm.
// SignWithAlgorithm is like Signer.Sign, but allows specifying a desired
// signing algorithm. Callers may pass an empty string for the algorithm in
// which case the AlgorithmSigner will use a default algorithm. This default
// doesn't currently control any behavior in this package.
SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error)
}

@ -381,17 +397,11 @@ func (r *rsaPublicKey) Marshal() []byte {
}

func (r *rsaPublicKey) Verify(data []byte, sig *Signature) error {
var hash crypto.Hash
switch sig.Format {
case SigAlgoRSA:
hash = crypto.SHA1
case SigAlgoRSASHA2256:
hash = crypto.SHA256
case SigAlgoRSASHA2512:
hash = crypto.SHA512
default:
supportedAlgos := algorithmsForKeyFormat(r.Type())
if !contains(supportedAlgos, sig.Format) {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, r.Type())
}
hash := hashFuncs[sig.Format]
h := hash.New()
h.Write(data)
digest := h.Sum(nil)
@ -466,7 +476,7 @@ func (k *dsaPublicKey) Verify(data []byte, sig *Signature) error {
if sig.Format != k.Type() {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}
h := crypto.SHA1.New()
h := hashFuncs[sig.Format].New()
h.Write(data)
digest := h.Sum(nil)

@ -499,7 +509,7 @@ func (k *dsaPrivateKey) PublicKey() PublicKey {
}

func (k *dsaPrivateKey) Sign(rand io.Reader, data []byte) (*Signature, error) {
return k.SignWithAlgorithm(rand, data, "")
return k.SignWithAlgorithm(rand, data, k.PublicKey().Type())
}

func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
@ -507,7 +517,7 @@ func (k *dsaPrivateKey) SignWithAlgorithm(rand io.Reader, data []byte, algorithm
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}

h := crypto.SHA1.New()
h := hashFuncs[k.PublicKey().Type()].New()
h.Write(data)
digest := h.Sum(nil)
r, s, err := dsa.Sign(rand, k.PrivateKey, digest)
@ -603,19 +613,6 @@ func supportedEllipticCurve(curve elliptic.Curve) bool {
return curve == elliptic.P256() || curve == elliptic.P384() || curve == elliptic.P521()
}

// ecHash returns the hash to match the given elliptic curve, see RFC
// 5656, section 6.2.1
func ecHash(curve elliptic.Curve) crypto.Hash {
bitSize := curve.Params().BitSize
switch {
case bitSize <= 256:
return crypto.SHA256
case bitSize <= 384:
return crypto.SHA384
}
return crypto.SHA512
}

// parseECDSA parses an ECDSA key according to RFC 5656, section 3.1.
func parseECDSA(in []byte) (out PublicKey, rest []byte, err error) {
var w struct {
@ -671,7 +668,7 @@ func (k *ecdsaPublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}

h := ecHash(k.Curve).New()
h := hashFuncs[sig.Format].New()
h.Write(data)
digest := h.Sum(nil)

@ -775,7 +772,7 @@ func (k *skECDSAPublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("ssh: signature type %s for key type %s", sig.Format, k.Type())
}

h := ecHash(k.Curve).New()
h := hashFuncs[sig.Format].New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)

@ -874,7 +871,7 @@ func (k *skEd25519PublicKey) Verify(data []byte, sig *Signature) error {
return fmt.Errorf("invalid size %d for Ed25519 public key", l)
}

h := sha256.New()
h := hashFuncs[sig.Format].New()
h.Write([]byte(k.application))
appDigest := h.Sum(nil)

@ -939,15 +936,6 @@ func newDSAPrivateKey(key *dsa.PrivateKey) (Signer, error) {
return &dsaPrivateKey{key}, nil
}

type rsaSigner struct {
AlgorithmSigner
defaultAlgorithm string
}

func (s *rsaSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
return s.AlgorithmSigner.SignWithAlgorithm(rand, data, s.defaultAlgorithm)
}

type wrappedSigner struct {
signer crypto.Signer
pubKey PublicKey
@ -970,44 +958,20 @@ func (s *wrappedSigner) PublicKey() PublicKey {
}

func (s *wrappedSigner) Sign(rand io.Reader, data []byte) (*Signature, error) {
return s.SignWithAlgorithm(rand, data, "")
return s.SignWithAlgorithm(rand, data, s.pubKey.Type())
}

func (s *wrappedSigner) SignWithAlgorithm(rand io.Reader, data []byte, algorithm string) (*Signature, error) {
var hashFunc crypto.Hash

if _, ok := s.pubKey.(*rsaPublicKey); ok {
// RSA keys support a few hash functions determined by the requested signature algorithm
switch algorithm {
case "", SigAlgoRSA:
algorithm = SigAlgoRSA
hashFunc = crypto.SHA1
case SigAlgoRSASHA2256:
hashFunc = crypto.SHA256
case SigAlgoRSASHA2512:
hashFunc = crypto.SHA512
default:
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}
} else {
// The only supported algorithm for all other key types is the same as the type of the key
if algorithm == "" {
algorithm = s.pubKey.Type()
} else if algorithm != s.pubKey.Type() {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %s", algorithm)
}

switch key := s.pubKey.(type) {
case *dsaPublicKey:
hashFunc = crypto.SHA1
case *ecdsaPublicKey:
hashFunc = ecHash(key.Curve)
case ed25519PublicKey:
default:
return nil, fmt.Errorf("ssh: unsupported key type %T", key)
}
if algorithm == "" {
algorithm = s.pubKey.Type()
}

supportedAlgos := algorithmsForKeyFormat(s.pubKey.Type())
if !contains(supportedAlgos, algorithm) {
return nil, fmt.Errorf("ssh: unsupported signature algorithm %q for key format %q", algorithm, s.pubKey.Type())
}

hashFunc := hashFuncs[algorithm]
var digest []byte
if hashFunc != 0 {
h := hashFunc.New()
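The keys.go changes above replace the per-key hash switches with a shared hashFuncs table and promote the RSA SHA-2 names to KeyAlgo* constants. For context, a minimal sketch of how a caller might exercise the updated AlgorithmSigner contract with this vendored x/crypto/ssh; the throwaway key and payload are illustrative assumptions, not part of the diff:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"fmt"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Throwaway RSA key purely for illustration.
	key, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		panic(err)
	}
	signer, err := ssh.NewSignerFromKey(key)
	if err != nil {
		panic(err)
	}
	// RSA signers from this package also implement AlgorithmSigner, so the
	// caller can request rsa-sha2-256 instead of the legacy ssh-rsa default.
	algSigner, ok := signer.(ssh.AlgorithmSigner)
	if !ok {
		panic("signer does not support algorithm selection")
	}
	sig, err := algSigner.SignWithAlgorithm(rand.Reader, []byte("payload"), ssh.KeyAlgoRSASHA256)
	if err != nil {
		panic(err)
	}
	fmt.Println(sig.Format) // expected to print "rsa-sha2-256"
}
```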
21
vendor/golang.org/x/crypto/ssh/messages.go
generated
vendored
21
vendor/golang.org/x/crypto/ssh/messages.go
generated
vendored
@ -141,6 +141,14 @@ type serviceAcceptMsg struct {
Service string `sshtype:"6"`
}

// See RFC 8308, section 2.3
const msgExtInfo = 7

type extInfoMsg struct {
NumExtensions uint32 `sshtype:"7"`
Payload []byte `ssh:"rest"`
}

// See RFC 4252, section 5.
const msgUserAuthRequest = 50

@ -180,11 +188,11 @@ const msgUserAuthInfoRequest = 60
const msgUserAuthInfoResponse = 61

type userAuthInfoRequestMsg struct {
User string `sshtype:"60"`
Instruction string
DeprecatedLanguage string
NumPrompts uint32
Prompts []byte `ssh:"rest"`
Name string `sshtype:"60"`
Instruction string
Language string
NumPrompts uint32
Prompts []byte `ssh:"rest"`
}

// See RFC 4254, section 5.1.
@ -782,6 +790,8 @@ func decode(packet []byte) (interface{}, error) {
msg = new(serviceRequestMsg)
case msgServiceAccept:
msg = new(serviceAcceptMsg)
case msgExtInfo:
msg = new(extInfoMsg)
case msgKexInit:
msg = new(kexInitMsg)
case msgKexDHInit:
@ -843,6 +853,7 @@ var packetTypeNames = map[byte]string{
msgDisconnect: "disconnectMsg",
msgServiceRequest: "serviceRequestMsg",
msgServiceAccept: "serviceAcceptMsg",
msgExtInfo: "extInfoMsg",
msgKexInit: "kexInitMsg",
msgKexDHInit: "kexDHInitMsg",
msgKexDHReply: "kexDHReplyMsg",
46
vendor/golang.org/x/crypto/ssh/server.go
generated
vendored
46
vendor/golang.org/x/crypto/ssh/server.go
generated
vendored
@ -120,7 +120,7 @@ type ServerConfig struct {
}

// AddHostKey adds a private key as a host key. If an existing host
// key exists with the same algorithm, it is overwritten. Each server
// key exists with the same public key format, it is replaced. Each server
// config must have at least one host key.
func (s *ServerConfig) AddHostKey(key Signer) {
for i, k := range s.hostKeys {
@ -212,9 +212,10 @@ func NewServerConn(c net.Conn, config *ServerConfig) (*ServerConn, <-chan NewCha
}

// signAndMarshal signs the data with the appropriate algorithm,
// and serializes the result in SSH wire format.
func signAndMarshal(k Signer, rand io.Reader, data []byte) ([]byte, error) {
sig, err := k.Sign(rand, data)
// and serializes the result in SSH wire format. algo is the negotiate
// algorithm and may be a certificate type.
func signAndMarshal(k AlgorithmSigner, rand io.Reader, data []byte, algo string) ([]byte, error) {
sig, err := k.SignWithAlgorithm(rand, data, underlyingAlgo(algo))
if err != nil {
return nil, err
}
@ -284,7 +285,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error)

func isAcceptableAlgo(algo string) bool {
switch algo {
case SigAlgoRSA, SigAlgoRSASHA2256, SigAlgoRSASHA2512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
case KeyAlgoRSA, KeyAlgoRSASHA256, KeyAlgoRSASHA512, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoSKECDSA256, KeyAlgoED25519, KeyAlgoSKED25519,
CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoSKECDSA256v01, CertAlgoED25519v01, CertAlgoSKED25519v01:
return true
}
@ -553,6 +554,7 @@ userAuthLoop:
if !ok || len(payload) > 0 {
return nil, parseError(msgUserAuthRequest)
}

// Ensure the public key algo and signature algo
// are supported. Compare the private key
// algorithm name that corresponds to algo with
@ -562,7 +564,12 @@ userAuthLoop:
authErr = fmt.Errorf("ssh: algorithm %q not accepted", sig.Format)
break
}
signedData := buildDataSignedForAuth(sessionID, userAuthReq, algoBytes, pubKeyData)
if underlyingAlgo(algo) != sig.Format {
authErr = fmt.Errorf("ssh: signature %q not compatible with selected algorithm %q", sig.Format, algo)
break
}

signedData := buildDataSignedForAuth(sessionID, userAuthReq, algo, pubKeyData)

if err := pubKey.Verify(signedData, sig); err != nil {
return nil, err
@ -633,6 +640,30 @@ userAuthLoop:
}

authFailures++
if config.MaxAuthTries > 0 && authFailures >= config.MaxAuthTries {
// If we have hit the max attempts, don't bother sending the
// final SSH_MSG_USERAUTH_FAILURE message, since there are
// no more authentication methods which can be attempted,
// and this message may cause the client to re-attempt
// authentication while we send the disconnect message.
// Continue, and trigger the disconnect at the start of
// the loop.
//
// The SSH specification is somewhat confusing about this,
// RFC 4252 Section 5.1 requires each authentication failure
// be responded to with a respective SSH_MSG_USERAUTH_FAILURE
// message, but Section 4 says the server should disconnect
// after some number of attempts, but it isn't explicit which
// message should take precedence (i.e. should there be a failure
// message than a disconnect message, or if we are going to
// disconnect, should we only send that message.)
//
// Either way, OpenSSH disconnects immediately after the last
// failed authnetication attempt, and given they are typically
// considered the golden implementation it seems reasonable
// to match that behavior.
continue
}

var failureMsg userAuthFailureMsg
if config.PasswordCallback != nil {
@ -670,7 +701,7 @@ type sshClientKeyboardInteractive struct {
*connection
}

func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, questions []string, echos []bool) (answers []string, err error) {
func (c *sshClientKeyboardInteractive) Challenge(name, instruction string, questions []string, echos []bool) (answers []string, err error) {
if len(questions) != len(echos) {
return nil, errors.New("ssh: echos and questions must have equal length")
}
@ -682,6 +713,7 @@ func (c *sshClientKeyboardInteractive) Challenge(user, instruction string, quest
}

if err := c.transport.writePacket(Marshal(&userAuthInfoRequestMsg{
Name: name,
Instruction: instruction,
NumPrompts: uint32(len(questions)),
Prompts: prompts,
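For orientation, a hedged sketch of the server-side setup this section feeds into: with signAndMarshal now signing with the negotiated algorithm, a single RSA host key registered via AddHostKey can answer ssh-rsa, rsa-sha2-256 and rsa-sha2-512 host key algorithms. The listener address, generated host key and no-auth config below are illustrative assumptions only:

```go
package main

import (
	"crypto/rand"
	"crypto/rsa"
	"log"
	"net"

	"golang.org/x/crypto/ssh"
)

func main() {
	// Illustrative host key; a real server would load a persistent key.
	hostKey, err := rsa.GenerateKey(rand.Reader, 2048)
	if err != nil {
		log.Fatal(err)
	}
	signer, err := ssh.NewSignerFromKey(hostKey)
	if err != nil {
		log.Fatal(err)
	}

	config := &ssh.ServerConfig{NoClientAuth: true}
	// One RSA key covers the SHA-1 and SHA-2 host key algorithms, since the
	// handshake signs with whichever algorithm the client negotiated.
	config.AddHostKey(signer)

	ln, err := net.Listen("tcp", "127.0.0.1:2022")
	if err != nil {
		log.Fatal(err)
	}
	conn, err := ln.Accept()
	if err != nil {
		log.Fatal(err)
	}
	_, chans, reqs, err := ssh.NewServerConn(conn, config)
	if err != nil {
		log.Fatal(err)
	}
	go ssh.DiscardRequests(reqs)
	for newChan := range chans {
		// Demo server: accept the handshake but open no channels.
		newChan.Reject(ssh.Prohibited, "demo server accepts no channels")
	}
}
```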
1
vendor/golang.org/x/crypto/ssh/session.go
generated
vendored
1
vendor/golang.org/x/crypto/ssh/session.go
generated
vendored
@ -85,6 +85,7 @@ const (
IXANY = 39
IXOFF = 40
IMAXBEL = 41
IUTF8 = 42 // RFC 8160
ISIG = 50
ICANON = 51
XCASE = 52
10
vendor/golang.org/x/crypto/ssh/transport.go
generated
vendored
10
vendor/golang.org/x/crypto/ssh/transport.go
generated
vendored
@ -238,15 +238,19 @@ var (
// (to setup server->client keys) or clientKeys (for client->server keys).
func newPacketCipher(d direction, algs directionAlgorithms, kex *kexResult) (packetCipher, error) {
cipherMode := cipherModes[algs.Cipher]
macMode := macModes[algs.MAC]

iv := make([]byte, cipherMode.ivSize)
key := make([]byte, cipherMode.keySize)
macKey := make([]byte, macMode.keySize)

generateKeyMaterial(iv, d.ivTag, kex)
generateKeyMaterial(key, d.keyTag, kex)
generateKeyMaterial(macKey, d.macKeyTag, kex)

var macKey []byte
if !aeadCiphers[algs.Cipher] {
macMode := macModes[algs.MAC]
macKey = make([]byte, macMode.keySize)
generateKeyMaterial(macKey, d.macKeyTag, kex)
}

return cipherModes[algs.Cipher].create(key, iv, macKey, algs)
}
7
vendor/google.golang.org/grpc/CONTRIBUTING.md
generated
vendored
7
vendor/google.golang.org/grpc/CONTRIBUTING.md
generated
vendored
@ -53,9 +53,8 @@ How to get your contributions merged smoothly and quickly.
- **All tests need to be passing** before your change can be merged. We
recommend you **run tests locally** before creating your PR to catch breakages
early on.
- `make all` to test everything, OR
- `make vet` to catch vet errors
- `make test` to run the tests
- `make testrace` to run tests in race mode
- `VET_SKIP_PROTO=1 ./vet.sh` to catch vet errors
- `go test -cpu 1,4 -timeout 7m ./...` to run the tests
- `go test -race -cpu 1,4 -timeout 7m ./...` to run tests in race mode

- Exceptions to the rules can be made if there's a compelling reason for doing so.
2
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
2
vendor/google.golang.org/grpc/clientconn.go
generated
vendored
@ -79,7 +79,7 @@ var (
// errNoTransportSecurity indicates that there is no transport security
// being set for ClientConn. Users should either set one or explicitly
// call WithInsecure DialOption to disable security.
errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithInsecure() explicitly or set credentials)")
errNoTransportSecurity = errors.New("grpc: no transport security set (use grpc.WithTransportCredentials(insecure.NewCredentials()) explicitly or set credentials)")
// errTransportCredsAndBundle indicates that creds bundle is used together
// with other individual Transport Credentials.
errTransportCredsAndBundle = errors.New("grpc: credentials.Bundle may not be used with individual TransportCredentials")
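The new error text points at the replacement for the deprecated grpc.WithInsecure dial option. A minimal sketch of a dial that satisfies it, with a placeholder target address:

```go
package main

import (
	"log"

	"google.golang.org/grpc"
	"google.golang.org/grpc/credentials/insecure"
)

func main() {
	// Explicitly opt out of transport security instead of grpc.WithInsecure().
	conn, err := grpc.Dial("localhost:50051",
		grpc.WithTransportCredentials(insecure.NewCredentials()))
	if err != nil {
		log.Fatalf("dial: %v", err)
	}
	defer conn.Close()
}
```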
26
vendor/google.golang.org/grpc/credentials/insecure/insecure.go
generated
vendored
26
vendor/google.golang.org/grpc/credentials/insecure/insecure.go
generated
vendored
@ -70,3 +70,29 @@ type info struct {
func (info) AuthType() string {
return "insecure"
}

// insecureBundle implements an insecure bundle.
// An insecure bundle provides a thin wrapper around insecureTC to support
// the credentials.Bundle interface.
type insecureBundle struct{}

// NewBundle returns a bundle with disabled transport security and no per rpc credential.
func NewBundle() credentials.Bundle {
return insecureBundle{}
}

// NewWithMode returns a new insecure Bundle. The mode is ignored.
func (insecureBundle) NewWithMode(string) (credentials.Bundle, error) {
return insecureBundle{}, nil
}

// PerRPCCredentials returns an nil implementation as insecure
// bundle does not support a per rpc credential.
func (insecureBundle) PerRPCCredentials() credentials.PerRPCCredentials {
return nil
}

// TransportCredentials returns the underlying insecure transport credential.
func (insecureBundle) TransportCredentials() credentials.TransportCredentials {
return NewCredentials()
}
6
vendor/google.golang.org/grpc/go.mod
generated
vendored
6
vendor/google.golang.org/grpc/go.mod
generated
vendored
@ -8,12 +8,12 @@ require (
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b
github.com/golang/protobuf v1.4.3
github.com/google/go-cmp v0.5.0
github.com/golang/protobuf v1.5.2
github.com/google/go-cmp v0.5.5
github.com/google/uuid v1.1.2
golang.org/x/net v0.0.0-20200822124328-c89045814202
golang.org/x/oauth2 v0.0.0-20200107190931-bf48bf16ab8d
golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013
google.golang.org/protobuf v1.25.0
google.golang.org/protobuf v1.26.0
)
11
vendor/google.golang.org/grpc/go.sum
generated
vendored
11
vendor/google.golang.org/grpc/go.sum
generated
vendored
@ -40,14 +40,17 @@ github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:W
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.2/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.4.3 h1:JjCZWpVbqXDqFVmTfYWEVTMIYrL/NPdPSCHPJ0T/raM=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk=
github.com/golang/protobuf v1.5.2 h1:ROPKBNFfQgOUMifHyP+KYbvpjbdoFNs+aK7DXlji0Tw=
github.com/golang/protobuf v1.5.2/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0 h1:/QaMHBdZ26BB3SSst0Iwl10Epc+xhTquomWX0oZEB6w=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5 h1:Khx7svrCpmxxtHBq5j2mp/xVjsi8hQMfNLvJFAlrGgU=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
@ -117,8 +120,10 @@ google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzi
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0 h1:Ejskq+SyPohKW+1uil0JJMtmHCgJPJ/qWTxr8qp+R4c=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
9
vendor/google.golang.org/grpc/interceptor.go
generated
vendored
9
vendor/google.golang.org/grpc/interceptor.go
generated
vendored
@ -72,9 +72,12 @@ type UnaryServerInfo struct {
}

// UnaryHandler defines the handler invoked by UnaryServerInterceptor to complete the normal
// execution of a unary RPC. If a UnaryHandler returns an error, it should be produced by the
// status package, or else gRPC will use codes.Unknown as the status code and err.Error() as
// the status message of the RPC.
// execution of a unary RPC.
//
// If a UnaryHandler returns an error, it should either be produced by the
// status package, or be one of the context errors. Otherwise, gRPC will use
// codes.Unknown as the status code and err.Error() as the status message of the
// RPC.
type UnaryHandler func(ctx context.Context, req interface{}) (interface{}, error)

// UnaryServerInterceptor provides a hook to intercept the execution of a unary RPC on the server. info
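The reworded UnaryHandler contract above, together with the server.go change further down that converts application errors via status.FromContextError, means handlers should return either a status error or a context error. A hedged sketch of a handler following that guidance; the echo function and its string request type are hypothetical stand-ins for a generated service method:

```go
package main

import (
	"context"

	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// handleEcho stands in for a generated service method; plain strings keep the
// sketch self-contained.
func handleEcho(ctx context.Context, msg string) (string, error) {
	if err := ctx.Err(); err != nil {
		// Returning a context error lets gRPC map it to codes.Canceled or
		// codes.DeadlineExceeded via status.FromContextError.
		return "", err
	}
	if msg == "" {
		// Otherwise produce errors with the status package so the client sees
		// a meaningful code instead of codes.Unknown.
		return "", status.Error(codes.InvalidArgument, "empty message")
	}
	return msg, nil
}

func main() {
	if _, err := handleEcho(context.Background(), ""); err != nil {
		st, _ := status.FromError(err)
		_ = st // st.Code() here is codes.InvalidArgument.
	}
}
```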
69
vendor/google.golang.org/grpc/internal/channelz/funcs.go
generated
vendored
69
vendor/google.golang.org/grpc/internal/channelz/funcs.go
generated
vendored
@ -24,6 +24,7 @@
package channelz

import (
"context"
"fmt"
"sort"
"sync"
@ -49,7 +50,8 @@ var (
// TurnOn turns on channelz data collection.
func TurnOn() {
if !IsOn() {
NewChannelzStorage()
db.set(newChannelMap())
idGen.reset()
atomic.StoreInt32(&curState, 1)
}
}
@ -94,46 +96,40 @@ func (d *dbWrapper) get() *channelMap {
return d.DB
}

// NewChannelzStorage initializes channelz data storage and id generator.
// NewChannelzStorageForTesting initializes channelz data storage and id
// generator for testing purposes.
//
// This function returns a cleanup function to wait for all channelz state to be reset by the
// grpc goroutines when those entities get closed. By using this cleanup function, we make sure tests
// don't mess up each other, i.e. lingering goroutine from previous test doing entity removal happen
// to remove some entity just register by the new test, since the id space is the same.
//
// Note: This function is exported for testing purpose only. User should not call
// it in most cases.
func NewChannelzStorage() (cleanup func() error) {
db.set(&channelMap{
topLevelChannels: make(map[int64]struct{}),
channels: make(map[int64]*channel),
listenSockets: make(map[int64]*listenSocket),
normalSockets: make(map[int64]*normalSocket),
servers: make(map[int64]*server),
subChannels: make(map[int64]*subChannel),
})
// Returns a cleanup function to be invoked by the test, which waits for up to
// 10s for all channelz state to be reset by the grpc goroutines when those
// entities get closed. This cleanup function helps with ensuring that tests
// don't mess up each other.
func NewChannelzStorageForTesting() (cleanup func() error) {
db.set(newChannelMap())
idGen.reset()

return func() error {
var err error
cm := db.get()
if cm == nil {
return nil
}
for i := 0; i < 1000; i++ {
cm.mu.Lock()
if len(cm.topLevelChannels) == 0 && len(cm.servers) == 0 && len(cm.channels) == 0 && len(cm.subChannels) == 0 && len(cm.listenSockets) == 0 && len(cm.normalSockets) == 0 {
cm.mu.Unlock()
// all things stored in the channelz map have been cleared.

ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
defer cancel()
ticker := time.NewTicker(10 * time.Millisecond)
defer ticker.Stop()
for {
cm.mu.RLock()
topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets := len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets)
cm.mu.RUnlock()

if err := ctx.Err(); err != nil {
return fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", topLevelChannels, servers, channels, subChannels, listenSockets, normalSockets)
}
if topLevelChannels == 0 && servers == 0 && channels == 0 && subChannels == 0 && listenSockets == 0 && normalSockets == 0 {
return nil
}
cm.mu.Unlock()
time.Sleep(10 * time.Millisecond)
<-ticker.C
}

cm.mu.Lock()
err = fmt.Errorf("after 10s the channelz map has not been cleaned up yet, topchannels: %d, servers: %d, channels: %d, subchannels: %d, listen sockets: %d, normal sockets: %d", len(cm.topLevelChannels), len(cm.servers), len(cm.channels), len(cm.subChannels), len(cm.listenSockets), len(cm.normalSockets))
cm.mu.Unlock()
return err
}
}

@ -326,6 +322,17 @@ type channelMap struct {
normalSockets map[int64]*normalSocket
}

func newChannelMap() *channelMap {
return &channelMap{
topLevelChannels: make(map[int64]struct{}),
channels: make(map[int64]*channel),
listenSockets: make(map[int64]*listenSocket),
normalSockets: make(map[int64]*normalSocket),
servers: make(map[int64]*server),
subChannels: make(map[int64]*subChannel),
}
}

func (c *channelMap) addServer(id int64, s *server) {
c.mu.Lock()
s.cm = c
12
vendor/google.golang.org/grpc/internal/envconfig/xds.go
generated
vendored
12
vendor/google.golang.org/grpc/internal/envconfig/xds.go
generated
vendored
@ -26,13 +26,13 @@ import (
const (
// XDSBootstrapFileNameEnv is the env variable to set bootstrap file name.
// Do not use this and read from env directly. Its value is read and kept in
// variable BootstrapFileName.
// variable XDSBootstrapFileName.
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileNameEnv = "GRPC_XDS_BOOTSTRAP"
// XDSBootstrapFileContentEnv is the env variable to set bootstrapp file
// XDSBootstrapFileContentEnv is the env variable to set bootstrap file
// content. Do not use this and read from env directly. Its value is read
// and kept in variable BootstrapFileName.
// and kept in variable XDSBootstrapFileContent.
//
// When both bootstrap FileName and FileContent are set, FileName is used.
XDSBootstrapFileContentEnv = "GRPC_XDS_BOOTSTRAP_CONFIG"
@ -41,6 +41,7 @@ const (
clientSideSecuritySupportEnv = "GRPC_XDS_EXPERIMENTAL_SECURITY_SUPPORT"
aggregateAndDNSSupportEnv = "GRPC_XDS_EXPERIMENTAL_ENABLE_AGGREGATE_AND_LOGICAL_DNS_CLUSTER"
rbacSupportEnv = "GRPC_XDS_EXPERIMENTAL_RBAC"
outlierDetectionSupportEnv = "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION"
federationEnv = "GRPC_EXPERIMENTAL_XDS_FEDERATION"
rlsInXDSEnv = "GRPC_EXPERIMENTAL_XDS_RLS_LB"

@ -82,7 +83,10 @@ var (
// which can be disabled by setting the environment variable
// "GRPC_XDS_EXPERIMENTAL_RBAC" to "false".
XDSRBAC = !strings.EqualFold(os.Getenv(rbacSupportEnv), "false")

// XDSOutlierDetection indicates whether outlier detection support is
// enabled, which can be enabled by setting the environment variable
// "GRPC_EXPERIMENTAL_ENABLE_OUTLIER_DETECTION" to "true".
XDSOutlierDetection = strings.EqualFold(os.Getenv(outlierDetectionSupportEnv), "true")
// XDSFederation indicates whether federation support is enabled.
XDSFederation = strings.EqualFold(os.Getenv(federationEnv), "true")
7
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
7
vendor/google.golang.org/grpc/internal/internal.go
generated
vendored
@ -38,11 +38,10 @@ var (
// KeepaliveMinPingTime is the minimum ping interval. This must be 10s by
// default, but tests may wish to set it lower for convenience.
KeepaliveMinPingTime = 10 * time.Second
// ParseServiceConfigForTesting is for creating a fake
// ClientConn for resolver testing only
ParseServiceConfigForTesting interface{} // func(string) *serviceconfig.ParseResult
// ParseServiceConfig parses a JSON representation of the service config.
ParseServiceConfig interface{} // func(string) *serviceconfig.ParseResult
// EqualServiceConfigForTesting is for testing service config generation and
// parsing. Both a and b should be returned by ParseServiceConfigForTesting.
// parsing. Both a and b should be returned by ParseServiceConfig.
// This function compares the config without rawJSON stripped, in case the
// there's difference in white space.
EqualServiceConfigForTesting func(a, b serviceconfig.Config) bool
6
vendor/google.golang.org/grpc/internal/transport/transport.go
generated
vendored
6
vendor/google.golang.org/grpc/internal/transport/transport.go
generated
vendored
@ -741,6 +741,12 @@ func (e ConnectionError) Origin() error {
return e.err
}

// Unwrap returns the original error of this connection error or nil when the
// origin is nil.
func (e ConnectionError) Unwrap() error {
return e.err
}

var (
// ErrConnClosing indicates that the transport is closing.
ErrConnClosing = connectionErrorf(true, nil, "transport is closing")
12
vendor/google.golang.org/grpc/regenerate.sh
generated
vendored
12
vendor/google.golang.org/grpc/regenerate.sh
generated
vendored
@ -27,9 +27,9 @@ export PATH=${GOBIN}:${PATH}
mkdir -p ${GOBIN}

echo "remove existing generated files"
# grpc_testingv3/testv3.pb.go is not re-generated because it was
# intentionally generated by an older version of protoc-gen-go.
rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testingv3/testv3.pb.go')
# grpc_testing_not_regenerate/*.pb.go is not re-generated,
# see grpc_testing_not_regenerate/README.md for details.
rm -f $(find . -name '*.pb.go' | grep -v 'grpc_testing_not_regenerate')

echo "go install google.golang.org/protobuf/cmd/protoc-gen-go"
(cd test/tools && go install google.golang.org/protobuf/cmd/protoc-gen-go)
@ -117,9 +117,9 @@ done
mkdir -p ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1
mv ${WORKDIR}/out/google.golang.org/grpc/lookup/grpc_lookup_v1/* ${WORKDIR}/out/google.golang.org/grpc/internal/proto/grpc_lookup_v1

# grpc_testingv3/testv3.pb.go is not re-generated because it was
# intentionally generated by an older version of protoc-gen-go.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testingv3/*.pb.go
# grpc_testing_not_regenerate/*.pb.go are not re-generated,
# see grpc_testing_not_regenerate/README.md for details.
rm ${WORKDIR}/out/google.golang.org/grpc/reflection/grpc_testing_not_regenerate/*.pb.go

# grpc/service_config/service_config.proto does not have a go_package option.
mv ${WORKDIR}/out/grpc/service_config/service_config.pb.go internal/proto/grpc_service_config
11
vendor/google.golang.org/grpc/server.go
generated
vendored
11
vendor/google.golang.org/grpc/server.go
generated
vendored
@ -1283,9 +1283,10 @@ func (s *Server) processUnaryRPC(t transport.ServerTransport, stream *transport.
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
// Convert appErr if it is not a grpc status error.
appErr = status.Error(codes.Unknown, appErr.Error())
appStatus, _ = status.FromError(appErr)
// Convert non-status application error to a status error with code
// Unknown, but handle context errors specifically.
appStatus = status.FromContextError(appErr)
appErr = appStatus.Err()
}
if trInfo != nil {
trInfo.tr.LazyLog(stringer(appStatus.Message()), true)
@ -1549,7 +1550,9 @@ func (s *Server) processStreamingRPC(t transport.ServerTransport, stream *transp
if appErr != nil {
appStatus, ok := status.FromError(appErr)
if !ok {
appStatus = status.New(codes.Unknown, appErr.Error())
// Convert non-status application error to a status error with code
// Unknown, but handle context errors specifically.
appStatus = status.FromContextError(appErr)
appErr = appStatus.Err()
}
if trInfo != nil {
2
vendor/google.golang.org/grpc/service_config.go
generated
vendored
2
vendor/google.golang.org/grpc/service_config.go
generated
vendored
@ -218,7 +218,7 @@ type jsonSC struct {
}

func init() {
internal.ParseServiceConfigForTesting = parseServiceConfig
internal.ParseServiceConfig = parseServiceConfig
}
func parseServiceConfig(js string) *serviceconfig.ParseResult {
if len(js) == 0 {
10
vendor/google.golang.org/grpc/stream.go
generated
vendored
10
vendor/google.golang.org/grpc/stream.go
generated
vendored
@ -46,10 +46,12 @@ import (
)

// StreamHandler defines the handler called by gRPC server to complete the
// execution of a streaming RPC. If a StreamHandler returns an error, it
// should be produced by the status package, or else gRPC will use
// codes.Unknown as the status code and err.Error() as the status message
// of the RPC.
// execution of a streaming RPC.
//
// If a StreamHandler returns an error, it should either be produced by the
// status package, or be one of the context errors. Otherwise, gRPC will use
// codes.Unknown as the status code and err.Error() as the status message of the
// RPC.
type StreamHandler func(srv interface{}, stream ServerStream) error

// StreamDesc represents a streaming RPC service's method specification. Used
2
vendor/google.golang.org/grpc/version.go
generated
vendored
2
vendor/google.golang.org/grpc/version.go
generated
vendored
@ -19,4 +19,4 @@
package grpc

// Version is the current grpc version.
const Version = "1.44.1-dev"
const Version = "1.45.0"
2
vendor/google.golang.org/grpc/vet.sh
generated
vendored
2
vendor/google.golang.org/grpc/vet.sh
generated
vendored
@ -107,7 +107,7 @@ for MOD_FILE in $(find . -name 'go.mod'); do
go vet -all ./... | fail_on_output
gofmt -s -d -l . 2>&1 | fail_on_output
goimports -l . 2>&1 | not grep -vE "\.pb\.go"
golint ./... 2>&1 | not grep -vE "/testv3\.pb\.go:"
golint ./... 2>&1 | not grep -vE "/grpc_testing_not_regenerate/.*\.pb\.go:"

go mod tidy
git status --porcelain 2>&1 | fail_on_output || \
25
vendor/modules.txt
vendored
25
vendor/modules.txt
vendored
@ -45,7 +45,7 @@ github.com/compose-spec/compose-go/types
# github.com/containerd/console v1.0.3
## explicit
github.com/containerd/console
# github.com/containerd/containerd v1.6.1
# github.com/containerd/containerd v1.6.3-0.20220401172941-5ff8fce1fcc6
## explicit
github.com/containerd/containerd/api/services/content/v1
github.com/containerd/containerd/archive/compression
@ -72,7 +72,7 @@ github.com/containerd/containerd/remotes/docker/schema1
github.com/containerd/containerd/remotes/errors
github.com/containerd/containerd/services/content/contentserver
github.com/containerd/containerd/version
# github.com/containerd/continuity v0.2.2
# github.com/containerd/continuity v0.2.3-0.20220330195504-d132b287edc8
github.com/containerd/continuity/sysx
# github.com/containerd/ttrpc v1.1.0
github.com/containerd/ttrpc
@ -83,7 +83,7 @@ github.com/davecgh/go-spew/spew
# github.com/distribution/distribution/v3 v3.0.0-20210316161203-a01c71e2477e
github.com/distribution/distribution/v3/digestset
github.com/distribution/distribution/v3/reference
# github.com/docker/cli v20.10.12+incompatible => github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible
# github.com/docker/cli v20.10.13+incompatible => github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible
## explicit
github.com/docker/cli/cli
github.com/docker/cli/cli-plugins/manager
@ -130,7 +130,7 @@ github.com/docker/distribution/registry/client/transport
github.com/docker/distribution/registry/storage/cache
github.com/docker/distribution/registry/storage/cache/memory
github.com/docker/distribution/uuid
# github.com/docker/docker v20.10.7+incompatible => github.com/docker/docker v20.10.3-0.20220121014307-40bb9831756f+incompatible
# github.com/docker/docker v20.10.7+incompatible => github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible
## explicit
github.com/docker/docker/api
github.com/docker/docker/api/types
@ -277,7 +277,7 @@ github.com/inconshreveable/mousetrap
github.com/json-iterator/go
# github.com/kardianos/osext v0.0.0-20190222173326-2bc1f35cddc0
## explicit
# github.com/klauspost/compress v1.15.0
# github.com/klauspost/compress v1.15.1
github.com/klauspost/compress
github.com/klauspost/compress/fse
github.com/klauspost/compress/huff0
@ -288,13 +288,13 @@ github.com/klauspost/compress/zstd/internal/xxhash
github.com/mattn/go-shellwords
# github.com/matttproud/golang_protobuf_extensions v1.0.2-0.20181231171920-c182affec369
github.com/matttproud/golang_protobuf_extensions/pbutil
# github.com/miekg/pkcs11 v1.0.3
# github.com/miekg/pkcs11 v1.1.1
github.com/miekg/pkcs11
# github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7
github.com/mitchellh/go-wordwrap
# github.com/mitchellh/mapstructure v1.4.3
github.com/mitchellh/mapstructure
# github.com/moby/buildkit v0.10.0
# github.com/moby/buildkit v0.10.1-0.20220402051847-3e38a2d34830
## explicit
github.com/moby/buildkit/api/services/control
github.com/moby/buildkit/api/types
@ -376,11 +376,11 @@ github.com/morikuni/aec
# github.com/opencontainers/go-digest v1.0.0
## explicit
github.com/opencontainers/go-digest
# github.com/opencontainers/image-spec v1.0.2-0.20211117181255-693428a734f5
# github.com/opencontainers/image-spec v1.0.3-0.20211202183452-c5a74bcca799
## explicit
github.com/opencontainers/image-spec/specs-go
github.com/opencontainers/image-spec/specs-go/v1
# github.com/opencontainers/runc v1.1.0
# github.com/opencontainers/runc v1.1.1
github.com/opencontainers/runc/libcontainer/user
# github.com/pelletier/go-toml v1.9.4
## explicit
@ -510,14 +510,13 @@ go.opentelemetry.io/proto/otlp/collector/trace/v1
go.opentelemetry.io/proto/otlp/common/v1
go.opentelemetry.io/proto/otlp/resource/v1
go.opentelemetry.io/proto/otlp/trace/v1
# golang.org/x/crypto v0.0.0-20211202192323-5770296d904e
# golang.org/x/crypto v0.0.0-20220315160706-3147a52a75dd
golang.org/x/crypto/bcrypt
golang.org/x/crypto/blowfish
golang.org/x/crypto/chacha20
golang.org/x/crypto/curve25519
golang.org/x/crypto/curve25519/internal/field
golang.org/x/crypto/ed25519
golang.org/x/crypto/ed25519/internal/edwards25519
golang.org/x/crypto/internal/poly1305
golang.org/x/crypto/internal/subtle
golang.org/x/crypto/nacl/sign
@ -582,7 +581,7 @@ google.golang.org/genproto/googleapis/api/httpbody
google.golang.org/genproto/googleapis/rpc/errdetails
google.golang.org/genproto/googleapis/rpc/status
google.golang.org/genproto/protobuf/field_mask
# google.golang.org/grpc v1.44.0
# google.golang.org/grpc v1.45.0
## explicit
google.golang.org/grpc
google.golang.org/grpc/attributes
@ -898,7 +897,7 @@ sigs.k8s.io/structured-merge-diff/v4/value
# sigs.k8s.io/yaml v1.2.0
sigs.k8s.io/yaml
# github.com/docker/cli => github.com/docker/cli v20.10.3-0.20220226190722-8667ccd1124c+incompatible
# github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220121014307-40bb9831756f+incompatible
# github.com/docker/docker => github.com/docker/docker v20.10.3-0.20220224222438-c78f6963a1c0+incompatible
# k8s.io/api => k8s.io/api v0.22.4
# k8s.io/apimachinery => k8s.io/apimachinery v0.22.4
# k8s.io/client-go => k8s.io/client-go v0.22.4