vendor: github.com/moby/buildkit db304eb93126 (v0.13.0-dev)

full diff: d6e142600e...db304eb931

Signed-off-by: CrazyMax <1951866+crazy-max@users.noreply.github.com>
CrazyMax 2024-02-21 11:54:00 +01:00
parent 414f215929
commit 953cbf6696
127 changed files with 3442 additions and 1797 deletions

go.mod
View File

@ -7,15 +7,15 @@ require (
github.com/aws/aws-sdk-go-v2/config v1.26.6
github.com/compose-spec/compose-go/v2 v2.0.0-rc.3
github.com/containerd/console v1.0.4
github.com/containerd/containerd v1.7.12
github.com/containerd/continuity v0.4.2
github.com/containerd/containerd v1.7.13
github.com/containerd/continuity v0.4.3
github.com/containerd/log v0.1.0
github.com/containerd/typeurl/v2 v2.1.1
github.com/creack/pty v1.1.18
github.com/distribution/reference v0.5.0
github.com/docker/cli v25.0.2+incompatible
github.com/docker/cli v25.0.3+incompatible
github.com/docker/cli-docs-tool v0.7.0
github.com/docker/docker v25.0.2+incompatible
github.com/docker/docker v25.0.3+incompatible
github.com/docker/go-units v0.5.0
github.com/gofrs/flock v0.8.1
github.com/gogo/protobuf v1.3.2
@ -24,7 +24,7 @@ require (
github.com/google/uuid v1.5.0
github.com/hashicorp/go-cty-funcs v0.0.0-20230405223818-a090f58aa992
github.com/hashicorp/hcl/v2 v2.19.1
github.com/moby/buildkit v0.13.0-beta3.0.20240205165705-d6e142600ee5 // master (v0.13.0-dev)
github.com/moby/buildkit v0.13.0-rc1.0.20240221065707-db304eb93126 // master (v0.13.0-dev)
github.com/moby/sys/mountinfo v0.7.1
github.com/moby/sys/signal v0.7.0
github.com/morikuni/aec v1.0.0
@ -49,7 +49,7 @@ require (
golang.org/x/sync v0.4.0
golang.org/x/sys v0.16.0
golang.org/x/term v0.15.0
google.golang.org/grpc v1.58.3
google.golang.org/grpc v1.59.0
gopkg.in/yaml.v3 v3.0.1
k8s.io/api v0.26.7
k8s.io/apimachinery v0.26.7
@ -119,6 +119,7 @@ require (
github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
github.com/mitchellh/mapstructure v1.5.0 // indirect
github.com/mitchellh/reflectwalk v1.0.2 // indirect
github.com/moby/docker-image-spec v1.3.1 // indirect
github.com/moby/locker v1.0.1 // indirect
github.com/moby/patternmatcher v0.6.0 // indirect
github.com/moby/spdystream v0.2.0 // indirect
@ -129,10 +130,10 @@ require (
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/prometheus/client_golang v1.16.0 // indirect
github.com/prometheus/client_model v0.4.0 // indirect
github.com/prometheus/common v0.42.0 // indirect
github.com/prometheus/procfs v0.10.1 // indirect
github.com/prometheus/client_golang v1.17.0 // indirect
github.com/prometheus/client_model v0.5.0 // indirect
github.com/prometheus/common v0.44.0 // indirect
github.com/prometheus/procfs v0.12.0 // indirect
github.com/rivo/uniseg v0.2.0 // indirect
github.com/secure-systems-lab/go-securesystemslib v0.4.0 // indirect
github.com/shibumi/go-pathspec v1.3.0 // indirect
@ -155,14 +156,14 @@ require (
golang.org/x/crypto v0.17.0 // indirect
golang.org/x/exp v0.0.0-20230713183714-613f0c0eb8a1 // indirect
golang.org/x/net v0.17.0 // indirect
golang.org/x/oauth2 v0.10.0 // indirect
golang.org/x/oauth2 v0.11.0 // indirect
golang.org/x/text v0.14.0 // indirect
golang.org/x/time v0.3.0 // indirect
golang.org/x/tools v0.14.0 // indirect
google.golang.org/appengine v1.6.7 // indirect
google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 // indirect
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b // indirect
google.golang.org/protobuf v1.31.0 // indirect
gopkg.in/inf.v0 v0.9.1 // indirect
gopkg.in/yaml.v2 v2.4.0 // indirect

go.sum
View File

@ -1,7 +1,7 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cloud.google.com/go v0.110.4 h1:1JYyxKMN9hd5dR2MYTPWkGUgcoxVVhg0LKNKEo0qvmk=
cloud.google.com/go/compute v1.21.0 h1:JNBsyXVoOoNJtTQcnEY5uYpZIbeCTYIeDe0Xh1bySMk=
cloud.google.com/go/compute v1.21.0/go.mod h1:4tCnrn48xsqlwSAiLf1HXMQk8CONslYbdiEZc9FEIbM=
cloud.google.com/go v0.110.8 h1:tyNdfIxjzaWctIiLYOTalaLKZ17SI44SKFW26QbOhME=
cloud.google.com/go/compute v1.23.1 h1:V97tBoDaZHb6leicZ1G6DLK2BAaZLJ/7+9BB/En3hR0=
cloud.google.com/go/compute v1.23.1/go.mod h1:CqB3xpmPKKt3OJpW2ndFIXnA9A4xAy/F3Xp1ixncW78=
cloud.google.com/go/compute/metadata v0.2.3 h1:mg4jlk7mCAj6xXp9UJ4fjI9VUI5rubuGBW5aJ7UnBMY=
cloud.google.com/go/compute/metadata v0.2.3/go.mod h1:VAV5nSsACxMJvgaAuX6Pk2AawlZn8kiOGuCv6gTkwuA=
github.com/AdaLogics/go-fuzz-headers v0.0.0-20230811130428-ced1acdcaa24 h1:bvDV9vkmnHYOMsOr4WLk+Vo07yKIzd94sVoIqshQ4bU=
@ -94,19 +94,19 @@ github.com/containerd/cgroups v1.1.0 h1:v8rEWFl6EoqHB+swVNjVoCJE8o3jX7e8nqBGPLaD
github.com/containerd/cgroups v1.1.0/go.mod h1:6ppBcbh/NOOUU+dMKrykgaBnK9lCIBxHqJDGwsa1mIw=
github.com/containerd/console v1.0.4 h1:F2g4+oChYvBTsASRTz8NP6iIAi97J3TtSAsLbIFn4ro=
github.com/containerd/console v1.0.4/go.mod h1:YynlIjWYF8myEu6sdkwKIvGQq+cOckRm6So2avqoYAk=
github.com/containerd/containerd v1.7.12 h1:+KQsnv4VnzyxWcfO9mlxxELaoztsDEjOuCMPAuPqgU0=
github.com/containerd/containerd v1.7.12/go.mod h1:/5OMpE1p0ylxtEUGY8kuCYkDRzJm9NO1TFMWjUpdevk=
github.com/containerd/continuity v0.4.2 h1:v3y/4Yz5jwnvqPKJJ+7Wf93fyWoCB3F5EclWG023MDM=
github.com/containerd/continuity v0.4.2/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/containerd/containerd v1.7.13 h1:wPYKIeGMN8vaggSKuV1X0wZulpMz4CrgEsZdaCyB6Is=
github.com/containerd/containerd v1.7.13/go.mod h1:zT3up6yTRfEUa6+GsITYIJNgSVL9NQ4x4h1RPzk0Wu4=
github.com/containerd/continuity v0.4.3 h1:6HVkalIp+2u1ZLH1J/pYX2oBVXlJZvh1X1A7bEZ9Su8=
github.com/containerd/continuity v0.4.3/go.mod h1:F6PTNCKepoxEaXLQp3wDAjygEnImnZ/7o4JzpodfroQ=
github.com/containerd/fifo v1.1.0 h1:4I2mbh5stb1u6ycIABlBw9zgtlK8viPI9QkQNRQEEmY=
github.com/containerd/fifo v1.1.0/go.mod h1:bmC4NWMbXlt2EZ0Hc7Fx7QzTFxgPID13eH0Qu+MAb2o=
github.com/containerd/log v0.1.0 h1:TCJt7ioM2cr/tfR8GPbGf9/VRAX8D2B4PjzCpfX540I=
github.com/containerd/log v0.1.0/go.mod h1:VRRf09a7mHDIRezVKTRCrOq78v577GXq3bSa3EhrzVo=
github.com/containerd/nydus-snapshotter v0.13.1 h1:5XNkCZ9ivLXCcyx3Jbbfh/fntkcls69uBg0x9VE8zlk=
github.com/containerd/nydus-snapshotter v0.13.1/go.mod h1:XWAz9ytsjBuKPVXDKP3xoMlcSKNsGnjXlEup6DuzUIo=
github.com/containerd/stargz-snapshotter v0.14.3 h1:OTUVZoPSPs8mGgmQUE1dqw3WX/3nrsmsurW7UPLWl1U=
github.com/containerd/stargz-snapshotter/estargz v0.14.3 h1:OqlDCK3ZVUO6C3B/5FSkDwbkEETK84kQgEeFwDC+62k=
github.com/containerd/stargz-snapshotter/estargz v0.14.3/go.mod h1:KY//uOCIkSuNAHhJogcZtrNHdKrA99/FCCRjE3HD36o=
github.com/containerd/nydus-snapshotter v0.13.7 h1:x7DHvGnzJOu1ZPwPYkeOPk5MjZZYbdddygEjaSDoFTk=
github.com/containerd/nydus-snapshotter v0.13.7/go.mod h1:VPVKQ3jmHFIcUIV2yiQ1kImZuBFS3GXDohKs9mRABVE=
github.com/containerd/stargz-snapshotter v0.15.1 h1:fpsP4kf/Z4n2EYnU0WT8ZCE3eiKDwikDhL6VwxIlgeA=
github.com/containerd/stargz-snapshotter/estargz v0.15.1 h1:eXJjw9RbkLFgioVaTG+G/ZW/0kEe2oEKCdS/ZxIyoCU=
github.com/containerd/stargz-snapshotter/estargz v0.15.1/go.mod h1:gr2RNwukQ/S9Nv33Lt6UC7xEx58C+LHRdoqbEKjz1Kk=
github.com/containerd/ttrpc v1.2.2 h1:9vqZr0pxwOF5koz6N0N3kJ0zDHokrcPxIR/ZR2YFtOs=
github.com/containerd/ttrpc v1.2.2/go.mod h1:sIT6l32Ph/H9cvnJsfXM5drIVzTr5A2flTf1G5tYZak=
github.com/containerd/typeurl/v2 v2.1.1 h1:3Q4Pt7i8nYwy2KmQWIw2+1hTvwTE/6w9FqcttATPO/4=
@ -121,15 +121,15 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
github.com/distribution/reference v0.5.0 h1:/FUIFXtfc/x2gpa5/VGfiGLuOIdYa1t65IKK2OFGvA0=
github.com/distribution/reference v0.5.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
github.com/docker/cli v25.0.2+incompatible h1:6GEdvxwEA451/+Y3GtqIGn/MNjujQazUlxC6uGu8Tog=
github.com/docker/cli v25.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli v25.0.3+incompatible h1:KLeNs7zws74oFuVhgZQ5ONGZiXUUdgsdy6/EsX/6284=
github.com/docker/cli v25.0.3+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
github.com/docker/cli-docs-tool v0.7.0 h1:M2Da98Unz2kz3A5d4yeSGbhyOge2mfYSNjAFt01Rw0M=
github.com/docker/cli-docs-tool v0.7.0/go.mod h1:zMjqTFCU361PRh8apiXzeAZ1Q/xupbIwTusYpzCXS/o=
github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/distribution v2.8.2+incompatible h1:T3de5rq0dB1j30rp0sA2rER+m322EBzniBPB6ZIzuh8=
github.com/docker/distribution v2.8.2+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
github.com/docker/docker v25.0.2+incompatible h1:/OaKeauroa10K4Nqavw4zlhcDq/WBcPMc5DbjOGgozY=
github.com/docker/docker v25.0.2+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker v25.0.3+incompatible h1:D5fy/lYmY7bvZa0XTZ5/UJPljor41F+vdyJG5luQLfQ=
github.com/docker/docker v25.0.3+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@ -197,8 +197,8 @@ github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/glog v1.1.0 h1:/d3pCKDPWNnvIWe0vVUpNP32qc8U3PDVxySP/y360qE=
github.com/golang/glog v1.1.0/go.mod h1:pfYeQZ3JWZoXTV5sFc986z3HTpwQs9At6P4ImfuP3NQ=
github.com/golang/glog v1.1.2 h1:DVjP2PbBOzHyzA+dn3WhHIq4NdVu3Q+pvivFICf/7fo=
github.com/golang/glog v1.1.2/go.mod h1:zR+okUeTbrL6EL3xHUDxZuEtGv04p5shwip1+mL/rLQ=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
@ -320,8 +320,10 @@ github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyua
github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
github.com/mitchellh/reflectwalk v1.0.2 h1:G2LzWKi524PWgd3mLHV8Y5k7s6XUvT0Gef6zxSIeXaQ=
github.com/mitchellh/reflectwalk v1.0.2/go.mod h1:mSTlrgnPZtwu0c4WaC2kGObEpuNDbx0jmZXqmk4esnw=
github.com/moby/buildkit v0.13.0-beta3.0.20240205165705-d6e142600ee5 h1:FJknzwgQMF0PviKWgRpJ3GtGbAkPNw5/PQtqqXnqvVM=
github.com/moby/buildkit v0.13.0-beta3.0.20240205165705-d6e142600ee5/go.mod h1:wWi92eSRd6lwFOiMcq6L2EJTuP7TvPTRl5KF3jmDiYc=
github.com/moby/buildkit v0.13.0-rc1.0.20240221065707-db304eb93126 h1:aXdgP8jLyDnKEOXis4Aydp4VlXYpg2loUJarhygTOuU=
github.com/moby/buildkit v0.13.0-rc1.0.20240221065707-db304eb93126/go.mod h1:XaLDo1L55QqXS/04FE91+mAbwjkr0vZu9g6zZlzvXL8=
github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
github.com/moby/docker-image-spec v1.3.1/go.mod h1:eKmb5VW8vQEh/BAr2yvVNvuiJuY6UIocYsFu/DxxRpo=
github.com/moby/locker v1.0.1 h1:fOXqR41zeveg4fFODix+1Ch4mj/gT0NE1XJbp/epuBg=
github.com/moby/locker v1.0.1/go.mod h1:S7SDdo5zpBK84bzzVlKr2V0hz+7x9hWbYC/kq7oQppc=
github.com/moby/patternmatcher v0.6.0 h1:GmP9lR19aU5GqSSFko+5pRqHi+Ohk1O69aFiKkVGiPk=
@ -384,26 +386,26 @@ github.com/prometheus/client_golang v0.9.0-pre1.0.20180209125602-c332b6f63c06/go
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.1.0/go.mod h1:I1FGZT9+L76gKKOs5djB6ezCbFQP1xR9D75/vuwEF3g=
github.com/prometheus/client_golang v1.16.0 h1:yk/hx9hDbrGHovbci4BY+pRMfSuuat626eFsHb7tmT8=
github.com/prometheus/client_golang v1.16.0/go.mod h1:Zsulrv/L9oM40tJ7T815tM89lFEugiJ9HzIqaAx4LKc=
github.com/prometheus/client_golang v1.17.0 h1:rl2sfwZMtSthVU752MqfjQozy7blglC+1SOtjMAMh+Q=
github.com/prometheus/client_golang v1.17.0/go.mod h1:VeL+gMmOAxkS2IqfCq0ZmHSL+LjWfWDUmp1mBz9JgUY=
github.com/prometheus/client_model v0.0.0-20171117100541-99fa1f4be8e5/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.4.0 h1:5lQXD3cAg1OXBf4Wq03gTrXHeaV0TQvGfUooCfx1yqY=
github.com/prometheus/client_model v0.4.0/go.mod h1:oMQmHW1/JoDwqLtg57MGgP/Fb1CJEYF2imWWhWtMkYU=
github.com/prometheus/client_model v0.5.0 h1:VQw1hfvPvk3Uv6Qf29VrPF32JB6rtbgI6cYPYQjL0Qw=
github.com/prometheus/client_model v0.5.0/go.mod h1:dTiFglRmd66nLR9Pv9f0mZi7B7fk5Pm3gvsjB5tr+kI=
github.com/prometheus/common v0.0.0-20180110214958-89604d197083/go.mod h1:daVV7qP5qjZbuso7PdcryaAu0sAZbrN9i7WWcTMWvro=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc=
github.com/prometheus/common v0.42.0 h1:EKsfXEYo4JpWMHH5cg+KOUWeuJSov1Id8zGR8eeI1YM=
github.com/prometheus/common v0.42.0/go.mod h1:xBwqVerjNdUDjgODMpudtOMwlOwf2SaTr1yjz4b7Zbc=
github.com/prometheus/common v0.44.0 h1:+5BrQJwiBB9xsMygAB3TNvpQKOwlkc25LbISbrdOOfY=
github.com/prometheus/common v0.44.0/go.mod h1:ofAIvZbQ1e/nugmZGz4/qCb9Ap1VoSTIO7x0VV9VvuY=
github.com/prometheus/procfs v0.0.0-20180125133057-cb4147076ac7/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.0.3/go.mod h1:4A/X28fw3Fc593LaREMrKMqOKvUAntwMDaekg4FpcdQ=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.10.1 h1:kYK1Va/YMlutzCGazswoHKo//tZVlFpKYh+PymziUAg=
github.com/prometheus/procfs v0.10.1/go.mod h1:nwNm2aOCAYw8uTR/9bWRREkZFxAUcWzPHWJq+XBB/FM=
github.com/prometheus/procfs v0.12.0 h1:jluTpSng7V9hY0O2R9DzzJHYb2xULk9VTR1V1R/k6Bo=
github.com/prometheus/procfs v0.12.0/go.mod h1:pcuDEFsWDnvcgNzo4EEweacyhjeA9Zk3cnaOZAZEfOo=
github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ=
@ -547,8 +549,8 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.17.0 h1:pVaXccu2ozPjCXewfr1S7xza/zcXTity9cCdXQYSjIM=
golang.org/x/net v0.17.0/go.mod h1:NxSsAGuq816PNPmqtQdLE42eU2Fs7NoRIZrHJAlaCOE=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.10.0 h1:zHCpF2Khkwy4mMB4bv0U37YtJdTGW8jI0glAApi0Kh8=
golang.org/x/oauth2 v0.10.0/go.mod h1:kTpgurOux7LqtuxjuyZa4Gj2gdezIt/jQtGnNFfypQI=
golang.org/x/oauth2 v0.11.0 h1:vPL4xzxBM4niKCW6g9whtaWVXTJf1U5e4aZxxFx/gbU=
golang.org/x/oauth2 v0.11.0/go.mod h1:LdF7O/8bLR/qWK9DrpXmbHLTouvRHK0SgJl0GmDBchk=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@ -611,12 +613,12 @@ google.golang.org/genproto v0.0.0-20200224152610-e50cd9704f63/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200423170343-7949de9c1215/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20201019141844-1ed22bb0c154/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98 h1:Z0hjGZePRE0ZBWotvtrwxFNrNE9CUAGtplaDK5NNI/g=
google.golang.org/genproto v0.0.0-20230711160842-782d3b101e98/go.mod h1:S7mY02OqCJTD0E1OiQy1F72PWFB4bZJ87cAtLPYgDR0=
google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98 h1:FmF5cCW94Ij59cfpoLiwTgodWmm60eEV0CjlsVg2fuw=
google.golang.org/genproto/googleapis/api v0.0.0-20230711160842-782d3b101e98/go.mod h1:rsr7RhLuwsDKL7RmgDDCUc6yaGr1iqceVb5Wv6f6YvQ=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98 h1:bVf09lpb+OJbByTj913DRJioFFAjf/ZGxEz7MajTp2U=
google.golang.org/genproto/googleapis/rpc v0.0.0-20230711160842-782d3b101e98/go.mod h1:TUfxEVdsvPg18p6AslUXFoLdpED4oBnGwyqk3dV1XzM=
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b h1:+YaDE2r2OG8t/z5qmsh7Y+XXwCbvadxxZ0YY6mTdrVA=
google.golang.org/genproto v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:CgAqfJo+Xmu0GwA0411Ht3OU3OntXwsGmrmjI8ioGXI=
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b h1:CIC2YMXmIhYw6evmhPxBKJ4fmLbOFtXQN/GV3XOZR8k=
google.golang.org/genproto/googleapis/api v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:IBQ646DjkDkvUIsVq/cc03FUFQ9wbZu7yE396YcL870=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b h1:ZlWIi1wSK56/8hn4QcBp/j9M7Gt3U/3hZw3mC7vDICo=
google.golang.org/genproto/googleapis/rpc v0.0.0-20231016165738-49dd2c1f3d0b/go.mod h1:swOH3j0KzcDDgGUWr+SNpyTen5YrXjS3eyPzFYKc6lc=
google.golang.org/grpc v1.0.5/go.mod h1:yo6s7OP7yaDglbqo1J04qKzAhqBH6lvTonzMVmEdcZw=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
@ -624,8 +626,8 @@ google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQ
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.27.1/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3IjizoKk=
google.golang.org/grpc v1.58.3 h1:BjnpXut1btbtgN/6sp+brB2Kbm2LjNXnidYujAVbSoQ=
google.golang.org/grpc v1.58.3/go.mod h1:tgX3ZQDlNJGU96V6yHh1T/JeoBQ2TXdr43YbYSsCJk0=
google.golang.org/grpc v1.59.0 h1:Z5Iec2pjwb+LEOqzpB2MR12/eKFhDPhuqW91O+4bwUk=
google.golang.org/grpc v1.59.0/go.mod h1:aUPDwccQo6OTjy7Hct4AfBPD1GptF4fyUjIkQ9YtF98=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=

View File

@ -108,6 +108,12 @@ type Status struct {
// WalkFunc defines the callback for a blob walk.
type WalkFunc func(Info) error
// InfoReaderProvider provides both info and reader for the specific content.
type InfoReaderProvider interface {
InfoProvider
Provider
}
// InfoProvider provides info for content inspection.
type InfoProvider interface {
// Info will return metadata about content available in the content store.

View File

@ -148,7 +148,7 @@ func WithSkipNonDistributableBlobs() ExportOpt {
// The manifest itself is excluded only if it's not present locally.
// This allows exporting multi-platform images when not all platforms are present
// while still persisting the multi-platform index.
func WithSkipMissing(store ContentProvider) ExportOpt {
func WithSkipMissing(store content.InfoReaderProvider) ExportOpt {
return func(ctx context.Context, o *exportOptions) error {
o.blobRecordOptions.childrenHandler = images.HandlerFunc(func(ctx context.Context, desc ocispec.Descriptor) (subdescs []ocispec.Descriptor, err error) {
children, err := images.Children(ctx, store, desc)
@ -211,12 +211,6 @@ func copySourceLabels(ctx context.Context, infoProvider content.InfoProvider, de
return desc, nil
}
// ContentProvider provides both content and info about content
type ContentProvider interface {
content.Provider
content.InfoProvider
}
// Export implements Exporter.
func Export(ctx context.Context, store content.Provider, writer io.Writer, opts ...ExportOpt) error {
var eo exportOptions
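
A hedged sketch of what the signature change above means for callers, assuming these hunks are containerd's content and images/archive packages: WithSkipMissing now accepts a content.InfoReaderProvider, and any content.Store already satisfies it because a store provides both Info and ReaderAt. The function and variable names below are illustrative.

package example

import (
    "context"
    "io"

    "github.com/containerd/containerd/content"
    "github.com/containerd/containerd/images/archive"
    ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)

// exportSkippingMissing writes an image tar, skipping blobs that are not in the
// local store; the store itself is passed as the InfoReaderProvider.
func exportSkippingMissing(ctx context.Context, store content.Store, w io.Writer, manifest ocispec.Descriptor) error {
    return archive.Export(ctx, store, w,
        archive.WithManifest(manifest),
        archive.WithSkipMissing(store), // content.Store implements content.InfoReaderProvider
    )
}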

View File

@ -23,7 +23,7 @@ var (
Package = "github.com/containerd/containerd"
// Version holds the complete version number. Filled in at linking time.
Version = "1.7.12+unknown"
Version = "1.7.13+unknown"
// Revision is filled with the VCS (e.g. git) revision being used to build
// the program at linking time.

View File

@ -103,11 +103,6 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
}
}
entries, err := os.ReadDir(src)
if err != nil {
return fmt.Errorf("failed to read %s: %w", src, err)
}
if err := copyFileInfo(stat, src, dst); err != nil {
return fmt.Errorf("failed to copy file info for %s: %w", dst, err)
}
@ -116,7 +111,15 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
return fmt.Errorf("failed to copy xattrs: %w", err)
}
for _, entry := range entries {
f, err := os.Open(src)
if err != nil {
return err
}
defer f.Close()
dr := &dirReader{f: f}
handleEntry := func(entry os.DirEntry) error {
source := filepath.Join(src, entry.Name())
target := filepath.Join(dst, entry.Name())
@ -130,7 +133,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
if err := copyDirectory(target, source, inodes, o); err != nil {
return err
}
continue
return nil
case (fileInfo.Mode() & os.ModeType) == 0:
link, err := getLinkSource(target, fileInfo, inodes)
if err != nil {
@ -159,7 +162,7 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
}
default:
logrus.Warnf("unsupported mode: %s: %s", source, fileInfo.Mode())
continue
return nil
}
if err := copyFileInfo(fileInfo, source, target); err != nil {
@ -169,9 +172,20 @@ func copyDirectory(dst, src string, inodes map[uint64]string, o *copyDirOpts) er
if err := copyXAttrs(target, source, o.xex, o.xeh); err != nil {
return fmt.Errorf("failed to copy xattrs: %w", err)
}
return nil
}
return nil
for {
entry := dr.Next()
if entry == nil {
break
}
if err := handleEntry(entry); err != nil {
return err
}
}
return dr.Err()
}
// CopyFile copies the source file to the target.

vendor/github.com/containerd/continuity/fs/dir.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
/*
Copyright The containerd Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package fs
import (
"io"
"os"
)
type dirReader struct {
buf []os.DirEntry
f *os.File
err error
}
func (r *dirReader) Next() os.DirEntry {
if len(r.buf) == 0 {
infos, err := r.f.ReadDir(32)
if err != nil {
if err != io.EOF {
r.err = err
}
return nil
}
r.buf = infos
}
if len(r.buf) == 0 {
return nil
}
out := r.buf[0]
r.buf[0] = nil
r.buf = r.buf[1:]
return out
}
func (r *dirReader) Err() error {
return r.err
}
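
The new dirReader above (and the reworked copyDirectory that uses it) streams directory entries in fixed-size batches instead of reading the whole listing up front, which bounds memory use for very large directories. Below is a standalone sketch of the same pattern using only the standard library, where (*os.File).ReadDir with a positive count returns io.EOF at the end; the helper name is illustrative.

package example

import (
    "io"
    "os"
)

// walkDirIncrementally visits entries of path in batches of 32, mirroring the
// batch size used by the vendored dirReader.
func walkDirIncrementally(path string, fn func(os.DirEntry) error) error {
    f, err := os.Open(path)
    if err != nil {
        return err
    }
    defer f.Close()
    for {
        batch, err := f.ReadDir(32)
        for _, entry := range batch {
            if err := fn(entry); err != nil {
                return err
            }
        }
        if err == io.EOF {
            return nil // end of directory
        }
        if err != nil {
            return err
        }
    }
}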

View File

@ -8327,6 +8327,16 @@ paths:
description: "BuildKit output configuration"
type: "string"
default: ""
- name: "version"
in: "query"
type: "string"
default: "1"
enum: ["1", "2"]
description: |
Version of the builder backend to use.
- `1` is the first generation classic (deprecated) builder in the Docker daemon (default)
- `2` is [BuildKit](https://github.com/moby/buildkit)
responses:
200:
description: "no error"
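
For reference, a hedged sketch of how this query parameter is driven from the Go client: types.ImageBuildOptions has a Version field whose constants types.BuilderV1 ("1") and types.BuilderBuildKit ("2") map onto the values documented above. The client construction and image tag below are assumptions, not part of this diff.

package example

import (
    "context"
    "io"

    "github.com/docker/docker/api/types"
    "github.com/docker/docker/client"
)

// buildWithBuildKit submits a build context and asks the daemon for the BuildKit
// backend, which the client sends as version=2 on the build request.
func buildWithBuildKit(ctx context.Context, cli *client.Client, buildCtx io.Reader) error {
    resp, err := cli.ImageBuild(ctx, buildCtx, types.ImageBuildOptions{
        Version: types.BuilderBuildKit, // "2"; types.BuilderV1 ("1") selects the classic builder
        Tags:    []string{"example:latest"},
    })
    if err != nil {
        return err
    }
    defer resp.Body.Close()
    _, err = io.Copy(io.Discard, resp.Body) // drain the progress stream
    return err
}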

View File

@ -14,6 +14,9 @@ type EndpointSettings struct {
IPAMConfig *EndpointIPAMConfig
Links []string
Aliases []string // Aliases holds the list of extra, user-specified DNS names for this endpoint.
// MacAddress may be used to specify a MAC address when the container is created.
// Once the container is running, it becomes operational data (it may contain a
// generated address).
MacAddress string
// Operational data
NetworkID string
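
A minimal sketch of how the documented behavior looks from the Go client, assuming the standard Engine API client and a preexisting network named "mynet" (both assumptions, not part of this diff): the MAC address is requested per endpoint at create time, and the same field later carries the operational, possibly generated, address.

package example

import (
    "context"

    "github.com/docker/docker/api/types/container"
    "github.com/docker/docker/api/types/network"
    "github.com/docker/docker/client"
)

func createWithMAC(ctx context.Context, cli *client.Client) error {
    _, err := cli.ContainerCreate(ctx,
        &container.Config{Image: "alpine"},
        nil, // host config
        &network.NetworkingConfig{EndpointsConfig: map[string]*network.EndpointSettings{
            "mynet": {MacAddress: "02:42:ac:11:00:02"}, // requested MAC for this endpoint
        }},
        nil, // platform
        "example",
    )
    return err
}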

View File

@ -30,30 +30,9 @@ const (
ip6 ipFamily = "IPv6"
)
// HasIPv6Subnets checks whether there's any IPv6 subnets in the ipam parameter. It ignores any invalid Subnet and nil
// ipam.
func HasIPv6Subnets(ipam *IPAM) bool {
if ipam == nil {
return false
}
for _, cfg := range ipam.Config {
subnet, err := netip.ParsePrefix(cfg.Subnet)
if err != nil {
continue
}
if subnet.Addr().Is6() {
return true
}
}
return false
}
// ValidateIPAM checks whether the network's IPAM passed as argument is valid. It returns a joinError of the list of
// errors found.
func ValidateIPAM(ipam *IPAM) error {
func ValidateIPAM(ipam *IPAM, enableIPv6 bool) error {
if ipam == nil {
return nil
}
@ -70,6 +49,10 @@ func ValidateIPAM(ipam *IPAM) error {
subnetFamily = ip6
}
if !enableIPv6 && subnetFamily == ip6 {
continue
}
if subnet != subnet.Masked() {
errs = append(errs, fmt.Errorf("invalid subnet %s: it should be %s", subnet, subnet.Masked()))
}

View File

@ -3,11 +3,15 @@ package ioutils // import "github.com/docker/docker/pkg/ioutils"
import (
"context"
"io"
"runtime/debug"
"sync/atomic"
// make sure crypto.SHA256, crypto.SHA512 and crypto.SHA384 are registered
// TODO remove once https://github.com/opencontainers/go-digest/pull/64 is merged.
_ "crypto/sha256"
_ "crypto/sha512"
"github.com/containerd/log"
)
// ReadCloserWrapper wraps an io.Reader, and implements an io.ReadCloser
@ -16,10 +20,15 @@ import (
type ReadCloserWrapper struct {
io.Reader
closer func() error
closed atomic.Bool
}
// Close calls back the passed closer function
func (r *ReadCloserWrapper) Close() error {
if !r.closed.CompareAndSwap(false, true) {
subsequentCloseWarn("ReadCloserWrapper")
return nil
}
return r.closer()
}
@ -87,6 +96,7 @@ type cancelReadCloser struct {
cancel func()
pR *io.PipeReader // Stream to read from
pW *io.PipeWriter
closed atomic.Bool
}
// NewCancelReadCloser creates a wrapper that closes the ReadCloser when the
@ -146,6 +156,17 @@ func (p *cancelReadCloser) closeWithError(err error) {
// Close closes the wrapper and its underlying reader. It will cause
// future calls to Read to return io.EOF.
func (p *cancelReadCloser) Close() error {
if !p.closed.CompareAndSwap(false, true) {
subsequentCloseWarn("cancelReadCloser")
return nil
}
p.closeWithError(io.EOF)
return nil
}
func subsequentCloseWarn(name string) {
log.G(context.TODO()).Error("subsequent attempt to close " + name)
if log.GetLevel() >= log.DebugLevel {
log.G(context.TODO()).Errorf("stack trace: %s", string(debug.Stack()))
}
}
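
A short sketch of the guard introduced above, assuming docker/docker's pkg/ioutils constructors; the closure and counter are illustrative. The closer now runs exactly once, and any later Close is a logged no-op rather than a second invocation.

package example

import (
    "strings"

    "github.com/docker/docker/pkg/ioutils"
)

func closeTwice() {
    released := 0
    rc := ioutils.NewReadCloserWrapper(strings.NewReader("hello"), func() error {
        released++ // runs exactly once
        return nil
    })
    _ = rc.Close() // invokes the closer
    _ = rc.Close() // no-op: logs "subsequent attempt to close ReadCloserWrapper"
}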

View File

@ -1,6 +1,9 @@
package ioutils // import "github.com/docker/docker/pkg/ioutils"
import "io"
import (
"io"
"sync/atomic"
)
// NopWriter represents a type which write operation is nop.
type NopWriter struct{}
@ -29,9 +32,14 @@ func (f *NopFlusher) Flush() {}
type writeCloserWrapper struct {
io.Writer
closer func() error
closed atomic.Bool
}
func (r *writeCloserWrapper) Close() error {
if !r.closed.CompareAndSwap(false, true) {
subsequentCloseWarn("WriteCloserWrapper")
return nil
}
return r.closer()
}

View File

@ -84,6 +84,11 @@ func (g *gatewayClientForBuild) ResolveImageConfig(ctx context.Context, in *gate
return g.gateway.ResolveImageConfig(ctx, in, opts...)
}
func (g *gatewayClientForBuild) ResolveSourceMeta(ctx context.Context, in *gatewayapi.ResolveSourceMetaRequest, opts ...grpc.CallOption) (*gatewayapi.ResolveSourceMetaResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.ResolveSourceMeta(ctx, in, opts...)
}
func (g *gatewayClientForBuild) Solve(ctx context.Context, in *gatewayapi.SolveRequest, opts ...grpc.CallOption) (*gatewayapi.SolveResponse, error) {
ctx = buildid.AppendToOutgoingContext(ctx, g.buildID)
return g.gateway.Solve(ctx, in, opts...)

View File

@ -46,6 +46,7 @@ type mount struct {
tmpfsOpt TmpfsInfo
cacheSharing CacheMountSharingMode
noOutput bool
contentCache MountContentCache
}
type ExecOp struct {
@ -281,6 +282,9 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
} else if m.source != nil {
addCap(&e.constraints, pb.CapExecMountBind)
}
if m.contentCache != MountContentCacheDefault {
addCap(&e.constraints, pb.CapExecMountContentCache)
}
}
if len(e.secrets) > 0 {
@ -366,6 +370,14 @@ func (e *ExecOp) Marshal(ctx context.Context, c *Constraints) (digest.Digest, []
pm.CacheOpt.Sharing = pb.CacheSharingOpt_LOCKED
}
}
switch m.contentCache {
case MountContentCacheDefault:
pm.ContentCache = pb.MountContentCache_DEFAULT
case MountContentCacheOn:
pm.ContentCache = pb.MountContentCache_ON
case MountContentCacheOff:
pm.ContentCache = pb.MountContentCache_OFF
}
if m.tmpfs {
pm.MountType = pb.MountType_TMPFS
pm.TmpfsOpt = &pb.TmpfsOpt{
@ -492,6 +504,12 @@ func ForceNoOutput(m *mount) {
m.noOutput = true
}
func ContentCache(cache MountContentCache) MountOption {
return func(m *mount) {
m.contentCache = cache
}
}
func AsPersistentCacheDir(id string, sharing CacheMountSharingMode) MountOption {
return func(m *mount) {
m.cacheID = id
@ -783,3 +801,11 @@ const (
UlimitSigpending UlimitName = "sigpending"
UlimitStack UlimitName = "stack"
)
type MountContentCache int
const (
MountContentCacheDefault MountContentCache = iota
MountContentCacheOn
MountContentCacheOff
)
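
A hedged usage sketch of the new mount option, built from the LLB client API as vendored here; the cache directory, cache ID, and command are illustrative. ContentCache(MountContentCacheOn) opts the cache mount into content-based caching, MountContentCacheOff disables it, and MountContentCacheDefault keeps the existing default behavior.

package example

import "github.com/moby/buildkit/client/llb"

// goBuildWithCache runs a build step with a persistent, content-cached Go build cache.
func goBuildWithCache(base llb.State) llb.State {
    run := base.Run(
        llb.Shlex("go build ./..."),
        llb.AddMount("/root/.cache/go-build", llb.Scratch(),
            llb.AsPersistentCacheDir("go-build-cache", llb.CacheMountShared),
            llb.ContentCache(llb.MountContentCacheOn), // option added in this buildkit bump
        ),
    )
    return run.Root()
}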

View File

@ -1,11 +1,7 @@
package llb
import (
"context"
spb "github.com/moby/buildkit/sourcepolicy/pb"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/moby/buildkit/client/llb/sourceresolver"
)
// WithMetaResolver adds a metadata resolver to an image
@ -31,30 +27,4 @@ func WithLayerLimit(l int) ImageOption {
}
// ImageMetaResolver can resolve image config metadata from a reference
type ImageMetaResolver interface {
ResolveImageConfig(ctx context.Context, ref string, opt ResolveImageConfigOpt) (string, digest.Digest, []byte, error)
}
type ResolverType int
const (
ResolverTypeRegistry ResolverType = iota
ResolverTypeOCILayout
)
type ResolveImageConfigOpt struct {
ResolverType
Platform *ocispecs.Platform
ResolveMode string
LogName string
Store ResolveImageConfigOptStore
SourcePolicies []*spb.Policy
}
type ResolveImageConfigOptStore struct {
SessionID string
StoreID string
}
type ImageMetaResolver = sourceresolver.ImageMetaResolver

View File

@ -10,6 +10,7 @@ import (
"strings"
"github.com/distribution/reference"
"github.com/moby/buildkit/client/llb/sourceresolver"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/gitutil"
@ -136,10 +137,11 @@ func Image(ref string, opts ...ImageOption) State {
if p == nil {
p = c.Platform
}
_, _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, ResolveImageConfigOpt{
Platform: p,
ResolveMode: info.resolveMode.String(),
ResolverType: ResolverTypeRegistry,
_, _, dt, err := info.metaResolver.ResolveImageConfig(ctx, ref, sourceresolver.Opt{
Platform: p,
ImageOpt: &sourceresolver.ResolveImageOpt{
ResolveMode: info.resolveMode.String(),
},
})
if err != nil {
return State{}, err
@ -152,10 +154,11 @@ func Image(ref string, opts ...ImageOption) State {
if p == nil {
p = c.Platform
}
ref, dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, ResolveImageConfigOpt{
Platform: p,
ResolveMode: info.resolveMode.String(),
ResolverType: ResolverTypeRegistry,
ref, dgst, dt, err := info.metaResolver.ResolveImageConfig(context.TODO(), ref, sourceresolver.Opt{
Platform: p,
ImageOpt: &sourceresolver.ResolveImageOpt{
ResolveMode: info.resolveMode.String(),
},
})
if err != nil {
return State{}, err

View File

@ -0,0 +1,59 @@
package sourceresolver
import (
"context"
"strings"
"github.com/distribution/reference"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/imageutil"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
)
type ImageMetaResolver interface {
ResolveImageConfig(ctx context.Context, ref string, opt Opt) (string, digest.Digest, []byte, error)
}
type imageMetaResolver struct {
mr MetaResolver
}
var _ ImageMetaResolver = &imageMetaResolver{}
func NewImageMetaResolver(mr MetaResolver) ImageMetaResolver {
return &imageMetaResolver{
mr: mr,
}
}
func (imr *imageMetaResolver) ResolveImageConfig(ctx context.Context, ref string, opt Opt) (string, digest.Digest, []byte, error) {
parsed, err := reference.ParseNormalizedNamed(ref)
if err != nil {
return "", "", nil, errors.Wrapf(err, "could not parse reference %q", ref)
}
ref = parsed.String()
op := &pb.SourceOp{
Identifier: "docker-image://" + ref,
}
if opt := opt.OCILayoutOpt; opt != nil {
op.Identifier = "oci-layout://" + ref
op.Attrs = map[string]string{}
if opt.Store.SessionID != "" {
op.Attrs[pb.AttrOCILayoutSessionID] = opt.Store.SessionID
}
if opt.Store.StoreID != "" {
op.Attrs[pb.AttrOCILayoutStoreID] = opt.Store.StoreID
}
}
res, err := imr.mr.ResolveSourceMetadata(ctx, op, opt)
if err != nil {
return "", "", nil, errors.Wrapf(err, "failed to resolve source metadata for %s", ref)
}
if res.Image == nil {
return "", "", nil, &imageutil.ResolveToNonImageError{Ref: ref, Updated: res.Op.Identifier}
}
ref = strings.TrimPrefix(res.Op.Identifier, "docker-image://")
ref = strings.TrimPrefix(ref, "oci-layout://")
return ref, res.Image.Digest, res.Image.Config, nil
}

View File

@ -0,0 +1,54 @@
package sourceresolver
import (
"context"
"github.com/moby/buildkit/solver/pb"
spb "github.com/moby/buildkit/sourcepolicy/pb"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
type ResolverType int
const (
ResolverTypeRegistry ResolverType = iota
ResolverTypeOCILayout
)
type MetaResolver interface {
ResolveSourceMetadata(ctx context.Context, op *pb.SourceOp, opt Opt) (*MetaResponse, error)
}
type Opt struct {
LogName string
SourcePolicies []*spb.Policy
Platform *ocispecs.Platform
ImageOpt *ResolveImageOpt
OCILayoutOpt *ResolveOCILayoutOpt
}
type MetaResponse struct {
Op *pb.SourceOp
Image *ResolveImageResponse
}
type ResolveImageOpt struct {
ResolveMode string
}
type ResolveImageResponse struct {
Digest digest.Digest
Config []byte
}
type ResolveOCILayoutOpt struct {
Store ResolveImageConfigOptStore
}
type ResolveImageConfigOptStore struct {
SessionID string
StoreID string
}
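
A minimal sketch of calling the new MetaResolver interface defined above for a docker-image source; the helper name, log text, and error message are illustrative, not part of this diff.

package example

import (
    "context"

    "github.com/moby/buildkit/client/llb/sourceresolver"
    "github.com/moby/buildkit/solver/pb"
    digest "github.com/opencontainers/go-digest"
    "github.com/pkg/errors"
)

// imageDigest resolves source metadata for ref and returns the image digest.
func imageDigest(ctx context.Context, mr sourceresolver.MetaResolver, ref string) (digest.Digest, error) {
    resp, err := mr.ResolveSourceMetadata(ctx,
        &pb.SourceOp{Identifier: "docker-image://" + ref},
        sourceresolver.Opt{LogName: "load metadata for " + ref},
    )
    if err != nil {
        return "", err
    }
    if resp.Image == nil {
        return "", errors.Errorf("%s did not resolve to an image", ref)
    }
    return resp.Image.Digest, nil
}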

View File

@ -1,49 +0,0 @@
package image
import (
"time"
"github.com/docker/docker/api/types/strslice"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
)
// HealthConfig holds configuration settings for the HEALTHCHECK feature.
type HealthConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
StartInterval time.Duration `json:",omitempty"` // StartInterval is the time to wait between checks during the start period.
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
// ImageConfig is a docker compatible config for an image
type ImageConfig struct {
ocispecs.ImageConfig
Healthcheck *HealthConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
OnBuild []string // ONBUILD metadata that were defined on the image Dockerfile
Shell strslice.StrSlice `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
// Image is the JSON structure which describes some basic information about the image.
// This provides the `application/vnd.oci.image.config.v1+json` mediatype when marshalled to JSON.
type Image struct {
ocispecs.Image
// Config defines the execution parameters which should be used as a base when running a container using the image.
Config ImageConfig `json:"config,omitempty"`
}

View File

@ -7,14 +7,14 @@ import (
"github.com/containerd/containerd/platforms"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/exporter/containerimage/image"
"github.com/moby/buildkit/frontend/gateway/client"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"golang.org/x/sync/errgroup"
)
type BuildFunc func(ctx context.Context, platform *ocispecs.Platform, idx int) (client.Reference, *image.Image, error)
type BuildFunc func(ctx context.Context, platform *ocispecs.Platform, idx int) (client.Reference, *dockerspec.DockerOCIImage, error)
func (bc *Client) Build(ctx context.Context, fn BuildFunc) (*ResultBuilder, error) {
res := client.NewResult()

View File

@ -13,11 +13,11 @@ import (
"github.com/distribution/reference"
controlapi "github.com/moby/buildkit/api/services/control"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/exporter/containerimage/image"
"github.com/moby/buildkit/frontend/attestations"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/flightcontrol"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
"github.com/moby/patternmatcher/ignorefile"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
@ -460,7 +460,7 @@ func (bc *Client) MainContext(ctx context.Context, opts ...llb.LocalOption) (*ll
return &st, nil
}
func (bc *Client) NamedContext(ctx context.Context, name string, opt ContextOpt) (*llb.State, *image.Image, error) {
func (bc *Client) NamedContext(ctx context.Context, name string, opt ContextOpt) (*llb.State, *dockerspec.DockerOCIImage, error) {
named, err := reference.ParseNormalizedNamed(name)
if err != nil {
return nil, nil, errors.Wrapf(err, "invalid context name %s", name)

View File

@ -10,11 +10,12 @@ import (
"github.com/distribution/reference"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/sourceresolver"
"github.com/moby/buildkit/exporter/containerimage/exptypes"
"github.com/moby/buildkit/exporter/containerimage/image"
"github.com/moby/buildkit/frontend/gateway/client"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/util/imageutil"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
"github.com/moby/patternmatcher/ignorefile"
"github.com/pkg/errors"
)
@ -25,13 +26,14 @@ const (
maxContextRecursion = 10
)
func (bc *Client) namedContext(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt) (*llb.State, *image.Image, error) {
func (bc *Client) namedContext(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt) (*llb.State, *dockerspec.DockerOCIImage, error) {
return bc.namedContextRecursive(ctx, name, nameWithPlatform, opt, 0)
}
func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt, count int) (*llb.State, *image.Image, error) {
func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWithPlatform string, opt ContextOpt, count int) (*llb.State, *dockerspec.DockerOCIImage, error) {
opts := bc.bopts.Opts
v, ok := opts[contextPrefix+nameWithPlatform]
contextKey := contextPrefix + nameWithPlatform
v, ok := opts[contextKey]
if !ok {
return nil, nil, nil
}
@ -71,21 +73,28 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi
named = reference.TagNameOnly(named)
ref, dgst, data, err := bc.client.ResolveImageConfig(ctx, named.String(), llb.ResolveImageConfigOpt{
Platform: opt.Platform,
ResolveMode: opt.ResolveMode,
LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, ref),
ResolverType: llb.ResolverTypeRegistry,
ref, dgst, data, err := bc.client.ResolveImageConfig(ctx, named.String(), sourceresolver.Opt{
LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, ref),
Platform: opt.Platform,
ImageOpt: &sourceresolver.ResolveImageOpt{
ResolveMode: opt.ResolveMode,
},
})
if err != nil {
e := &imageutil.ResolveToNonImageError{}
if errors.As(err, &e) {
return bc.namedContextRecursive(ctx, e.Updated, name, opt, count+1)
before, after, ok := strings.Cut(e.Updated, "://")
if !ok {
return nil, nil, errors.Errorf("could not parse ref: %s", e.Updated)
}
bc.bopts.Opts[contextKey] = before + ":" + after
return bc.namedContextRecursive(ctx, name, nameWithPlatform, opt, count+1)
}
return nil, nil, err
}
var img image.Image
var img dockerspec.DockerOCIImage
if err := json.Unmarshal(data, &img); err != nil {
return nil, nil, err
}
@ -139,22 +148,21 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi
return nil, nil, errors.Wrapf(err, "could not wrap %q with digest", name)
}
// TODO: How should source policy be handled here with a dummy ref?
_, dgst, data, err := bc.client.ResolveImageConfig(ctx, dummyRef.String(), llb.ResolveImageConfigOpt{
Platform: opt.Platform,
ResolveMode: opt.ResolveMode,
LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, dummyRef.String()),
ResolverType: llb.ResolverTypeOCILayout,
Store: llb.ResolveImageConfigOptStore{
SessionID: bc.bopts.SessionID,
StoreID: named.Name(),
_, dgst, data, err := bc.client.ResolveImageConfig(ctx, dummyRef.String(), sourceresolver.Opt{
LogName: fmt.Sprintf("[context %s] load metadata for %s", nameWithPlatform, dummyRef.String()),
Platform: opt.Platform,
OCILayoutOpt: &sourceresolver.ResolveOCILayoutOpt{
Store: sourceresolver.ResolveImageConfigOptStore{
SessionID: bc.bopts.SessionID,
StoreID: named.Name(),
},
},
})
if err != nil {
return nil, nil, err
}
var img image.Image
var img dockerspec.DockerOCIImage
if err := json.Unmarshal(data, &img); err != nil {
return nil, nil, errors.Wrap(err, "could not parse oci-layout image config")
}
@ -239,7 +247,7 @@ func (bc *Client) namedContextRecursive(ctx context.Context, name string, nameWi
if err := json.Unmarshal([]byte(md), &m); err != nil {
return nil, nil, errors.Wrapf(err, "failed to parse input metadata %s", md)
}
var img *image.Image
var img *dockerspec.DockerOCIImage
if dtic, ok := m[exptypes.ExporterImageConfigKey]; ok {
st, err = st.WithImageConfig(dtic)
if err != nil {

View File

@ -6,6 +6,7 @@ import (
"syscall"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/sourceresolver"
"github.com/moby/buildkit/solver/pb"
"github.com/moby/buildkit/solver/result"
spb "github.com/moby/buildkit/sourcepolicy/pb"
@ -26,8 +27,9 @@ func NewResult() *Result {
}
type Client interface {
sourceresolver.MetaResolver
Solve(ctx context.Context, req SolveRequest) (*Result, error)
ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error)
ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt) (string, digest.Digest, []byte, error)
BuildOpts() BuildOpts
Inputs(ctx context.Context) (map[string]llb.State, error)
NewContainer(ctx context.Context, req NewContainerRequest) (Container, error)

View File

@ -12,10 +12,12 @@ import (
"syscall"
"time"
distreference "github.com/distribution/reference"
"github.com/gogo/googleapis/google/rpc"
gogotypes "github.com/gogo/protobuf/types"
"github.com/golang/protobuf/ptypes/any"
"github.com/moby/buildkit/client/llb"
"github.com/moby/buildkit/client/llb/sourceresolver"
"github.com/moby/buildkit/frontend/gateway/client"
pb "github.com/moby/buildkit/frontend/gateway/pb"
"github.com/moby/buildkit/identity"
@ -23,6 +25,7 @@ import (
"github.com/moby/buildkit/util/apicaps"
"github.com/moby/buildkit/util/bklog"
"github.com/moby/buildkit/util/grpcerrors"
"github.com/moby/buildkit/util/imageutil"
"github.com/moby/sys/signal"
digest "github.com/opencontainers/go-digest"
"github.com/pkg/errors"
@ -479,7 +482,35 @@ func (c *grpcClient) Solve(ctx context.Context, creq client.SolveRequest) (res *
return res, nil
}
func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb.ResolveImageConfigOpt) (string, digest.Digest, []byte, error) {
func (c *grpcClient) ResolveSourceMetadata(ctx context.Context, op *opspb.SourceOp, opt sourceresolver.Opt) (*sourceresolver.MetaResponse, error) {
if c.caps.Supports(pb.CapSourceMetaResolver) != nil {
var ref string
if v, ok := strings.CutPrefix(op.Identifier, "docker-image://"); ok {
ref = v
} else if v, ok := strings.CutPrefix(op.Identifier, "oci-layout://"); ok {
ref = v
} else {
return &sourceresolver.MetaResponse{Op: op}, nil
}
retRef, dgst, config, err := c.ResolveImageConfig(ctx, ref, opt)
if err != nil {
return nil, err
}
if strings.HasPrefix(op.Identifier, "docker-image://") {
op.Identifier = "docker-image://" + retRef
} else if strings.HasPrefix(op.Identifier, "oci-layout://") {
op.Identifier = "oci-layout://" + retRef
}
return &sourceresolver.MetaResponse{
Op: op,
Image: &sourceresolver.ResolveImageResponse{
Digest: dgst,
Config: config,
},
}, nil
}
var p *opspb.Platform
if platform := opt.Platform; platform != nil {
p = &opspb.Platform{
@ -491,16 +522,97 @@ func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt llb
}
}
resp, err := c.client.ResolveImageConfig(ctx, &pb.ResolveImageConfigRequest{
ResolverType: int32(opt.ResolverType),
Ref: ref,
req := &pb.ResolveSourceMetaRequest{
Source: op,
Platform: p,
ResolveMode: opt.ResolveMode,
LogName: opt.LogName,
SessionID: opt.Store.SessionID,
StoreID: opt.Store.StoreID,
SourcePolicies: opt.SourcePolicies,
})
}
resp, err := c.client.ResolveSourceMeta(ctx, req)
if err != nil {
return nil, err
}
r := &sourceresolver.MetaResponse{
Op: resp.Source,
}
if resp.Image != nil {
r.Image = &sourceresolver.ResolveImageResponse{
Digest: resp.Image.Digest,
Config: resp.Image.Config,
}
}
return r, nil
}
func (c *grpcClient) resolveImageConfigViaSourceMetadata(ctx context.Context, ref string, opt sourceresolver.Opt, p *opspb.Platform) (string, digest.Digest, []byte, error) {
op := &opspb.SourceOp{
Identifier: "docker-image://" + ref,
}
if opt.OCILayoutOpt != nil {
named, err := distreference.ParseNormalizedNamed(ref)
if err != nil {
return "", "", nil, err
}
op.Identifier = "oci-layout://" + named.String()
op.Attrs = map[string]string{
opspb.AttrOCILayoutSessionID: opt.OCILayoutOpt.Store.SessionID,
opspb.AttrOCILayoutStoreID: opt.OCILayoutOpt.Store.StoreID,
}
}
req := &pb.ResolveSourceMetaRequest{
Source: op,
Platform: p,
LogName: opt.LogName,
SourcePolicies: opt.SourcePolicies,
}
resp, err := c.client.ResolveSourceMeta(ctx, req)
if err != nil {
return "", "", nil, err
}
if resp.Image == nil {
return "", "", nil, &imageutil.ResolveToNonImageError{Ref: ref, Updated: resp.Source.Identifier}
}
ref = strings.TrimPrefix(resp.Source.Identifier, "docker-image://")
ref = strings.TrimPrefix(ref, "oci-layout://")
return ref, resp.Image.Digest, resp.Image.Config, nil
}
func (c *grpcClient) ResolveImageConfig(ctx context.Context, ref string, opt sourceresolver.Opt) (string, digest.Digest, []byte, error) {
var p *opspb.Platform
if platform := opt.Platform; platform != nil {
p = &opspb.Platform{
OS: platform.OS,
Architecture: platform.Architecture,
Variant: platform.Variant,
OSVersion: platform.OSVersion,
OSFeatures: platform.OSFeatures,
}
}
if c.caps.Supports(pb.CapSourceMetaResolver) == nil {
return c.resolveImageConfigViaSourceMetadata(ctx, ref, opt, p)
}
req := &pb.ResolveImageConfigRequest{
Ref: ref,
LogName: opt.LogName,
SourcePolicies: opt.SourcePolicies,
Platform: p,
}
if iopt := opt.ImageOpt; iopt != nil {
req.ResolveMode = iopt.ResolveMode
req.ResolverType = int32(sourceresolver.ResolverTypeRegistry)
}
if iopt := opt.OCILayoutOpt; iopt != nil {
req.ResolverType = int32(sourceresolver.ResolverTypeOCILayout)
req.StoreID = iopt.Store.StoreID
req.SessionID = iopt.Store.SessionID
}
resp, err := c.client.ResolveImageConfig(ctx, req)
if err != nil {
return "", "", nil, err
}

View File

@ -68,6 +68,10 @@ const (
// CapAttestations is the capability to indicate that attestation
// references will be attached to results
CapAttestations apicaps.CapID = "reference.attestations"
// CapSourceMetaResolver is the capability to indicate support for the ResolveSourceMetadata
// function in the gateway API
CapSourceMetaResolver apicaps.CapID = "source.metaresolver"
)
func init() {
@ -231,4 +235,11 @@ func init() {
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapSourceMetaResolver,
Name: "source meta resolver",
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
}

File diff suppressed because it is too large

View File

@ -17,6 +17,8 @@ option (gogoproto.unmarshaler_all) = true;
service LLBBridge {
// apicaps:CapResolveImage
rpc ResolveImageConfig(ResolveImageConfigRequest) returns (ResolveImageConfigResponse);
// apicaps:CapSourceMetaResolver
rpc ResolveSourceMeta(ResolveSourceMetaRequest) returns (ResolveSourceMetaResponse);
// apicaps:CapSolveBase
rpc Solve(SolveRequest) returns (SolveResponse);
// apicaps:CapReadFile
@ -132,6 +134,24 @@ message ResolveImageConfigResponse {
string Ref = 3;
}
message ResolveSourceMetaRequest {
pb.SourceOp Source = 1;
pb.Platform Platform = 2;
string LogName = 3;
string ResolveMode = 4;
repeated moby.buildkit.v1.sourcepolicy.Policy SourcePolicies = 8;
}
message ResolveSourceMetaResponse {
pb.SourceOp Source = 1;
ResolveSourceImageResponse Image = 2;
}
message ResolveSourceImageResponse {
string Digest = 1 [(gogoproto.customtype) = "github.com/opencontainers/go-digest.Digest", (gogoproto.nullable) = false];
bytes Config = 2;
}
message SolveRequest {
pb.Definition Definition = 1;
string Frontend = 2;

View File

@ -57,6 +57,7 @@ const (
CapExecMountTmpfsSize apicaps.CapID = "exec.mount.tmpfs.size"
CapExecMountSecret apicaps.CapID = "exec.mount.secret"
CapExecMountSSH apicaps.CapID = "exec.mount.ssh"
CapExecMountContentCache apicaps.CapID = "exec.mount.cache.content"
CapExecCgroupsMounted apicaps.CapID = "exec.cgroup"
CapExecSecretEnv apicaps.CapID = "exec.secretenv"
@ -337,6 +338,12 @@ func init() {
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecMountContentCache,
Enabled: true,
Status: apicaps.CapStatusExperimental,
})
Caps.Init(apicaps.Cap{
ID: CapExecCgroupsMounted,
Enabled: true,

View File

@ -117,6 +117,35 @@ func (MountType) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8de16154b2733812, []int{2}
}
// MountContentCache ...
type MountContentCache int32
const (
MountContentCache_DEFAULT MountContentCache = 0
MountContentCache_ON MountContentCache = 1
MountContentCache_OFF MountContentCache = 2
)
var MountContentCache_name = map[int32]string{
0: "DEFAULT",
1: "ON",
2: "OFF",
}
var MountContentCache_value = map[string]int32{
"DEFAULT": 0,
"ON": 1,
"OFF": 2,
}
func (x MountContentCache) String() string {
return proto.EnumName(MountContentCache_name, int32(x))
}
func (MountContentCache) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8de16154b2733812, []int{3}
}
// CacheSharingOpt defines different sharing modes for cache mount
type CacheSharingOpt int32
@ -146,7 +175,7 @@ func (x CacheSharingOpt) String() string {
}
func (CacheSharingOpt) EnumDescriptor() ([]byte, []int) {
return fileDescriptor_8de16154b2733812, []int{3}
return fileDescriptor_8de16154b2733812, []int{4}
}
// Op represents a vertex of the LLB DAG.
@ -771,17 +800,18 @@ func (m *SecretEnv) GetOptional() bool {
// Mount specifies how to mount an input Op as a filesystem.
type Mount struct {
Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"`
Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"`
Dest string `protobuf:"bytes,3,opt,name=dest,proto3" json:"dest,omitempty"`
Output OutputIndex `protobuf:"varint,4,opt,name=output,proto3,customtype=OutputIndex" json:"output"`
Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"`
MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"`
TmpfsOpt *TmpfsOpt `protobuf:"bytes,19,opt,name=TmpfsOpt,proto3" json:"TmpfsOpt,omitempty"`
CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"`
SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"`
SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"`
ResultID string `protobuf:"bytes,23,opt,name=resultID,proto3" json:"resultID,omitempty"`
Input InputIndex `protobuf:"varint,1,opt,name=input,proto3,customtype=InputIndex" json:"input"`
Selector string `protobuf:"bytes,2,opt,name=selector,proto3" json:"selector,omitempty"`
Dest string `protobuf:"bytes,3,opt,name=dest,proto3" json:"dest,omitempty"`
Output OutputIndex `protobuf:"varint,4,opt,name=output,proto3,customtype=OutputIndex" json:"output"`
Readonly bool `protobuf:"varint,5,opt,name=readonly,proto3" json:"readonly,omitempty"`
MountType MountType `protobuf:"varint,6,opt,name=mountType,proto3,enum=pb.MountType" json:"mountType,omitempty"`
TmpfsOpt *TmpfsOpt `protobuf:"bytes,19,opt,name=TmpfsOpt,proto3" json:"TmpfsOpt,omitempty"`
CacheOpt *CacheOpt `protobuf:"bytes,20,opt,name=cacheOpt,proto3" json:"cacheOpt,omitempty"`
SecretOpt *SecretOpt `protobuf:"bytes,21,opt,name=secretOpt,proto3" json:"secretOpt,omitempty"`
SSHOpt *SSHOpt `protobuf:"bytes,22,opt,name=SSHOpt,proto3" json:"SSHOpt,omitempty"`
ResultID string `protobuf:"bytes,23,opt,name=resultID,proto3" json:"resultID,omitempty"`
ContentCache MountContentCache `protobuf:"varint,24,opt,name=contentCache,proto3,enum=pb.MountContentCache" json:"contentCache,omitempty"`
}
func (m *Mount) Reset() { *m = Mount{} }
@ -876,6 +906,13 @@ func (m *Mount) GetResultID() string {
return ""
}
func (m *Mount) GetContentCache() MountContentCache {
if m != nil {
return m.ContentCache
}
return MountContentCache_DEFAULT
}
// TmpfsOpt defines options describing tmpfs mounts
type TmpfsOpt struct {
// Specify an upper limit on the size of the filesystem.
@ -2799,6 +2836,7 @@ func init() {
proto.RegisterEnum("pb.NetMode", NetMode_name, NetMode_value)
proto.RegisterEnum("pb.SecurityMode", SecurityMode_name, SecurityMode_value)
proto.RegisterEnum("pb.MountType", MountType_name, MountType_value)
proto.RegisterEnum("pb.MountContentCache", MountContentCache_name, MountContentCache_value)
proto.RegisterEnum("pb.CacheSharingOpt", CacheSharingOpt_name, CacheSharingOpt_value)
proto.RegisterType((*Op)(nil), "pb.Op")
proto.RegisterType((*Platform)(nil), "pb.Platform")
@ -2854,169 +2892,172 @@ func init() {
func init() { proto.RegisterFile("ops.proto", fileDescriptor_8de16154b2733812) }
var fileDescriptor_8de16154b2733812 = []byte{
// 2577 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0x4f, 0x6f, 0x5b, 0xc7,
0x11, 0x17, 0xff, 0x93, 0x43, 0x89, 0x66, 0xd6, 0x4e, 0xc2, 0xa8, 0xae, 0xac, 0xbc, 0xa4, 0x81,
0x2c, 0xdb, 0x12, 0xaa, 0x00, 0x71, 0x60, 0x04, 0x45, 0x25, 0x91, 0x8e, 0x18, 0xc7, 0xa2, 0xb0,
0xb4, 0x9d, 0x1e, 0x0a, 0x18, 0x4f, 0x8f, 0x4b, 0xea, 0x41, 0xef, 0xbd, 0x7d, 0x78, 0x6f, 0x69,
0x89, 0x3d, 0xf4, 0xd0, 0x53, 0x8f, 0x01, 0x0a, 0x14, 0xbd, 0x14, 0xfd, 0x12, 0x3d, 0xb6, 0xf7,
0x00, 0xb9, 0xe4, 0xd0, 0x43, 0xd0, 0x43, 0x5a, 0x38, 0x97, 0x7e, 0x88, 0x16, 0x28, 0x66, 0x76,
0xdf, 0x1f, 0x52, 0x32, 0x6c, 0xb7, 0x45, 0x4f, 0x9c, 0x37, 0xf3, 0xdb, 0xd9, 0xd9, 0xd9, 0x99,
0x9d, 0xd9, 0x25, 0x34, 0x64, 0x18, 0x6f, 0x85, 0x91, 0x54, 0x92, 0x15, 0xc3, 0xe3, 0xd5, 0x3b,
0x13, 0x57, 0x9d, 0x4c, 0x8f, 0xb7, 0x1c, 0xe9, 0x6f, 0x4f, 0xe4, 0x44, 0x6e, 0x93, 0xe8, 0x78,
0x3a, 0xa6, 0x2f, 0xfa, 0x20, 0x4a, 0x0f, 0xb1, 0xfe, 0x51, 0x84, 0xe2, 0x20, 0x64, 0xef, 0x42,
0xd5, 0x0d, 0xc2, 0xa9, 0x8a, 0x3b, 0x85, 0xf5, 0xd2, 0x46, 0x73, 0xa7, 0xb1, 0x15, 0x1e, 0x6f,
0xf5, 0x91, 0xc3, 0x8d, 0x80, 0xad, 0x43, 0x59, 0x9c, 0x0b, 0xa7, 0x53, 0x5c, 0x2f, 0x6c, 0x34,
0x77, 0x00, 0x01, 0xbd, 0x73, 0xe1, 0x0c, 0xc2, 0x83, 0x25, 0x4e, 0x12, 0xf6, 0x01, 0x54, 0x63,
0x39, 0x8d, 0x1c, 0xd1, 0x29, 0x11, 0x66, 0x19, 0x31, 0x43, 0xe2, 0x10, 0xca, 0x48, 0x51, 0xd3,
0xd8, 0xf5, 0x44, 0xa7, 0x9c, 0x69, 0xba, 0xef, 0x7a, 0x1a, 0x43, 0x12, 0xf6, 0x1e, 0x54, 0x8e,
0xa7, 0xae, 0x37, 0xea, 0x54, 0x08, 0xd2, 0x44, 0xc8, 0x1e, 0x32, 0x08, 0xa3, 0x65, 0x08, 0xf2,
0x45, 0x34, 0x11, 0x9d, 0x6a, 0x06, 0x7a, 0x88, 0x0c, 0x0d, 0x22, 0x19, 0xce, 0x35, 0x72, 0xc7,
0xe3, 0x4e, 0x2d, 0x9b, 0xab, 0xeb, 0x8e, 0xc7, 0x7a, 0x2e, 0x94, 0xb0, 0x0d, 0xa8, 0x87, 0x9e,
0xad, 0xc6, 0x32, 0xf2, 0x3b, 0x90, 0xd9, 0x7d, 0x64, 0x78, 0x3c, 0x95, 0xb2, 0xbb, 0xd0, 0x74,
0x64, 0x10, 0xab, 0xc8, 0x76, 0x03, 0x15, 0x77, 0x9a, 0x04, 0x7e, 0x13, 0xc1, 0x5f, 0xc8, 0xe8,
0x54, 0x44, 0xfb, 0x99, 0x90, 0xe7, 0x91, 0x7b, 0x65, 0x28, 0xca, 0xd0, 0xfa, 0x6d, 0x01, 0xea,
0x89, 0x56, 0x66, 0xc1, 0xf2, 0x6e, 0xe4, 0x9c, 0xb8, 0x4a, 0x38, 0x6a, 0x1a, 0x89, 0x4e, 0x61,
0xbd, 0xb0, 0xd1, 0xe0, 0x73, 0x3c, 0xd6, 0x82, 0xe2, 0x60, 0x48, 0xfe, 0x6e, 0xf0, 0xe2, 0x60,
0xc8, 0x3a, 0x50, 0x7b, 0x62, 0x47, 0xae, 0x1d, 0x28, 0x72, 0x70, 0x83, 0x27, 0x9f, 0xec, 0x3a,
0x34, 0x06, 0xc3, 0x27, 0x22, 0x8a, 0x5d, 0x19, 0x90, 0x5b, 0x1b, 0x3c, 0x63, 0xb0, 0x35, 0x80,
0xc1, 0xf0, 0xbe, 0xb0, 0x51, 0x69, 0xdc, 0xa9, 0xac, 0x97, 0x36, 0x1a, 0x3c, 0xc7, 0xb1, 0x7e,
0x09, 0x15, 0xda, 0x6a, 0xf6, 0x19, 0x54, 0x47, 0xee, 0x44, 0xc4, 0x4a, 0x9b, 0xb3, 0xb7, 0xf3,
0xd5, 0x77, 0x37, 0x96, 0xfe, 0xfa, 0xdd, 0x8d, 0xcd, 0x5c, 0x4c, 0xc9, 0x50, 0x04, 0x8e, 0x0c,
0x94, 0xed, 0x06, 0x22, 0x8a, 0xb7, 0x27, 0xf2, 0x8e, 0x1e, 0xb2, 0xd5, 0xa5, 0x1f, 0x6e, 0x34,
0xb0, 0x9b, 0x50, 0x71, 0x83, 0x91, 0x38, 0x27, 0xfb, 0x4b, 0x7b, 0x57, 0x8d, 0xaa, 0xe6, 0x60,
0xaa, 0xc2, 0xa9, 0xea, 0xa3, 0x88, 0x6b, 0x84, 0xf5, 0x75, 0x01, 0xaa, 0x3a, 0x94, 0xd8, 0x75,
0x28, 0xfb, 0x42, 0xd9, 0x34, 0x7f, 0x73, 0xa7, 0xae, 0xb7, 0x54, 0xd9, 0x9c, 0xb8, 0x18, 0xa5,
0xbe, 0x9c, 0xa2, 0xef, 0x8b, 0x59, 0x94, 0x3e, 0x44, 0x0e, 0x37, 0x02, 0xf6, 0x23, 0xa8, 0x05,
0x42, 0x9d, 0xc9, 0xe8, 0x94, 0x7c, 0xd4, 0xd2, 0x61, 0x71, 0x28, 0xd4, 0x43, 0x39, 0x12, 0x3c,
0x91, 0xb1, 0xdb, 0x50, 0x8f, 0x85, 0x33, 0x8d, 0x5c, 0x35, 0x23, 0x7f, 0xb5, 0x76, 0xda, 0x14,
0xac, 0x86, 0x47, 0xe0, 0x14, 0xc1, 0x6e, 0x41, 0x23, 0x16, 0x4e, 0x24, 0x94, 0x08, 0x9e, 0x91,
0xff, 0x9a, 0x3b, 0x2b, 0x06, 0x1e, 0x09, 0xd5, 0x0b, 0x9e, 0xf1, 0x4c, 0x6e, 0x7d, 0x5d, 0x84,
0x32, 0xda, 0xcc, 0x18, 0x94, 0xed, 0x68, 0xa2, 0x33, 0xaa, 0xc1, 0x89, 0x66, 0x6d, 0x28, 0xa1,
0x8e, 0x22, 0xb1, 0x90, 0x44, 0x8e, 0x73, 0x36, 0x32, 0x1b, 0x8a, 0x24, 0x8e, 0x9b, 0xc6, 0x22,
0x32, 0xfb, 0x48, 0x34, 0xbb, 0x09, 0x8d, 0x30, 0x92, 0xe7, 0xb3, 0xa7, 0xda, 0x82, 0x2c, 0x4a,
0x91, 0x89, 0x06, 0xd4, 0x43, 0x43, 0xb1, 0x4d, 0x00, 0x71, 0xae, 0x22, 0xfb, 0x40, 0xc6, 0x2a,
0xee, 0x54, 0xc9, 0x5a, 0x8a, 0x7b, 0x64, 0xf4, 0x8f, 0x78, 0x4e, 0xca, 0x56, 0xa1, 0x7e, 0x22,
0x63, 0x15, 0xd8, 0xbe, 0xa0, 0x0c, 0x69, 0xf0, 0xf4, 0x9b, 0x59, 0x50, 0x9d, 0x7a, 0xae, 0xef,
0xaa, 0x4e, 0x23, 0xd3, 0xf1, 0x98, 0x38, 0xdc, 0x48, 0x30, 0x8a, 0x9d, 0x49, 0x24, 0xa7, 0xe1,
0x91, 0x1d, 0x89, 0x40, 0x51, 0xfe, 0x34, 0xf8, 0x1c, 0x8f, 0x7d, 0x02, 0xef, 0x44, 0xc2, 0x97,
0xcf, 0x04, 0x6d, 0xd4, 0x50, 0x4d, 0x8f, 0x63, 0x8e, 0x8e, 0x8d, 0xdd, 0x67, 0x82, 0x72, 0xa8,
0xce, 0x5f, 0x0c, 0xb0, 0x6e, 0x43, 0x55, 0xdb, 0x8d, 0x6e, 0x41, 0xca, 0x64, 0x0a, 0xd1, 0x98,
0x21, 0xfd, 0xa3, 0x24, 0x43, 0xfa, 0x47, 0x56, 0x17, 0xaa, 0xda, 0x42, 0x44, 0x1f, 0xe2, 0xaa,
0x0c, 0x1a, 0x69, 0xe4, 0x0d, 0xe5, 0x58, 0xe9, 0x88, 0xe4, 0x44, 0x93, 0x56, 0x3b, 0xd2, 0xfe,
0x2f, 0x71, 0xa2, 0xad, 0x07, 0xd0, 0x48, 0x77, 0x96, 0xa6, 0xe8, 0x1a, 0x35, 0xc5, 0x7e, 0x17,
0x07, 0x90, 0xbb, 0xf4, 0xa4, 0x44, 0xa3, 0x1b, 0x65, 0xa8, 0x5c, 0x19, 0xd8, 0x1e, 0x29, 0xaa,
0xf3, 0xf4, 0xdb, 0xfa, 0x5d, 0x09, 0x2a, 0xb4, 0x30, 0xb6, 0x81, 0x19, 0x11, 0x4e, 0xf5, 0x0a,
0x4a, 0x7b, 0xcc, 0x64, 0x04, 0x50, 0xee, 0xa5, 0x09, 0x81, 0x79, 0xb8, 0x8a, 0xd1, 0xe9, 0x09,
0x47, 0xc9, 0xc8, 0xcc, 0x93, 0x7e, 0xe3, 0xfc, 0x23, 0xcc, 0x50, 0x1d, 0x30, 0x44, 0xb3, 0x5b,
0x50, 0x95, 0x94, 0x56, 0x14, 0x33, 0x2f, 0x48, 0x36, 0x03, 0x41, 0xe5, 0x91, 0xb0, 0x47, 0x32,
0xf0, 0x66, 0x14, 0x49, 0x75, 0x9e, 0x7e, 0x63, 0xa0, 0x53, 0x1e, 0x3d, 0x9a, 0x85, 0xfa, 0x58,
0x6d, 0xe9, 0x40, 0x7f, 0x98, 0x30, 0x79, 0x26, 0xc7, 0x83, 0xf3, 0x91, 0x1f, 0x8e, 0xe3, 0x41,
0xa8, 0x3a, 0x57, 0xb3, 0x90, 0x4c, 0x78, 0x3c, 0x95, 0x22, 0xd2, 0xb1, 0x9d, 0x13, 0x81, 0xc8,
0x6b, 0x19, 0x72, 0xdf, 0xf0, 0x78, 0x2a, 0xcd, 0x32, 0x0d, 0xa1, 0x6f, 0x12, 0x34, 0x97, 0x69,
0x88, 0xcd, 0xe4, 0x18, 0xa1, 0xc3, 0xe1, 0x01, 0x22, 0xdf, 0xca, 0x4e, 0x77, 0xcd, 0xe1, 0x46,
0xa2, 0x57, 0x1b, 0x4f, 0x3d, 0xd5, 0xef, 0x76, 0xde, 0xd6, 0xae, 0x4c, 0xbe, 0xad, 0xb5, 0x6c,
0x01, 0xe8, 0xd6, 0xd8, 0xfd, 0x85, 0x8e, 0x97, 0x12, 0x27, 0xda, 0xea, 0x43, 0x3d, 0x31, 0xf1,
0x42, 0x18, 0xdc, 0x81, 0x5a, 0x7c, 0x62, 0x47, 0x6e, 0x30, 0xa1, 0x1d, 0x6a, 0xed, 0x5c, 0x4d,
0x57, 0x34, 0xd4, 0x7c, 0xb4, 0x22, 0xc1, 0x58, 0x32, 0x09, 0xa9, 0xcb, 0x74, 0xb5, 0xa1, 0x34,
0x75, 0x47, 0xa4, 0x67, 0x85, 0x23, 0x89, 0x9c, 0x89, 0xab, 0x83, 0x72, 0x85, 0x23, 0x89, 0xf6,
0xf9, 0x72, 0xa4, 0x6b, 0xe6, 0x0a, 0x27, 0x7a, 0x2e, 0xec, 0x2a, 0x0b, 0x61, 0xe7, 0x25, 0xbe,
0xf9, 0xbf, 0xcc, 0xf6, 0x9b, 0x02, 0xd4, 0x93, 0x42, 0x8f, 0xe5, 0xc6, 0x1d, 0x89, 0x40, 0xb9,
0x63, 0x57, 0x44, 0x66, 0xe2, 0x1c, 0x87, 0xdd, 0x81, 0x8a, 0xad, 0x54, 0x94, 0x1c, 0xe2, 0x6f,
0xe7, 0xbb, 0x84, 0xad, 0x5d, 0x94, 0xf4, 0x02, 0x15, 0xcd, 0xb8, 0x46, 0xad, 0x7e, 0x0c, 0x90,
0x31, 0xd1, 0xd6, 0x53, 0x31, 0x33, 0x5a, 0x91, 0x64, 0xd7, 0xa0, 0xf2, 0xcc, 0xf6, 0xa6, 0x49,
0x46, 0xea, 0x8f, 0x7b, 0xc5, 0x8f, 0x0b, 0xd6, 0x9f, 0x8b, 0x50, 0x33, 0x5d, 0x03, 0xbb, 0x0d,
0x35, 0xea, 0x1a, 0x8c, 0x45, 0x97, 0xa7, 0x5f, 0x02, 0x61, 0xdb, 0x69, 0x3b, 0x94, 0xb3, 0xd1,
0xa8, 0xd2, 0x6d, 0x91, 0xb1, 0x31, 0x6b, 0x8e, 0x4a, 0x23, 0x31, 0x36, 0x7d, 0x4f, 0x8b, 0xba,
0x0c, 0x31, 0x76, 0x03, 0x17, 0xfd, 0xc3, 0x51, 0xc4, 0x6e, 0x27, 0xab, 0x2e, 0x93, 0xc6, 0xb7,
0xf2, 0x1a, 0x2f, 0x2e, 0xba, 0x0f, 0xcd, 0xdc, 0x34, 0x97, 0xac, 0xfa, 0xfd, 0xfc, 0xaa, 0xcd,
0x94, 0xa4, 0x4e, 0x37, 0x6d, 0x99, 0x17, 0xfe, 0x0b, 0xff, 0x7d, 0x04, 0x90, 0xa9, 0x7c, 0xf5,
0xe3, 0xcb, 0xfa, 0x53, 0x09, 0x60, 0x10, 0x62, 0x0d, 0x1c, 0xd9, 0x54, 0xb5, 0x97, 0xdd, 0x49,
0x20, 0x23, 0xf1, 0x94, 0xd2, 0x9c, 0xc6, 0xd7, 0x79, 0x53, 0xf3, 0x28, 0x63, 0xd8, 0x2e, 0x34,
0x47, 0x22, 0x76, 0x22, 0x97, 0x02, 0xca, 0x38, 0xfd, 0x06, 0xae, 0x29, 0xd3, 0xb3, 0xd5, 0xcd,
0x10, 0xda, 0x57, 0xf9, 0x31, 0x6c, 0x07, 0x96, 0xc5, 0x79, 0x28, 0x23, 0x65, 0x66, 0xd1, 0xcd,
0xe5, 0x15, 0xdd, 0xa6, 0x22, 0x9f, 0x66, 0xe2, 0x4d, 0x91, 0x7d, 0x30, 0x1b, 0xca, 0x8e, 0x1d,
0xc6, 0xa6, 0xa4, 0x77, 0x16, 0xe6, 0xdb, 0xb7, 0x43, 0xed, 0xb4, 0xbd, 0x0f, 0x71, 0xad, 0xbf,
0xfa, 0xdb, 0x8d, 0x5b, 0xb9, 0x3e, 0xc8, 0x97, 0xc7, 0xb3, 0x6d, 0x8a, 0x97, 0x53, 0x57, 0x6d,
0x4f, 0x95, 0xeb, 0x6d, 0xdb, 0xa1, 0x8b, 0xea, 0x70, 0x60, 0xbf, 0xcb, 0x49, 0x35, 0xfb, 0x18,
0x5a, 0x61, 0x24, 0x27, 0x91, 0x88, 0xe3, 0xa7, 0x54, 0x15, 0x4d, 0xb7, 0xfa, 0x86, 0xa9, 0xde,
0x24, 0xf9, 0x14, 0x05, 0x7c, 0x25, 0xcc, 0x7f, 0xae, 0xfe, 0x04, 0xda, 0x8b, 0x2b, 0x7e, 0x9d,
0xdd, 0x5b, 0xbd, 0x0b, 0x8d, 0x74, 0x05, 0x2f, 0x1b, 0x58, 0xcf, 0x6f, 0xfb, 0x1f, 0x0b, 0x50,
0xd5, 0xf9, 0xc8, 0xee, 0x42, 0xc3, 0x93, 0x8e, 0x8d, 0x06, 0x24, 0x37, 0x83, 0x77, 0xb2, 0x74,
0xdd, 0xfa, 0x3c, 0x91, 0xe9, 0xfd, 0xc8, 0xb0, 0x18, 0x9e, 0x6e, 0x30, 0x96, 0x49, 0xfe, 0xb4,
0xb2, 0x41, 0xfd, 0x60, 0x2c, 0xb9, 0x16, 0xae, 0x3e, 0x80, 0xd6, 0xbc, 0x8a, 0x4b, 0xec, 0x7c,
0x6f, 0x3e, 0xd0, 0xa9, 0x1a, 0xa4, 0x83, 0xf2, 0x66, 0xdf, 0x85, 0x46, 0xca, 0x67, 0x9b, 0x17,
0x0d, 0x5f, 0xce, 0x8f, 0xcc, 0xd9, 0x6a, 0xfd, 0xba, 0x00, 0x90, 0xd9, 0x86, 0xe7, 0x1c, 0xde,
0x41, 0x82, 0xac, 0x7b, 0x48, 0xbf, 0xa9, 0xf8, 0xda, 0xca, 0x26, 0x5b, 0x96, 0x39, 0xd1, 0x6c,
0x0b, 0x60, 0x94, 0xe6, 0xfa, 0x0b, 0x4e, 0x80, 0x1c, 0x02, 0xf5, 0x7b, 0x76, 0x30, 0x99, 0xda,
0x13, 0x61, 0x5a, 0xbc, 0xf4, 0xdb, 0x1a, 0x40, 0x3d, 0xb1, 0x90, 0xad, 0x43, 0x33, 0x36, 0x56,
0x61, 0x1b, 0x8d, 0xa6, 0x54, 0x78, 0x9e, 0x85, 0xed, 0x70, 0x64, 0x07, 0x13, 0x31, 0xd7, 0x0e,
0x73, 0xe4, 0x70, 0x23, 0xb0, 0xbe, 0x80, 0x0a, 0x31, 0x30, 0x7b, 0x63, 0x65, 0x47, 0xca, 0x74,
0xd6, 0xba, 0x79, 0x94, 0x31, 0x99, 0xb4, 0x57, 0xc6, 0xf8, 0xe6, 0x1a, 0xc0, 0xde, 0xc7, 0x16,
0x75, 0x64, 0xdc, 0x7d, 0x19, 0x0e, 0xc5, 0xd6, 0x27, 0x50, 0x4f, 0xd8, 0xe8, 0x15, 0xcf, 0x0d,
0x84, 0x31, 0x91, 0x68, 0xbc, 0x91, 0x38, 0x27, 0x76, 0x64, 0x3b, 0x4a, 0xe8, 0x1e, 0xa6, 0xc2,
0x33, 0x86, 0xf5, 0x1e, 0x34, 0x73, 0x49, 0x89, 0xb1, 0xf8, 0x84, 0xf6, 0x58, 0x1f, 0x0d, 0xfa,
0xc3, 0xfa, 0x14, 0x56, 0xe6, 0x12, 0x04, 0x2b, 0x99, 0x3b, 0x4a, 0x2a, 0x99, 0xae, 0x52, 0x17,
0x5a, 0x31, 0x06, 0xe5, 0x33, 0x61, 0x9f, 0x9a, 0x36, 0x8c, 0x68, 0xeb, 0x0f, 0x78, 0xf1, 0x4a,
0xda, 0xe3, 0x1f, 0x02, 0x9c, 0x28, 0x15, 0x3e, 0xa5, 0x7e, 0xd9, 0x28, 0x6b, 0x20, 0x87, 0x10,
0xec, 0x06, 0x34, 0xf1, 0x23, 0x36, 0x72, 0xad, 0x9a, 0x46, 0xc4, 0x1a, 0xf0, 0x03, 0x68, 0x8c,
0xd3, 0xe1, 0x25, 0x13, 0x1f, 0xc9, 0xe8, 0x77, 0xa0, 0x1e, 0x48, 0x23, 0xd3, 0x7b, 0x5b, 0x0b,
0x64, 0x3a, 0xce, 0xf6, 0x3c, 0x23, 0xab, 0xe8, 0x71, 0xb6, 0xe7, 0x91, 0xd0, 0xba, 0x05, 0x6f,
0x5c, 0xb8, 0x42, 0xb2, 0xb7, 0xa0, 0x3a, 0x76, 0x3d, 0x45, 0x15, 0x0b, 0xaf, 0x0b, 0xe6, 0xcb,
0xfa, 0x57, 0x01, 0x20, 0x8b, 0x2d, 0x4c, 0x19, 0x2c, 0x3d, 0x88, 0x59, 0xd6, 0xa5, 0xc6, 0x83,
0xba, 0x6f, 0x0e, 0x31, 0x13, 0x19, 0xd7, 0xe7, 0xe3, 0x71, 0x2b, 0x39, 0xe3, 0xf4, 0xf1, 0xb6,
0x63, 0x8e, 0xb7, 0xd7, 0xb9, 0xe6, 0xa5, 0x33, 0x50, 0x17, 0x96, 0xbf, 0xf5, 0x43, 0x96, 0xeb,
0xdc, 0x48, 0x56, 0x1f, 0xc0, 0xca, 0xdc, 0x94, 0xaf, 0x58, 0xd0, 0xb2, 0xc3, 0x38, 0x9f, 0xe8,
0x3b, 0x50, 0xd5, 0xcf, 0x05, 0x6c, 0x03, 0x6a, 0xb6, 0xa3, 0x73, 0x3c, 0x77, 0xce, 0xa0, 0x70,
0x97, 0xd8, 0x3c, 0x11, 0x5b, 0x7f, 0x29, 0x02, 0x64, 0xfc, 0xd7, 0x68, 0xc5, 0xef, 0x41, 0x2b,
0x16, 0x8e, 0x0c, 0x46, 0x76, 0x34, 0x23, 0xa9, 0xb9, 0xcf, 0x5e, 0x36, 0x64, 0x01, 0x99, 0x6b,
0xcb, 0x4b, 0x2f, 0x6f, 0xcb, 0x37, 0xa0, 0xec, 0xc8, 0x70, 0x66, 0xea, 0x16, 0x9b, 0x5f, 0xc8,
0xbe, 0x0c, 0x67, 0x07, 0x4b, 0x9c, 0x10, 0x6c, 0x0b, 0xaa, 0xfe, 0x29, 0x3d, 0xa0, 0xe8, 0x8b,
0xe0, 0xb5, 0x79, 0xec, 0xc3, 0x53, 0xa4, 0x0f, 0x96, 0xb8, 0x41, 0xb1, 0x5b, 0x50, 0xf1, 0x4f,
0x47, 0x6e, 0x64, 0x2a, 0xcf, 0xd5, 0x45, 0x78, 0xd7, 0x8d, 0xe8, 0xbd, 0x04, 0x31, 0xcc, 0x82,
0x62, 0xe4, 0x9b, 0xd7, 0x92, 0xf6, 0x82, 0x37, 0xfd, 0x83, 0x25, 0x5e, 0x8c, 0xfc, 0xbd, 0x3a,
0x54, 0xb5, 0x5f, 0xad, 0x7f, 0x96, 0xa0, 0x35, 0x6f, 0x25, 0xee, 0x6c, 0x1c, 0x39, 0xc9, 0xce,
0xc6, 0x91, 0x93, 0xde, 0x58, 0x8a, 0xb9, 0x1b, 0x8b, 0x05, 0x15, 0x79, 0x16, 0x88, 0x28, 0xff,
0x52, 0xb4, 0x7f, 0x22, 0xcf, 0x02, 0xec, 0x9a, 0xb5, 0x68, 0xae, 0x09, 0xad, 0x98, 0x26, 0xf4,
0x7d, 0x58, 0x19, 0x4b, 0xcf, 0x93, 0x67, 0xc3, 0x99, 0xef, 0xb9, 0xc1, 0xa9, 0xe9, 0x44, 0xe7,
0x99, 0x6c, 0x03, 0xae, 0x8c, 0xdc, 0x08, 0xcd, 0xd9, 0x97, 0x81, 0x12, 0x01, 0xdd, 0x83, 0x11,
0xb7, 0xc8, 0x66, 0x9f, 0xc1, 0xba, 0xad, 0x94, 0xf0, 0x43, 0xf5, 0x38, 0x08, 0x6d, 0xe7, 0xb4,
0x2b, 0x1d, 0xca, 0x42, 0x3f, 0xb4, 0x95, 0x7b, 0xec, 0x7a, 0xae, 0x9a, 0x91, 0x33, 0xea, 0xfc,
0xa5, 0x38, 0xf6, 0x01, 0xb4, 0x9c, 0x48, 0xd8, 0x4a, 0x74, 0x45, 0xac, 0x8e, 0x6c, 0x75, 0xd2,
0xa9, 0xd3, 0xc8, 0x05, 0x2e, 0xae, 0xc1, 0x46, 0x6b, 0xbf, 0x70, 0xbd, 0x91, 0x83, 0x77, 0xcf,
0x86, 0x5e, 0xc3, 0x1c, 0x93, 0x6d, 0x01, 0x23, 0x46, 0xcf, 0x0f, 0xd5, 0x2c, 0x85, 0x02, 0x41,
0x2f, 0x91, 0xe0, 0x81, 0xab, 0x5c, 0x5f, 0xc4, 0xca, 0xf6, 0x43, 0xba, 0x56, 0x97, 0x78, 0xc6,
0x60, 0x37, 0xa1, 0xed, 0x06, 0x8e, 0x37, 0x1d, 0x89, 0xa7, 0x21, 0x2e, 0x24, 0x0a, 0xe2, 0xce,
0x32, 0x9d, 0x2a, 0x57, 0x0c, 0xff, 0xc8, 0xb0, 0x11, 0x2a, 0xce, 0x17, 0xa0, 0x2b, 0x1a, 0x6a,
0xf8, 0x09, 0xd4, 0xfa, 0xb2, 0x00, 0xed, 0xc5, 0xc0, 0xc3, 0x6d, 0x0b, 0x71, 0xf1, 0xe6, 0xe6,
0x8d, 0x74, 0xba, 0x95, 0xc5, 0xdc, 0x56, 0x26, 0xb5, 0xb4, 0x94, 0xab, 0xa5, 0x69, 0x58, 0x94,
0x5f, 0x1c, 0x16, 0x73, 0x0b, 0xad, 0x2c, 0x2c, 0xd4, 0xfa, 0x7d, 0x01, 0xae, 0x2c, 0x04, 0xf7,
0x2b, 0x5b, 0xb4, 0x0e, 0x4d, 0xdf, 0x3e, 0x15, 0xfa, 0xdd, 0x22, 0x36, 0x25, 0x24, 0xcf, 0xfa,
0x1f, 0xd8, 0x17, 0xc0, 0x72, 0x3e, 0xa3, 0x2e, 0xb5, 0x2d, 0x09, 0x90, 0x43, 0xa9, 0xee, 0xcb,
0xa9, 0xa9, 0xc5, 0x49, 0x80, 0x24, 0xcc, 0x8b, 0x61, 0x54, 0xba, 0x24, 0x8c, 0xac, 0x43, 0xa8,
0x27, 0x06, 0xb2, 0x1b, 0xe6, 0x61, 0xa9, 0x90, 0xbd, 0x97, 0x3e, 0x8e, 0x45, 0x84, 0xb6, 0xeb,
0x57, 0xa6, 0x77, 0xa1, 0xa2, 0x7b, 0xd4, 0xe2, 0x45, 0x84, 0x96, 0x58, 0x43, 0xa8, 0x19, 0x0e,
0xdb, 0x84, 0xea, 0xf1, 0x2c, 0x7d, 0x64, 0x31, 0xc7, 0x05, 0x7e, 0x8f, 0x0c, 0x02, 0xcf, 0x20,
0x8d, 0x60, 0xd7, 0xa0, 0x7c, 0x3c, 0xeb, 0x77, 0xf5, 0xad, 0x13, 0x4f, 0x32, 0xfc, 0xda, 0xab,
0x6a, 0x83, 0xac, 0xcf, 0x61, 0x39, 0x3f, 0x2e, 0x2d, 0xec, 0x85, 0x5c, 0x61, 0x4f, 0x8f, 0xec,
0xe2, 0xcb, 0xae, 0x1f, 0x1f, 0x01, 0xd0, 0x33, 0xf0, 0xeb, 0x5e, 0x5b, 0x7e, 0x0c, 0x35, 0xf3,
0x7c, 0xcc, 0x3e, 0x58, 0x78, 0x0e, 0x6f, 0xa5, 0x6f, 0xcb, 0x73, 0x6f, 0xe2, 0xd6, 0x3d, 0x6c,
0x60, 0xcf, 0x44, 0xd4, 0x75, 0xc7, 0xe3, 0xd7, 0x9d, 0xee, 0x1e, 0xb4, 0x1e, 0x87, 0xe1, 0x7f,
0x36, 0xf6, 0xe7, 0x50, 0xd5, 0xaf, 0xd8, 0x38, 0xc6, 0x43, 0x0b, 0xcc, 0x1e, 0x30, 0xdd, 0xe4,
0xe6, 0x4d, 0xe2, 0x1a, 0x80, 0xc8, 0x29, 0xce, 0x67, 0x36, 0x97, 0x90, 0xf3, 0x06, 0x70, 0x0d,
0xd8, 0xdc, 0x80, 0x9a, 0x79, 0x30, 0x65, 0x0d, 0xa8, 0x3c, 0x3e, 0x1c, 0xf6, 0x1e, 0xb5, 0x97,
0x58, 0x1d, 0xca, 0x07, 0x83, 0xe1, 0xa3, 0x76, 0x01, 0xa9, 0xc3, 0xc1, 0x61, 0xaf, 0x5d, 0xdc,
0xbc, 0x09, 0xcb, 0xf9, 0x27, 0x53, 0xd6, 0x84, 0xda, 0x70, 0xf7, 0xb0, 0xbb, 0x37, 0xf8, 0x59,
0x7b, 0x89, 0x2d, 0x43, 0xbd, 0x7f, 0x38, 0xec, 0xed, 0x3f, 0xe6, 0xbd, 0x76, 0x61, 0xf3, 0xa7,
0xd0, 0x48, 0x5f, 0x91, 0x50, 0xc3, 0x5e, 0xff, 0xb0, 0xdb, 0x5e, 0x62, 0x00, 0xd5, 0x61, 0x6f,
0x9f, 0xf7, 0x50, 0x6f, 0x0d, 0x4a, 0xc3, 0xe1, 0x41, 0xbb, 0x88, 0xb3, 0xee, 0xef, 0xee, 0x1f,
0xf4, 0xda, 0x25, 0x24, 0x1f, 0x3d, 0x3c, 0xba, 0x3f, 0x6c, 0x97, 0x37, 0x3f, 0x82, 0x2b, 0x0b,
0xef, 0x2b, 0x34, 0xfa, 0x60, 0x97, 0xf7, 0x50, 0x53, 0x13, 0x6a, 0x47, 0xbc, 0xff, 0x64, 0xf7,
0x51, 0xaf, 0x5d, 0x40, 0xc1, 0xe7, 0x83, 0xfd, 0x07, 0xbd, 0x6e, 0xbb, 0xb8, 0x77, 0xfd, 0xab,
0xe7, 0x6b, 0x85, 0x6f, 0x9e, 0xaf, 0x15, 0xbe, 0x7d, 0xbe, 0x56, 0xf8, 0xfb, 0xf3, 0xb5, 0xc2,
0x97, 0xdf, 0xaf, 0x2d, 0x7d, 0xf3, 0xfd, 0xda, 0xd2, 0xb7, 0xdf, 0xaf, 0x2d, 0x1d, 0x57, 0xe9,
0x7f, 0x90, 0x0f, 0xff, 0x1d, 0x00, 0x00, 0xff, 0xff, 0xd5, 0x3c, 0x38, 0x7a, 0x47, 0x19, 0x00,
0x00,
// 2627 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcf, 0x6f, 0x5b, 0xc7,
0xf1, 0x17, 0x7f, 0x93, 0x43, 0x8a, 0x66, 0xd6, 0x4e, 0xc2, 0xe8, 0xeb, 0xaf, 0xac, 0xbc, 0xe4,
0x1b, 0xc8, 0xb2, 0x2d, 0x21, 0x0a, 0x10, 0xe7, 0x6b, 0x04, 0x45, 0x25, 0x91, 0x8a, 0x18, 0xdb,
0xa2, 0xb0, 0xb4, 0x9c, 0x1e, 0x0a, 0x18, 0x4f, 0x8f, 0x4b, 0xea, 0x41, 0x8f, 0xef, 0x3d, 0xec,
0x5b, 0x5a, 0x62, 0x0f, 0x3d, 0xf4, 0xd4, 0x63, 0x80, 0x02, 0xbd, 0x15, 0xfd, 0x27, 0x7a, 0x6c,
0xef, 0x01, 0x72, 0x09, 0xd0, 0x1e, 0x82, 0x1e, 0xd2, 0xc2, 0xb9, 0xf4, 0x8f, 0x68, 0x81, 0x62,
0x66, 0xf7, 0xfd, 0x20, 0x25, 0xc3, 0x71, 0x5b, 0xf4, 0xc4, 0xd9, 0x99, 0xcf, 0xce, 0xce, 0xcc,
0xce, 0xec, 0xce, 0x5b, 0x42, 0x2d, 0x08, 0xa3, 0xcd, 0x50, 0x06, 0x2a, 0x60, 0xf9, 0xf0, 0x64,
0xe5, 0xde, 0xd8, 0x55, 0xa7, 0xd3, 0x93, 0x4d, 0x27, 0x98, 0x6c, 0x8d, 0x83, 0x71, 0xb0, 0x45,
0xa2, 0x93, 0xe9, 0x88, 0x46, 0x34, 0x20, 0x4a, 0x4f, 0xb1, 0xfe, 0x96, 0x87, 0x7c, 0x3f, 0x64,
0xef, 0x42, 0xd9, 0xf5, 0xc3, 0xa9, 0x8a, 0xda, 0xb9, 0xb5, 0xc2, 0x7a, 0x7d, 0xbb, 0xb6, 0x19,
0x9e, 0x6c, 0xf6, 0x90, 0xc3, 0x8d, 0x80, 0xad, 0x41, 0x51, 0x5c, 0x08, 0xa7, 0x9d, 0x5f, 0xcb,
0xad, 0xd7, 0xb7, 0x01, 0x01, 0xdd, 0x0b, 0xe1, 0xf4, 0xc3, 0x83, 0x25, 0x4e, 0x12, 0xf6, 0x01,
0x94, 0xa3, 0x60, 0x2a, 0x1d, 0xd1, 0x2e, 0x10, 0xa6, 0x81, 0x98, 0x01, 0x71, 0x08, 0x65, 0xa4,
0xa8, 0x69, 0xe4, 0x7a, 0xa2, 0x5d, 0x4c, 0x35, 0xed, 0xbb, 0x9e, 0xc6, 0x90, 0x84, 0xbd, 0x07,
0xa5, 0x93, 0xa9, 0xeb, 0x0d, 0xdb, 0x25, 0x82, 0xd4, 0x11, 0xb2, 0x8b, 0x0c, 0xc2, 0x68, 0x19,
0x82, 0x26, 0x42, 0x8e, 0x45, 0xbb, 0x9c, 0x82, 0x1e, 0x23, 0x43, 0x83, 0x48, 0x86, 0x6b, 0x0d,
0xdd, 0xd1, 0xa8, 0x5d, 0x49, 0xd7, 0xea, 0xb8, 0xa3, 0x91, 0x5e, 0x0b, 0x25, 0x6c, 0x1d, 0xaa,
0xa1, 0x67, 0xab, 0x51, 0x20, 0x27, 0x6d, 0x48, 0xed, 0x3e, 0x32, 0x3c, 0x9e, 0x48, 0xd9, 0x7d,
0xa8, 0x3b, 0x81, 0x1f, 0x29, 0x69, 0xbb, 0xbe, 0x8a, 0xda, 0x75, 0x02, 0xbf, 0x89, 0xe0, 0x2f,
0x02, 0x79, 0x26, 0xe4, 0x5e, 0x2a, 0xe4, 0x59, 0xe4, 0x6e, 0x11, 0xf2, 0x41, 0x68, 0xfd, 0x3a,
0x07, 0xd5, 0x58, 0x2b, 0xb3, 0xa0, 0xb1, 0x23, 0x9d, 0x53, 0x57, 0x09, 0x47, 0x4d, 0xa5, 0x68,
0xe7, 0xd6, 0x72, 0xeb, 0x35, 0x3e, 0xc7, 0x63, 0x4d, 0xc8, 0xf7, 0x07, 0x14, 0xef, 0x1a, 0xcf,
0xf7, 0x07, 0xac, 0x0d, 0x95, 0xa7, 0xb6, 0x74, 0x6d, 0x5f, 0x51, 0x80, 0x6b, 0x3c, 0x1e, 0xb2,
0x9b, 0x50, 0xeb, 0x0f, 0x9e, 0x0a, 0x19, 0xb9, 0x81, 0x4f, 0x61, 0xad, 0xf1, 0x94, 0xc1, 0x56,
0x01, 0xfa, 0x83, 0x7d, 0x61, 0xa3, 0xd2, 0xa8, 0x5d, 0x5a, 0x2b, 0xac, 0xd7, 0x78, 0x86, 0x63,
0xfd, 0x1c, 0x4a, 0xb4, 0xd5, 0xec, 0x73, 0x28, 0x0f, 0xdd, 0xb1, 0x88, 0x94, 0x36, 0x67, 0x77,
0xfb, 0xab, 0xef, 0x6e, 0x2d, 0xfd, 0xf9, 0xbb, 0x5b, 0x1b, 0x99, 0x9c, 0x0a, 0x42, 0xe1, 0x3b,
0x81, 0xaf, 0x6c, 0xd7, 0x17, 0x32, 0xda, 0x1a, 0x07, 0xf7, 0xf4, 0x94, 0xcd, 0x0e, 0xfd, 0x70,
0xa3, 0x81, 0xdd, 0x86, 0x92, 0xeb, 0x0f, 0xc5, 0x05, 0xd9, 0x5f, 0xd8, 0xbd, 0x6e, 0x54, 0xd5,
0xfb, 0x53, 0x15, 0x4e, 0x55, 0x0f, 0x45, 0x5c, 0x23, 0xac, 0xaf, 0x73, 0x50, 0xd6, 0xa9, 0xc4,
0x6e, 0x42, 0x71, 0x22, 0x94, 0x4d, 0xeb, 0xd7, 0xb7, 0xab, 0x7a, 0x4b, 0x95, 0xcd, 0x89, 0x8b,
0x59, 0x3a, 0x09, 0xa6, 0x18, 0xfb, 0x7c, 0x9a, 0xa5, 0x8f, 0x91, 0xc3, 0x8d, 0x80, 0xfd, 0x1f,
0x54, 0x7c, 0xa1, 0xce, 0x03, 0x79, 0x46, 0x31, 0x6a, 0xea, 0xb4, 0x38, 0x14, 0xea, 0x71, 0x30,
0x14, 0x3c, 0x96, 0xb1, 0xbb, 0x50, 0x8d, 0x84, 0x33, 0x95, 0xae, 0x9a, 0x51, 0xbc, 0x9a, 0xdb,
0x2d, 0x4a, 0x56, 0xc3, 0x23, 0x70, 0x82, 0x60, 0x77, 0xa0, 0x16, 0x09, 0x47, 0x0a, 0x25, 0xfc,
0xe7, 0x14, 0xbf, 0xfa, 0xf6, 0xb2, 0x81, 0x4b, 0xa1, 0xba, 0xfe, 0x73, 0x9e, 0xca, 0xad, 0xaf,
0xf3, 0x50, 0x44, 0x9b, 0x19, 0x83, 0xa2, 0x2d, 0xc7, 0xba, 0xa2, 0x6a, 0x9c, 0x68, 0xd6, 0x82,
0x02, 0xea, 0xc8, 0x13, 0x0b, 0x49, 0xe4, 0x38, 0xe7, 0x43, 0xb3, 0xa1, 0x48, 0xe2, 0xbc, 0x69,
0x24, 0xa4, 0xd9, 0x47, 0xa2, 0xd9, 0x6d, 0xa8, 0x85, 0x32, 0xb8, 0x98, 0x3d, 0xd3, 0x16, 0xa4,
0x59, 0x8a, 0x4c, 0x34, 0xa0, 0x1a, 0x1a, 0x8a, 0x6d, 0x00, 0x88, 0x0b, 0x25, 0xed, 0x83, 0x20,
0x52, 0x51, 0xbb, 0x4c, 0xd6, 0x52, 0xde, 0x23, 0xa3, 0x77, 0xc4, 0x33, 0x52, 0xb6, 0x02, 0xd5,
0xd3, 0x20, 0x52, 0xbe, 0x3d, 0x11, 0x54, 0x21, 0x35, 0x9e, 0x8c, 0x99, 0x05, 0xe5, 0xa9, 0xe7,
0x4e, 0x5c, 0xd5, 0xae, 0xa5, 0x3a, 0x8e, 0x89, 0xc3, 0x8d, 0x04, 0xb3, 0xd8, 0x19, 0xcb, 0x60,
0x1a, 0x1e, 0xd9, 0x52, 0xf8, 0x8a, 0xea, 0xa7, 0xc6, 0xe7, 0x78, 0xec, 0x53, 0x78, 0x47, 0x8a,
0x49, 0xf0, 0x5c, 0xd0, 0x46, 0x0d, 0xd4, 0xf4, 0x24, 0xe2, 0x18, 0xd8, 0xc8, 0x7d, 0x2e, 0xa8,
0x86, 0xaa, 0xfc, 0xe5, 0x00, 0xeb, 0x2e, 0x94, 0xb5, 0xdd, 0x18, 0x16, 0xa4, 0x4c, 0xa5, 0x10,
0x8d, 0x15, 0xd2, 0x3b, 0x8a, 0x2b, 0xa4, 0x77, 0x64, 0x75, 0xa0, 0xac, 0x2d, 0x44, 0xf4, 0x21,
0x7a, 0x65, 0xd0, 0x48, 0x23, 0x6f, 0x10, 0x8c, 0x94, 0xce, 0x48, 0x4e, 0x34, 0x69, 0xb5, 0xa5,
0x8e, 0x7f, 0x81, 0x13, 0x6d, 0x3d, 0x84, 0x5a, 0xb2, 0xb3, 0xb4, 0x44, 0xc7, 0xa8, 0xc9, 0xf7,
0x3a, 0x38, 0x81, 0xc2, 0xa5, 0x17, 0x25, 0x1a, 0xc3, 0x18, 0x84, 0xca, 0x0d, 0x7c, 0xdb, 0x23,
0x45, 0x55, 0x9e, 0x8c, 0xad, 0x3f, 0x16, 0xa0, 0x44, 0x8e, 0xb1, 0x75, 0xac, 0x88, 0x70, 0xaa,
0x3d, 0x28, 0xec, 0x32, 0x53, 0x11, 0x40, 0xb5, 0x97, 0x14, 0x04, 0xd6, 0xe1, 0x0a, 0x66, 0xa7,
0x27, 0x1c, 0x15, 0x48, 0xb3, 0x4e, 0x32, 0xc6, 0xf5, 0x87, 0x58, 0xa1, 0x3a, 0x61, 0x88, 0x66,
0x77, 0xa0, 0x1c, 0x50, 0x59, 0x51, 0xce, 0xbc, 0xa4, 0xd8, 0x0c, 0x04, 0x95, 0x4b, 0x61, 0x0f,
0x03, 0xdf, 0x9b, 0x51, 0x26, 0x55, 0x79, 0x32, 0xc6, 0x44, 0xa7, 0x3a, 0x7a, 0x32, 0x0b, 0xf5,
0xb1, 0xda, 0xd4, 0x89, 0xfe, 0x38, 0x66, 0xf2, 0x54, 0x8e, 0x07, 0xe7, 0x93, 0x49, 0x38, 0x8a,
0xfa, 0xa1, 0x6a, 0x5f, 0x4f, 0x53, 0x32, 0xe6, 0xf1, 0x44, 0x8a, 0x48, 0xc7, 0x76, 0x4e, 0x05,
0x22, 0x6f, 0xa4, 0xc8, 0x3d, 0xc3, 0xe3, 0x89, 0x34, 0xad, 0x34, 0x84, 0xbe, 0x49, 0xd0, 0x4c,
0xa5, 0x21, 0x36, 0x95, 0x63, 0x86, 0x0e, 0x06, 0x07, 0x88, 0x7c, 0x2b, 0x3d, 0xdd, 0x35, 0x87,
0x1b, 0x89, 0xf6, 0x36, 0x9a, 0x7a, 0xaa, 0xd7, 0x69, 0xbf, 0xad, 0x43, 0x19, 0x8f, 0xd9, 0xff,
0x43, 0x03, 0x4f, 0x32, 0xe1, 0x2b, 0xb2, 0xa4, 0xdd, 0x26, 0x87, 0xdf, 0x4c, 0x1c, 0xde, 0xcb,
0x08, 0xf9, 0x1c, 0xd4, 0x5a, 0x4d, 0x7d, 0xc7, 0x1d, 0x89, 0xdc, 0x9f, 0xe9, 0x54, 0x2b, 0x70,
0xa2, 0xad, 0x1e, 0x54, 0x63, 0xef, 0x2e, 0x65, 0xd0, 0x3d, 0xa8, 0x44, 0xa7, 0xb6, 0x74, 0xfd,
0x31, 0x6d, 0x6e, 0x73, 0xfb, 0x7a, 0x12, 0x8c, 0x81, 0xe6, 0xa3, 0x03, 0x31, 0xc6, 0x0a, 0xe2,
0x6c, 0xbc, 0x4a, 0x57, 0x0b, 0x0a, 0x53, 0x77, 0x48, 0x7a, 0x96, 0x39, 0x92, 0xc8, 0x19, 0xbb,
0x3a, 0x9f, 0x97, 0x39, 0x92, 0x68, 0xdf, 0x24, 0x18, 0xea, 0xeb, 0x76, 0x99, 0x13, 0x3d, 0x97,
0xb1, 0xa5, 0x85, 0x8c, 0xf5, 0xe2, 0xb0, 0xfe, 0x57, 0x56, 0xfb, 0x55, 0x0e, 0xaa, 0x71, 0x8f,
0x80, 0x37, 0x95, 0x3b, 0x14, 0xbe, 0x72, 0x47, 0xae, 0x90, 0x66, 0xe1, 0x0c, 0x87, 0xdd, 0x83,
0x92, 0xad, 0x94, 0x8c, 0xcf, 0xff, 0xb7, 0xb3, 0x0d, 0xc6, 0xe6, 0x0e, 0x4a, 0xba, 0xbe, 0x92,
0x33, 0xae, 0x51, 0x2b, 0x9f, 0x00, 0xa4, 0x4c, 0xb4, 0xf5, 0x4c, 0xcc, 0x8c, 0x56, 0x24, 0xd9,
0x0d, 0x28, 0x3d, 0xb7, 0xbd, 0x69, 0x5c, 0xcc, 0x7a, 0xf0, 0x20, 0xff, 0x49, 0xce, 0xfa, 0x43,
0x1e, 0x2a, 0xa6, 0xe1, 0x60, 0x77, 0xa1, 0x42, 0x0d, 0x87, 0xb1, 0xe8, 0xea, 0xca, 0x8d, 0x21,
0x6c, 0x2b, 0xe9, 0xa4, 0x32, 0x36, 0x1a, 0x55, 0xba, 0xa3, 0x32, 0x36, 0xa6, 0x7d, 0x55, 0x61,
0x28, 0x46, 0xa6, 0x65, 0x6a, 0x52, 0x83, 0x22, 0x46, 0xae, 0xef, 0x62, 0x7c, 0x38, 0x8a, 0xd8,
0xdd, 0xd8, 0xeb, 0x22, 0x69, 0x7c, 0x2b, 0xab, 0xf1, 0xb2, 0xd3, 0x3d, 0xa8, 0x67, 0x96, 0xb9,
0xc2, 0xeb, 0xf7, 0xb3, 0x5e, 0x9b, 0x25, 0x49, 0x9d, 0xee, 0xf7, 0xd2, 0x28, 0xfc, 0x1b, 0xf1,
0xfb, 0x18, 0x20, 0x55, 0xf9, 0xc3, 0x4f, 0x3e, 0xeb, 0xf7, 0x05, 0x80, 0x7e, 0x88, 0xd7, 0xe7,
0xd0, 0xa6, 0x0b, 0xbf, 0xe1, 0x8e, 0xfd, 0x40, 0x8a, 0x67, 0x74, 0x42, 0xd0, 0xfc, 0x2a, 0xaf,
0x6b, 0x1e, 0x55, 0x0c, 0xdb, 0x81, 0xfa, 0x50, 0x44, 0x8e, 0x74, 0x29, 0xa1, 0x4c, 0xd0, 0x6f,
0xa1, 0x4f, 0xa9, 0x9e, 0xcd, 0x4e, 0x8a, 0xd0, 0xb1, 0xca, 0xce, 0x61, 0xdb, 0xd0, 0x10, 0x17,
0x61, 0x20, 0x95, 0x59, 0x45, 0xf7, 0xa5, 0xd7, 0x74, 0x87, 0x8b, 0x7c, 0x7d, 0x02, 0xd4, 0x45,
0x3a, 0x60, 0x36, 0x14, 0x1d, 0x3b, 0x8c, 0x4c, 0x37, 0xd0, 0x5e, 0x58, 0x6f, 0xcf, 0x0e, 0x75,
0xd0, 0x76, 0x3f, 0x42, 0x5f, 0x7f, 0xf1, 0x97, 0x5b, 0x77, 0x32, 0x2d, 0xd4, 0x24, 0x38, 0x99,
0x6d, 0x51, 0xbe, 0x9c, 0xb9, 0x6a, 0x6b, 0xaa, 0x5c, 0x6f, 0xcb, 0x0e, 0x5d, 0x54, 0x87, 0x13,
0x7b, 0x1d, 0x4e, 0xaa, 0xd9, 0x27, 0xd0, 0x0c, 0x65, 0x30, 0x96, 0x22, 0x8a, 0x9e, 0xd1, 0x85,
0x6a, 0x1a, 0xdd, 0x37, 0xcc, 0xc5, 0x4f, 0x92, 0xcf, 0x50, 0xc0, 0x97, 0xc3, 0xec, 0x70, 0xe5,
0x47, 0xd0, 0x5a, 0xf4, 0xf8, 0x75, 0x76, 0x6f, 0xe5, 0x3e, 0xd4, 0x12, 0x0f, 0x5e, 0x35, 0xb1,
0x9a, 0xdd, 0xf6, 0xdf, 0xe5, 0xa0, 0xac, 0xeb, 0x91, 0xdd, 0x87, 0x9a, 0x17, 0x38, 0x36, 0x1a,
0x10, 0x7f, 0x54, 0xbc, 0x93, 0x96, 0xeb, 0xe6, 0xa3, 0x58, 0xa6, 0xf7, 0x23, 0xc5, 0x62, 0x7a,
0xba, 0xfe, 0x28, 0x88, 0xeb, 0xa7, 0x99, 0x4e, 0xea, 0xf9, 0xa3, 0x80, 0x6b, 0xe1, 0xca, 0x43,
0x68, 0xce, 0xab, 0xb8, 0xc2, 0xce, 0xf7, 0xe6, 0x13, 0x9d, 0x2e, 0x92, 0x64, 0x52, 0xd6, 0xec,
0xfb, 0x50, 0x4b, 0xf8, 0x6c, 0xe3, 0xb2, 0xe1, 0x8d, 0xec, 0xcc, 0x8c, 0xad, 0xd6, 0x2f, 0x73,
0x00, 0xa9, 0x6d, 0x78, 0xce, 0xe1, 0xe7, 0x8b, 0x9f, 0x36, 0x1e, 0xc9, 0x98, 0xee, 0x6d, 0x5b,
0xd9, 0x64, 0x4b, 0x83, 0x13, 0xcd, 0x36, 0x01, 0x86, 0x49, 0xad, 0xbf, 0xe4, 0x04, 0xc8, 0x20,
0x50, 0xbf, 0x67, 0xfb, 0xe3, 0xa9, 0x3d, 0x16, 0xa6, 0x3b, 0x4c, 0xc6, 0x56, 0x1f, 0xaa, 0xb1,
0x85, 0x6c, 0x0d, 0xea, 0x91, 0xb1, 0x0a, 0x3b, 0x70, 0x34, 0xa5, 0xc4, 0xb3, 0x2c, 0xec, 0xa4,
0xa5, 0xed, 0x8f, 0xc5, 0x5c, 0x27, 0xcd, 0x91, 0xc3, 0x8d, 0xc0, 0xfa, 0x02, 0x4a, 0xc4, 0xc0,
0xea, 0x8d, 0x94, 0x2d, 0x95, 0x69, 0xca, 0x75, 0xdf, 0x19, 0x44, 0x64, 0xd2, 0x6e, 0x11, 0xf3,
0x9b, 0x6b, 0x00, 0x7b, 0x1f, 0xbb, 0xdb, 0xa1, 0x09, 0xf7, 0x55, 0x38, 0x14, 0x5b, 0x9f, 0x42,
0x35, 0x66, 0x63, 0x54, 0x3c, 0xd7, 0x17, 0xc6, 0x44, 0xa2, 0xf1, 0x63, 0xc6, 0x39, 0xb5, 0xa5,
0xed, 0x28, 0xa1, 0xdb, 0x9f, 0x12, 0x4f, 0x19, 0xd6, 0x7b, 0x50, 0xcf, 0x14, 0x25, 0xe6, 0xe2,
0x53, 0xda, 0x63, 0x7d, 0x34, 0xe8, 0x81, 0xf5, 0x19, 0x2c, 0xcf, 0x15, 0x08, 0xde, 0x64, 0xee,
0x30, 0xbe, 0xc9, 0xf4, 0x2d, 0x75, 0xa9, 0x8b, 0x63, 0x50, 0x3c, 0x17, 0xf6, 0x99, 0xe9, 0xe0,
0x88, 0xb6, 0x7e, 0x8b, 0xdf, 0x6c, 0x71, 0x67, 0xfd, 0xbf, 0x00, 0xa7, 0x4a, 0x85, 0xcf, 0xa8,
0xd5, 0x36, 0xca, 0x6a, 0xc8, 0x21, 0x04, 0xbb, 0x05, 0x75, 0x1c, 0x44, 0x46, 0xae, 0x55, 0xd3,
0x8c, 0x48, 0x03, 0xfe, 0x07, 0x6a, 0xa3, 0x64, 0x7a, 0xc1, 0xe4, 0x47, 0x3c, 0xfb, 0x1d, 0xa8,
0xfa, 0x81, 0x91, 0xe9, 0xbd, 0xad, 0xf8, 0x41, 0x32, 0xcf, 0xf6, 0x3c, 0x23, 0x2b, 0xe9, 0x79,
0xb6, 0xe7, 0x91, 0xd0, 0xba, 0x03, 0x6f, 0x5c, 0xfa, 0xfa, 0x64, 0x6f, 0x41, 0x79, 0xe4, 0x7a,
0x8a, 0x6e, 0x2c, 0xfc, 0xd2, 0x30, 0x23, 0xeb, 0x1f, 0x39, 0x80, 0x34, 0xb7, 0xb0, 0x64, 0xf0,
0xea, 0x41, 0x4c, 0x43, 0x5f, 0x35, 0x1e, 0x54, 0x27, 0xe6, 0x10, 0x33, 0x99, 0x71, 0x73, 0x3e,
0x1f, 0x37, 0xe3, 0x33, 0x4e, 0x1f, 0x6f, 0xdb, 0xe6, 0x78, 0x7b, 0x9d, 0x2f, 0xc4, 0x64, 0x05,
0x6a, 0xe0, 0xb2, 0x0f, 0x06, 0x90, 0xd6, 0x3a, 0x37, 0x92, 0x95, 0x87, 0xb0, 0x3c, 0xb7, 0xe4,
0x0f, 0xbc, 0xd0, 0xd2, 0xc3, 0x38, 0x5b, 0xe8, 0xdb, 0x50, 0xd6, 0x2f, 0x0d, 0x6c, 0x1d, 0x2a,
0xb6, 0xa3, 0x6b, 0x3c, 0x73, 0xce, 0xa0, 0x70, 0x87, 0xd8, 0x3c, 0x16, 0x5b, 0x7f, 0xca, 0x03,
0xa4, 0xfc, 0xd7, 0xe8, 0xe2, 0x1f, 0x40, 0x33, 0x12, 0x4e, 0xe0, 0x0f, 0x6d, 0x39, 0x23, 0xa9,
0xf9, 0x14, 0xbe, 0x6a, 0xca, 0x02, 0x32, 0xd3, 0xd1, 0x17, 0x5e, 0xdd, 0xd1, 0xaf, 0x43, 0xd1,
0x09, 0xc2, 0x99, 0xb9, 0xb7, 0xd8, 0xbc, 0x23, 0x7b, 0x41, 0x38, 0x3b, 0x58, 0xe2, 0x84, 0x60,
0x9b, 0x50, 0x9e, 0x9c, 0xd1, 0xdb, 0x8b, 0xfe, 0x86, 0xbc, 0x31, 0x8f, 0x7d, 0x7c, 0x86, 0xf4,
0xc1, 0x12, 0x37, 0x28, 0x76, 0x07, 0x4a, 0x93, 0xb3, 0xa1, 0x2b, 0xcd, 0xcd, 0x73, 0x7d, 0x11,
0xde, 0x71, 0x25, 0x3d, 0xb5, 0x20, 0x86, 0x59, 0x90, 0x97, 0x13, 0xf3, 0xd0, 0xd2, 0x5a, 0x88,
0xe6, 0xe4, 0x60, 0x89, 0xe7, 0xe5, 0x64, 0xb7, 0x0a, 0x65, 0x1d, 0x57, 0xeb, 0xef, 0x05, 0x68,
0xce, 0x5b, 0x89, 0x3b, 0x1b, 0x49, 0x27, 0xde, 0xd9, 0x48, 0x3a, 0xc9, 0xc7, 0x4e, 0x3e, 0xf3,
0xb1, 0x63, 0x41, 0x29, 0x38, 0xf7, 0x85, 0xcc, 0x3e, 0x32, 0xed, 0x9d, 0x06, 0xe7, 0x3e, 0x76,
0xcd, 0x5a, 0x34, 0xd7, 0x84, 0x96, 0x4c, 0x13, 0xfa, 0x3e, 0x2c, 0x8f, 0x02, 0xcf, 0x0b, 0xce,
0x07, 0xb3, 0x89, 0xe7, 0xfa, 0x67, 0xa6, 0x13, 0x9d, 0x67, 0xb2, 0x75, 0xb8, 0x36, 0x74, 0x25,
0x9a, 0x63, 0xba, 0xff, 0x88, 0x7c, 0xaf, 0xf2, 0x45, 0x36, 0xfb, 0x1c, 0xd6, 0x6c, 0xa5, 0xc4,
0x24, 0x54, 0xc7, 0x7e, 0x68, 0x3b, 0x67, 0x9d, 0xc0, 0xa1, 0x2a, 0x9c, 0x84, 0xb6, 0x72, 0x4f,
0x5c, 0xcf, 0x55, 0x33, 0x0a, 0x46, 0x95, 0xbf, 0x12, 0xc7, 0x3e, 0x80, 0xa6, 0x23, 0x85, 0xad,
0x44, 0x47, 0x44, 0xea, 0xc8, 0x56, 0xa7, 0xed, 0x2a, 0xcd, 0x5c, 0xe0, 0xa2, 0x0f, 0x36, 0x5a,
0xfb, 0x85, 0xeb, 0x0d, 0x1d, 0xfc, 0x6c, 0xad, 0x69, 0x1f, 0xe6, 0x98, 0x6c, 0x13, 0x18, 0x31,
0xba, 0x93, 0x50, 0xcd, 0x12, 0x28, 0x10, 0xf4, 0x0a, 0x09, 0x1e, 0xb8, 0xca, 0x9d, 0x88, 0x48,
0xd9, 0x93, 0x90, 0xbe, 0xc8, 0x0b, 0x3c, 0x65, 0xb0, 0xdb, 0xd0, 0x72, 0x7d, 0xc7, 0x9b, 0x0e,
0xc5, 0xb3, 0x10, 0x1d, 0x91, 0x7e, 0xd4, 0x6e, 0xd0, 0xa9, 0x72, 0xcd, 0xf0, 0x8f, 0x0c, 0x1b,
0xa1, 0xe2, 0x62, 0x01, 0xba, 0xac, 0xa1, 0x86, 0x1f, 0x43, 0xad, 0x2f, 0x73, 0xd0, 0x5a, 0x4c,
0x3c, 0xdc, 0xb6, 0x10, 0x9d, 0x37, 0x1f, 0xed, 0x48, 0x27, 0x5b, 0x99, 0xcf, 0x6c, 0x65, 0x7c,
0x97, 0x16, 0x32, 0x77, 0x69, 0x92, 0x16, 0xc5, 0x97, 0xa7, 0xc5, 0x9c, 0xa3, 0xa5, 0x05, 0x47,
0xad, 0xdf, 0xe4, 0xe0, 0xda, 0x42, 0x72, 0xff, 0x60, 0x8b, 0xd6, 0xa0, 0x3e, 0xb1, 0xcf, 0x84,
0x7e, 0xf2, 0x88, 0xcc, 0x15, 0x92, 0x65, 0xfd, 0x07, 0xec, 0xf3, 0xa1, 0x91, 0xad, 0xa8, 0x2b,
0x6d, 0x8b, 0x13, 0xe4, 0x30, 0x50, 0xfb, 0xc1, 0xd4, 0xdc, 0xc5, 0x71, 0x82, 0xc4, 0xcc, 0xcb,
0x69, 0x54, 0xb8, 0x22, 0x8d, 0xac, 0x43, 0xa8, 0xc6, 0x06, 0xb2, 0x5b, 0xe6, 0x4d, 0x2a, 0x97,
0x3e, 0xb5, 0x1e, 0x47, 0x42, 0xa2, 0xed, 0xfa, 0x81, 0xea, 0x5d, 0x28, 0xe9, 0x1e, 0x35, 0x7f,
0x19, 0xa1, 0x25, 0xd6, 0x00, 0x2a, 0x86, 0xc3, 0x36, 0xa0, 0x7c, 0x32, 0x4b, 0xde, 0x67, 0xcc,
0x71, 0x81, 0xe3, 0xa1, 0x41, 0xe0, 0x19, 0xa4, 0x11, 0xec, 0x06, 0x14, 0x4f, 0x66, 0xbd, 0x8e,
0xfe, 0xea, 0xc4, 0x93, 0x0c, 0x47, 0xbb, 0x65, 0x6d, 0x90, 0xf5, 0x08, 0x1a, 0xd9, 0x79, 0xc9,
0xc5, 0x9e, 0xcb, 0x5c, 0xec, 0xc9, 0x91, 0x9d, 0x7f, 0xd5, 0xe7, 0xc7, 0xc7, 0x00, 0xf4, 0x82,
0xfc, 0xba, 0x9f, 0x2d, 0x1f, 0x42, 0xc5, 0xbc, 0x3c, 0xb3, 0x0f, 0x16, 0x5e, 0xd2, 0x9b, 0xc9,
0xb3, 0xf4, 0xdc, 0x73, 0xba, 0xf5, 0x00, 0x1b, 0xd8, 0x73, 0x21, 0x3b, 0xee, 0x68, 0xf4, 0xba,
0xcb, 0x3d, 0x80, 0xe6, 0x71, 0x18, 0xfe, 0x6b, 0x73, 0x7f, 0x0a, 0x65, 0xfd, 0x00, 0x8e, 0x73,
0x3c, 0xb4, 0xc0, 0xec, 0x01, 0xd3, 0x4d, 0x6e, 0xd6, 0x24, 0xae, 0x01, 0x88, 0x9c, 0xe2, 0x7a,
0x66, 0x73, 0x09, 0x39, 0x6f, 0x00, 0xd7, 0x80, 0x8d, 0x75, 0xa8, 0x98, 0xb7, 0x56, 0x56, 0x83,
0xd2, 0xf1, 0xe1, 0xa0, 0xfb, 0xa4, 0xb5, 0xc4, 0xaa, 0x50, 0x3c, 0xe8, 0x0f, 0x9e, 0xb4, 0x72,
0x48, 0x1d, 0xf6, 0x0f, 0xbb, 0xad, 0xfc, 0xc6, 0x6d, 0x68, 0x64, 0x5f, 0x5b, 0x59, 0x1d, 0x2a,
0x83, 0x9d, 0xc3, 0xce, 0x6e, 0xff, 0x27, 0xad, 0x25, 0xd6, 0x80, 0x6a, 0xef, 0x70, 0xd0, 0xdd,
0x3b, 0xe6, 0xdd, 0x56, 0x6e, 0xe3, 0xc7, 0x50, 0x4b, 0x1e, 0xa0, 0x50, 0xc3, 0x6e, 0xef, 0xb0,
0xd3, 0x5a, 0x62, 0x00, 0xe5, 0x41, 0x77, 0x8f, 0x77, 0x51, 0x6f, 0x05, 0x0a, 0x83, 0xc1, 0x41,
0x2b, 0x8f, 0xab, 0xee, 0xed, 0xec, 0x1d, 0x74, 0x5b, 0x05, 0x24, 0x9f, 0x3c, 0x3e, 0xda, 0x1f,
0xb4, 0x8a, 0x1b, 0x1f, 0xc2, 0x1b, 0x97, 0x5e, 0x74, 0x70, 0xc5, 0x4e, 0x77, 0x7f, 0xe7, 0xf8,
0x11, 0x9a, 0x58, 0x86, 0x7c, 0xff, 0x50, 0x2b, 0xea, 0xef, 0xef, 0xb7, 0xf2, 0x1b, 0x1f, 0xc3,
0xb5, 0x85, 0x27, 0x19, 0x5a, 0xf0, 0x60, 0x87, 0x77, 0x71, 0xf1, 0x3a, 0x54, 0x8e, 0x78, 0xef,
0xe9, 0xce, 0x93, 0x6e, 0x2b, 0x87, 0x82, 0x47, 0xfd, 0xbd, 0x87, 0xdd, 0x4e, 0x2b, 0xbf, 0x7b,
0xf3, 0xab, 0x17, 0xab, 0xb9, 0x6f, 0x5e, 0xac, 0xe6, 0xbe, 0x7d, 0xb1, 0x9a, 0xfb, 0xeb, 0x8b,
0xd5, 0xdc, 0x97, 0xdf, 0xaf, 0x2e, 0x7d, 0xf3, 0xfd, 0xea, 0xd2, 0xb7, 0xdf, 0xaf, 0x2e, 0x9d,
0x94, 0xe9, 0x5f, 0x97, 0x8f, 0xfe, 0x19, 0x00, 0x00, 0xff, 0xff, 0xe8, 0xd7, 0x00, 0x96, 0xb5,
0x19, 0x00, 0x00,
}
func (m *Op) Marshal() (dAtA []byte, err error) {
@ -3646,6 +3687,13 @@ func (m *Mount) MarshalToSizedBuffer(dAtA []byte) (int, error) {
_ = i
var l int
_ = l
if m.ContentCache != 0 {
i = encodeVarintOps(dAtA, i, uint64(m.ContentCache))
i--
dAtA[i] = 0x1
i--
dAtA[i] = 0xc0
}
if len(m.ResultID) > 0 {
i -= len(m.ResultID)
copy(dAtA[i:], m.ResultID)
@ -5869,6 +5917,9 @@ func (m *Mount) Size() (n int) {
if l > 0 {
n += 2 + l + sovOps(uint64(l))
}
if m.ContentCache != 0 {
n += 2 + sovOps(uint64(m.ContentCache))
}
return n
}
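The magic bytes in the marshal hunk above (0x1 then 0xc0, written back-to-front by MarshalToSizedBuffer) and the "2 +" in Size() are the two-byte protobuf key for field 24 with wire type 0, i.e. (24<<3)|0 = 192 encoded as a varint. A small standalone sketch (plain Go, not part of the generated code) that reproduces the encoding:

package main

import "fmt"

func main() {
	const fieldNum, wireType = 24, 0 // contentCache, varint wire type
	key := uint64(fieldNum<<3 | wireType) // (24<<3)|0 = 192

	// Varint-encode the key: 192 does not fit in 7 bits, so it takes two bytes.
	var buf []byte
	for key >= 0x80 {
		buf = append(buf, byte(key&0x7f)|0x80)
		key >>= 7
	}
	buf = append(buf, byte(key))

	fmt.Printf("% x\n", buf) // c0 01, the same bytes the marshal hunk writes in reverse order
}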
@ -8576,6 +8627,25 @@ func (m *Mount) Unmarshal(dAtA []byte) error {
}
m.ResultID = string(dAtA[iNdEx:postIndex])
iNdEx = postIndex
case 24:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field ContentCache", wireType)
}
m.ContentCache = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowOps
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
m.ContentCache |= MountContentCache(b&0x7F) << shift
if b < 0x80 {
break
}
}
default:
iNdEx = preIndex
skippy, err := skipOps(dAtA[iNdEx:])


@ -109,6 +109,7 @@ message Mount {
SecretOpt secretOpt = 21;
SSHOpt SSHOpt = 22;
string resultID = 23;
MountContentCache contentCache = 24;
}
// MountType defines a type of a mount from a supported set
@ -120,6 +121,13 @@ enum MountType {
TMPFS = 4;
}
// MountContentCache ...
enum MountContentCache {
DEFAULT = 0;
ON = 1;
OFF = 2;
}
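The new contentCache field and MountContentCache enum add a per-mount content-cache toggle to the LLB Mount message. A minimal sketch (not part of this commit) of setting it on a cache mount through the generated Go types; the CacheOpt field names are assumed from the existing message:

package main

import (
	"fmt"

	"github.com/moby/buildkit/solver/pb"
)

func main() {
	m := &pb.Mount{
		Dest:         "/root/.cache/go-build",
		MountType:    pb.MountType_CACHE,
		CacheOpt:     &pb.CacheOpt{ID: "go-build-cache", Sharing: pb.CacheSharingOpt_SHARED},
		ContentCache: pb.MountContentCache_ON, // DEFAULT and OFF are the other enum values
	}
	fmt.Println(m.GetContentCache()) // ON
}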
// TmpfsOpt defines options describing tmpfs mounts
message TmpfsOpt {
// Specify an upper limit on the size of the filesystem.


@ -1,161 +0,0 @@
package sourcepolicy
import (
"context"
"github.com/moby/buildkit/solver/pb"
spb "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
var (
// ErrSourceDenied is returned by the policy engine when a source is denied by the policy.
ErrSourceDenied = errors.New("source denied by policy")
// ErrTooManyOps is returned by the policy engine when there are too many converts for a single source op.
ErrTooManyOps = errors.New("too many operations")
)
// Engine is the source policy engine.
// It is responsible for evaluating a source policy against a source operation.
// Create one with `NewEngine`
//
// Rule matching is delegated to the `Matcher` interface.
// Mutations are delegated to the `Mutater` interface.
type Engine struct {
pol []*spb.Policy
sources map[string]*selectorCache
}
// NewEngine creates a new source policy engine.
func NewEngine(pol []*spb.Policy) *Engine {
return &Engine{
pol: pol,
}
}
// TODO: The key here can't be used to cache attr constraint regexes.
func (e *Engine) selectorCache(src *spb.Selector) *selectorCache {
if e.sources == nil {
e.sources = map[string]*selectorCache{}
}
key := src.MatchType.String() + " " + src.Identifier
if s, ok := e.sources[key]; ok {
return s
}
s := &selectorCache{Selector: src}
e.sources[key] = s
return s
}
// Evaluate evaluates a source operation against the policy.
//
// Policies are re-evaluated for each convert rule.
// Evaluate will error if there are too many converts for a single source op, to prevent infinite loops.
// This function may error out even if the op was mutated, in which case `true` will be returned along with the error.
//
// An error is returned when the source is denied by the policy.
func (e *Engine) Evaluate(ctx context.Context, op *pb.Op) (bool, error) {
if len(e.pol) == 0 {
return false, nil
}
var mutated bool
const maxIterr = 20
for i := 0; ; i++ {
if i > maxIterr {
return mutated, errors.Wrapf(ErrTooManyOps, "too many mutations on a single source")
}
srcOp := op.GetSource()
if srcOp == nil {
return false, nil
}
if i == 0 {
ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithField("orig", *srcOp).WithField("updated", op.GetSource()))
}
mut, err := e.evaluatePolicies(ctx, srcOp)
if mut {
mutated = true
}
if err != nil {
return mutated, err
}
if !mut {
break
}
}
return mutated, nil
}
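A minimal sketch (not part of this diff) of driving the engine as it stood before this removal, using only the policy fields referenced in the deleted code; the identifier format follows the docker-image:// scheme used by the image resolver:

package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/sourcepolicy"
	spb "github.com/moby/buildkit/sourcepolicy/pb"
)

func main() {
	// Deny any busybox tag via a wildcard selector.
	pol := &spb.Policy{Rules: []*spb.Rule{{
		Action: spb.PolicyAction_DENY,
		Selector: &spb.Selector{
			MatchType:  spb.MatchType_WILDCARD,
			Identifier: "docker-image://docker.io/library/busybox:*",
		},
	}}}

	op := &pb.Op{Op: &pb.Op_Source{Source: &pb.SourceOp{
		Identifier: "docker-image://docker.io/library/busybox:latest",
	}}}

	mutated, err := sourcepolicy.NewEngine([]*spb.Policy{pol}).Evaluate(context.TODO(), op)
	fmt.Println(mutated, err) // false, source "..." denied by policy
}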
func (e *Engine) evaluatePolicies(ctx context.Context, srcOp *pb.SourceOp) (bool, error) {
for _, pol := range e.pol {
mut, err := e.evaluatePolicy(ctx, pol, srcOp)
if mut || err != nil {
return mut, err
}
}
return false, nil
}
// evaluatePolicy evaluates a single policy against a source operation.
// If the source is mutated the policy is short-circuited and `true` is returned.
// If the source is denied, an error will be returned.
//
// For Allow/Deny rules, the last matching rule wins.
// E.g. `ALLOW foo; DENY foo` will deny `foo`, `DENY foo; ALLOW foo` will allow `foo`.
func (e *Engine) evaluatePolicy(ctx context.Context, pol *spb.Policy, srcOp *pb.SourceOp) (retMut bool, retErr error) {
ident := srcOp.GetIdentifier()
ctx = bklog.WithLogger(ctx, bklog.G(ctx).WithField("ref", ident))
defer func() {
if retMut || retErr != nil {
bklog.G(ctx).WithFields(
logrus.Fields{
"mutated": retMut,
"updated": srcOp.GetIdentifier(),
logrus.ErrorKey: retErr,
}).Debug("Evaluated source policy")
}
}()
var deny bool
for _, rule := range pol.Rules {
selector := e.selectorCache(rule.Selector)
matched, err := match(ctx, selector, ident, srcOp.Attrs)
if err != nil {
return false, errors.Wrap(err, "error matching source policy")
}
if !matched {
continue
}
switch rule.Action {
case spb.PolicyAction_ALLOW:
deny = false
case spb.PolicyAction_DENY:
deny = true
case spb.PolicyAction_CONVERT:
mut, err := mutate(ctx, srcOp, rule, selector, ident)
if err != nil || mut {
return mut, errors.Wrap(err, "error mutating source policy")
}
default:
return false, errors.Errorf("source policy: rule %s %s: unknown type %q", rule.Action, rule.Selector.Identifier, ident)
}
}
if deny {
return false, errors.Wrapf(ErrSourceDenied, "source %q denied by policy", ident)
}
return false, nil
}


@ -1,92 +0,0 @@
package sourcepolicy
import (
"regexp"
spb "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/moby/buildkit/util/wildcard"
"github.com/pkg/errors"
)
// selectorCache wraps a protobuf source in order to store cached state such as the compiled regexes.
type selectorCache struct {
*spb.Selector
re *regexp.Regexp
w *wildcardCache
}
// Format formats the provided ref according to the match/type of the source.
//
// For example, if the source is a wildcard, the ref will be formatted with the wildcard in the source replacing the parameters in the destination.
//
// matcher: wildcard source: "docker.io/library/golang:*" match: "docker.io/library/golang:1.19" format: "docker.io/library/golang:${1}-alpine" result: "docker.io/library/golang:1.19-alpine"
func (s *selectorCache) Format(match, format string) (string, error) {
switch s.MatchType {
case spb.MatchType_EXACT:
return s.Identifier, nil
case spb.MatchType_REGEX:
re, err := s.regex()
if err != nil {
return "", err
}
return re.ReplaceAllString(match, format), nil
case spb.MatchType_WILDCARD:
w, err := s.wildcard()
if err != nil {
return "", err
}
m := w.Match(match)
if m == nil {
return match, nil
}
return m.Format(format)
}
return "", errors.Errorf("unknown match type: %s", s.MatchType)
}
// wildcardCache wraps a wildcard.Wildcard to cache returned matches by ref.
// This way a match only needs to be computed once per ref.
type wildcardCache struct {
w *wildcard.Wildcard
m map[string]*wildcard.Match
}
func (w *wildcardCache) Match(ref string) *wildcard.Match {
if w.m == nil {
w.m = make(map[string]*wildcard.Match)
}
if m, ok := w.m[ref]; ok {
return m
}
m := w.w.Match(ref)
w.m[ref] = m
return m
}
func (s *selectorCache) wildcard() (*wildcardCache, error) {
if s.w != nil {
return s.w, nil
}
w, err := wildcard.New(s.Identifier)
if err != nil {
return nil, err
}
s.w = &wildcardCache{w: w}
return s.w, nil
}
func (s *selectorCache) regex() (*regexp.Regexp, error) {
if s.re != nil {
return s.re, nil
}
re, err := regexp.Compile(s.Identifier)
if err != nil {
return nil, err
}
s.re = re
return re, nil
}


@ -1,61 +0,0 @@
package sourcepolicy
import (
"context"
"regexp"
spb "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/pkg/errors"
)
func match(ctx context.Context, src *selectorCache, ref string, attrs map[string]string) (bool, error) {
for _, c := range src.Constraints {
if c == nil {
return false, errors.Errorf("invalid nil constraint for %v", src)
}
switch c.Condition {
case spb.AttrMatch_EQUAL:
if attrs[c.Key] != c.Value {
return false, nil
}
case spb.AttrMatch_NOTEQUAL:
if attrs[c.Key] == c.Value {
return false, nil
}
case spb.AttrMatch_MATCHES:
// TODO: Cache the compiled regex
matches, err := regexp.MatchString(c.Value, attrs[c.Key])
if err != nil {
return false, errors.Errorf("invalid regex %q: %v", c.Value, err)
}
if !matches {
return false, nil
}
default:
return false, errors.Errorf("unknown attr condition: %s", c.Condition)
}
}
if src.Identifier == ref {
return true, nil
}
switch src.MatchType {
case spb.MatchType_EXACT:
return false, nil
case spb.MatchType_REGEX:
re, err := src.regex()
if err != nil {
return false, err
}
return re.MatchString(ref), nil
case spb.MatchType_WILDCARD:
w, err := src.wildcard()
if err != nil {
return false, err
}
return w.Match(ref) != nil, nil
default:
return false, errors.Errorf("unknown match type: %s", src.MatchType)
}
}


@ -1,50 +0,0 @@
package sourcepolicy
import (
"context"
"github.com/moby/buildkit/solver/pb"
spb "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/moby/buildkit/util/bklog"
"github.com/pkg/errors"
)
// mutate is a MutateFn which converts the source operation to the identifier and attributes provided by the policy.
// If there is no change, then the return value should be false and is not considered an error.
func mutate(ctx context.Context, op *pb.SourceOp, rule *spb.Rule, selector *selectorCache, ref string) (bool, error) {
if rule.Updates == nil {
return false, errors.Errorf("missing destination for convert rule")
}
dest := rule.Updates.Identifier
if dest == "" {
dest = rule.Selector.Identifier
}
dest, err := selector.Format(ref, dest)
if err != nil {
return false, errors.Wrap(err, "error formatting destination")
}
bklog.G(ctx).Debugf("sourcepolicy: converting %s to %s, pattern: %s", ref, dest, rule.Updates.Identifier)
var mutated bool
if op.Identifier != dest && dest != "" {
mutated = true
op.Identifier = dest
}
if rule.Updates.Attrs != nil {
if op.Attrs == nil {
op.Attrs = make(map[string]string, len(rule.Updates.Attrs))
}
for k, v := range rule.Updates.Attrs {
if op.Attrs[k] != v {
bklog.G(ctx).Debugf("setting attr %s=%s", k, v)
op.Attrs[k] = v
mutated = true
}
}
}
return mutated, nil
}
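Continuing the sketch after Evaluate above, a CONVERT rule exercises this mutate path; the update message type is assumed here to be spb.Update, which is not shown in this diff:

package main

import (
	"context"
	"fmt"

	"github.com/moby/buildkit/solver/pb"
	"github.com/moby/buildkit/sourcepolicy"
	spb "github.com/moby/buildkit/sourcepolicy/pb"
)

func main() {
	// Rewrite any busybox tag to a pinned alpine reference.
	pol := &spb.Policy{Rules: []*spb.Rule{{
		Action: spb.PolicyAction_CONVERT,
		Selector: &spb.Selector{
			MatchType:  spb.MatchType_WILDCARD,
			Identifier: "docker-image://docker.io/library/busybox:*",
		},
		Updates: &spb.Update{Identifier: "docker-image://docker.io/library/alpine:3.19"},
	}}}

	op := &pb.Op{Op: &pb.Op_Source{Source: &pb.SourceOp{
		Identifier: "docker-image://docker.io/library/busybox:latest",
	}}}

	mutated, err := sourcepolicy.NewEngine([]*spb.Policy{pol}).Evaluate(context.TODO(), op)
	fmt.Println(mutated, err, op.GetSource().Identifier)
	// true <nil> docker-image://docker.io/library/alpine:3.19
}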


@ -4,7 +4,6 @@ import (
"context"
"encoding/json"
"fmt"
"strings"
"sync"
"time"
@ -16,10 +15,7 @@ import (
"github.com/containerd/containerd/remotes"
"github.com/containerd/containerd/remotes/docker"
intoto "github.com/in-toto/in-toto-golang/in_toto"
"github.com/moby/buildkit/solver/pb"
srctypes "github.com/moby/buildkit/source/types"
"github.com/moby/buildkit/sourcepolicy"
spb "github.com/moby/buildkit/sourcepolicy/pb"
"github.com/moby/buildkit/util/contentutil"
"github.com/moby/buildkit/util/leaseutil"
"github.com/moby/buildkit/util/resolver/limited"
@ -63,8 +59,7 @@ func (e ResolveToNonImageError) Error() string {
return fmt.Sprintf("ref mutated by policy to non-image: %s://%s -> %s", srctypes.DockerImageScheme, e.Ref, e.Updated)
}
func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *ocispecs.Platform, spls []*spb.Policy) (string, digest.Digest, []byte, error) {
// TODO: fix buildkit to take interface instead of struct
func Config(ctx context.Context, str string, resolver remotes.Resolver, cache ContentCache, leaseManager leases.Manager, p *ocispecs.Platform) (digest.Digest, []byte, error) {
var platform platforms.MatchComparer
if p != nil {
platform = platforms.Only(*p)
@ -73,44 +68,13 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
}
ref, err := reference.Parse(str)
if err != nil {
return "", "", nil, errors.WithStack(err)
}
op := &pb.Op{
Op: &pb.Op_Source{
Source: &pb.SourceOp{
Identifier: srctypes.DockerImageScheme + "://" + ref.String(),
},
},
}
mut, err := sourcepolicy.NewEngine(spls).Evaluate(ctx, op)
if err != nil {
return "", "", nil, errors.Wrap(err, "could not resolve image due to policy")
}
if mut {
var (
t string
ok bool
)
t, newRef, ok := strings.Cut(op.GetSource().GetIdentifier(), "://")
if !ok {
return "", "", nil, errors.Errorf("could not parse ref: %s", op.GetSource().GetIdentifier())
}
if ok && t != srctypes.DockerImageScheme {
return "", "", nil, &ResolveToNonImageError{Ref: str, Updated: newRef}
}
ref, err = reference.Parse(newRef)
if err != nil {
return "", "", nil, errors.WithStack(err)
}
return "", nil, errors.WithStack(err)
}
if leaseManager != nil {
ctx2, done, err := leaseutil.WithLease(ctx, leaseManager, leases.WithExpiration(5*time.Minute), leaseutil.MakeTemporary)
if err != nil {
return "", "", nil, errors.WithStack(err)
return "", nil, errors.WithStack(err)
}
ctx = ctx2
defer func() {
@ -141,18 +105,18 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
if desc.MediaType == "" {
_, desc, err = resolver.Resolve(ctx, ref.String())
if err != nil {
return "", "", nil, err
return "", nil, err
}
}
fetcher, err := resolver.Fetcher(ctx, ref.String())
if err != nil {
return "", "", nil, err
return "", nil, err
}
if desc.MediaType == images.MediaTypeDockerSchema1Manifest {
dgst, dt, err := readSchema1Config(ctx, ref.String(), desc, fetcher, cache)
return ref.String(), dgst, dt, err
return dgst, dt, err
}
children := childrenConfigHandler(cache, platform)
@ -160,7 +124,7 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
dslHandler, err := docker.AppendDistributionSourceLabel(cache, ref.String())
if err != nil {
return "", "", nil, err
return "", nil, err
}
handlers := []images.Handler{
@ -169,19 +133,19 @@ func Config(ctx context.Context, str string, resolver remotes.Resolver, cache Co
children,
}
if err := images.Dispatch(ctx, images.Handlers(handlers...), nil, desc); err != nil {
return "", "", nil, err
return "", nil, err
}
config, err := images.Config(ctx, cache, desc, platform)
if err != nil {
return "", "", nil, err
return "", nil, err
}
dt, err := content.ReadBlob(ctx, cache, config)
if err != nil {
return "", "", nil, err
return "", nil, err
}
return ref.String(), desc.Digest, dt, nil
return desc.Digest, dt, nil
}
func childrenConfigHandler(provider content.Provider, platform platforms.MatchComparer) images.HandlerFunc {


@ -8,7 +8,7 @@ import (
"time"
"github.com/containerd/containerd/remotes"
"github.com/moby/buildkit/exporter/containerimage/image"
dockerspec "github.com/moby/docker-image-spec/specs-go/v1"
digest "github.com/opencontainers/go-digest"
ocispecs "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
@ -45,7 +45,7 @@ func convertSchema1ConfigMeta(in []byte) ([]byte, error) {
return nil, errors.Errorf("invalid schema1 manifest")
}
var img image.Image
var img dockerspec.DockerOCIImage
if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), &img); err != nil {
return nil, errors.Wrap(err, "failed to unmarshal image from schema 1 history")
}


@ -16,12 +16,23 @@ import (
"github.com/containerd/continuity/fs/fstest"
"github.com/pkg/errors"
"github.com/stretchr/testify/require"
"github.com/tonistiigi/fsutil"
"golang.org/x/sync/errgroup"
)
var ErrRequirements = errors.Errorf("missing requirements")
func Tmpdir(t *testing.T, appliers ...fstest.Applier) string {
type TmpDirWithName struct {
fsutil.FS
Name string
}
// This allows TmpDirWithName to continue being used with the `%s` 'verb' on Printf.
func (d *TmpDirWithName) String() string {
return d.Name
}
func Tmpdir(t *testing.T, appliers ...fstest.Applier) *TmpDirWithName {
t.Helper()
// We cannot use t.TempDir() to create a temporary directory here because
@ -38,7 +49,10 @@ func Tmpdir(t *testing.T, appliers ...fstest.Applier) string {
err = fstest.Apply(appliers...).Apply(tmpdir)
require.NoError(t, err)
return tmpdir
mount, err := fsutil.NewFS(tmpdir)
require.NoError(t, err)
return &TmpDirWithName{FS: mount, Name: tmpdir}
}
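A short sketch (not part of this diff) of how a test consumes the new return type; the integration import path is assumed from where this helper lives in the buildkit tree:

package example_test

import (
	"fmt"
	"testing"

	"github.com/containerd/continuity/fs/fstest"
	"github.com/moby/buildkit/util/testutil/integration"
	"github.com/tonistiigi/fsutil"
)

func TestTmpdirAsFSAndPath(t *testing.T) {
	dir := integration.Tmpdir(t, fstest.CreateFile("foo", []byte("bar"), 0600))

	// Still formats as the on-disk path, thanks to String().
	fmt.Printf("tmpdir: %s\n", dir)

	// And now also satisfies fsutil.FS through the embedded field.
	var _ fsutil.FS = dir
}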
func RunCmd(cmd *exec.Cmd, logs map[string]*bytes.Buffer) error {


@ -1,87 +0,0 @@
package wildcard
import (
"regexp"
"strings"
"github.com/pkg/errors"
)
// New returns a wildcard object for a string that contains "*" symbols.
func New(s string) (*Wildcard, error) {
reStr, err := Wildcard2Regexp(s)
if err != nil {
return nil, errors.Wrapf(err, "failed to translate wildcard %q to regexp", s)
}
re, err := regexp.Compile(reStr)
if err != nil {
return nil, errors.Wrapf(err, "failed to compile regexp %q (translated from wildcard %q)", reStr, s)
}
w := &Wildcard{
orig: s,
re: re,
}
return w, nil
}
// Wildcard2Regexp translates a wildcard string to a regexp string.
func Wildcard2Regexp(wildcard string) (string, error) {
s := regexp.QuoteMeta(wildcard)
if strings.Contains(s, "\\*\\*") {
return "", errors.New("invalid wildcard: \"**\"")
}
s = strings.ReplaceAll(s, "\\*", "(.*)")
s = "^" + s + "$"
return s, nil
}
// Wildcard is a wildcard matcher object.
type Wildcard struct {
orig string
re *regexp.Regexp
}
// String implements fmt.Stringer.
func (w *Wildcard) String() string {
return w.orig
}
// Match returns a non-nil Match on match.
func (w *Wildcard) Match(q string) *Match {
submatches := w.re.FindStringSubmatch(q)
if len(submatches) == 0 {
return nil
}
m := &Match{
w: w,
Submatches: submatches,
// FIXME: avoid executing regexp twice
idx: w.re.FindStringSubmatchIndex(q),
}
return m
}
// Match is a matched result.
type Match struct {
w *Wildcard
Submatches []string // 0: the entire query, 1: the first submatch, 2: the second submatch, ...
idx []int
}
// String implements fmt.Stringer.
func (m *Match) String() string {
if len(m.Submatches) == 0 {
return ""
}
return m.Submatches[0]
}
// Format formats submatch strings like "$1", "$2".
func (m *Match) Format(f string) (string, error) {
if m.w == nil || len(m.Submatches) == 0 || len(m.idx) == 0 {
return "", errors.New("invalid state")
}
var b []byte
b = m.w.re.ExpandString(b, f, m.Submatches[0], m.idx)
return string(b), nil
}
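This package backs the wildcard formatting described in the deleted selector.go above; a minimal sketch (not part of this diff) of the round trip it documents:

package main

import (
	"fmt"

	"github.com/moby/buildkit/util/wildcard"
)

func main() {
	w, err := wildcard.New("docker.io/library/golang:*")
	if err != nil {
		panic(err)
	}

	m := w.Match("docker.io/library/golang:1.19")
	if m == nil {
		fmt.Println("no match")
		return
	}

	out, err := m.Format("docker.io/library/golang:${1}-alpine")
	if err != nil {
		panic(err)
	}
	fmt.Println(out) // docker.io/library/golang:1.19-alpine
}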

vendor/github.com/moby/docker-image-spec/LICENSE

@ -0,0 +1,201 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -0,0 +1,54 @@
package v1
import (
"time"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
const DockerOCIImageMediaType = "application/vnd.docker.container.image.v1+json"
// DockerOCIImage is a ocispec.Image extended with Docker specific Config.
type DockerOCIImage struct {
ocispec.Image
// Shadow ocispec.Image.Config
Config DockerOCIImageConfig `json:"config,omitempty"`
}
// DockerOCIImageConfig is a ocispec.ImageConfig extended with Docker specific fields.
type DockerOCIImageConfig struct {
ocispec.ImageConfig
DockerOCIImageConfigExt
}
// DockerOCIImageConfigExt contains Docker-specific fields in DockerImageConfig.
type DockerOCIImageConfigExt struct {
Healthcheck *HealthcheckConfig `json:",omitempty"` // Healthcheck describes how to check the container is healthy
OnBuild []string `json:",omitempty"` // ONBUILD metadata that were defined on the image Dockerfile
Shell []string `json:",omitempty"` // Shell for shell-form of RUN, CMD, ENTRYPOINT
}
// HealthcheckConfig holds configuration settings for the HEALTHCHECK feature.
type HealthcheckConfig struct {
// Test is the test to perform to check that the container is healthy.
// An empty slice means to inherit the default.
// The options are:
// {} : inherit healthcheck
// {"NONE"} : disable healthcheck
// {"CMD", args...} : exec arguments directly
// {"CMD-SHELL", command} : run command with system's default shell
Test []string `json:",omitempty"`
// Zero means to inherit. Durations are expressed as integer nanoseconds.
Interval time.Duration `json:",omitempty"` // Interval is the time to wait between checks.
Timeout time.Duration `json:",omitempty"` // Timeout is the time to wait before considering the check to have hung.
StartPeriod time.Duration `json:",omitempty"` // The start period for the container to initialize before the retries starts to count down.
StartInterval time.Duration `json:",omitempty"` // The interval to attempt healthchecks at during the start period
// Retries is the number of consecutive failures needed to consider a container as unhealthy.
// Zero means inherit.
Retries int `json:",omitempty"`
}
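The new vendored file above (from moby/docker-image-spec, pulled in via the go.mod bump) defines Docker's extension of the OCI image config. As a minimal, hedged sketch of how it is consumed, assuming the package's import path is github.com/moby/docker-image-spec/specs-go/v1 and using a made-up config payload, decoding an image config JSON yields both the OCI fields and the Docker-specific extensions:

package main

import (
	"encoding/json"
	"fmt"

	dockerspec "github.com/moby/docker-image-spec/specs-go/v1" // assumed import path
)

func main() {
	raw := []byte(`{
		"architecture": "amd64",
		"os": "linux",
		"config": {
			"Entrypoint": ["/usr/bin/app"],
			"Healthcheck": {"Test": ["CMD-SHELL", "curl -f http://localhost/ || exit 1"], "Retries": 3},
			"OnBuild": ["RUN echo hello"]
		}
	}`)

	var img dockerspec.DockerOCIImage
	if err := json.Unmarshal(raw, &img); err != nil {
		panic(err)
	}
	// OCI fields come from the embedded ocispec.Image / ocispec.ImageConfig,
	// while Healthcheck, OnBuild and Shell come from DockerOCIImageConfigExt.
	fmt.Println(img.OS, img.Config.Entrypoint)
	fmt.Println(img.Config.Healthcheck.Test, img.Config.Healthcheck.Retries, img.Config.OnBuild)
}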

View File

@ -20,6 +20,7 @@ import (
"time"
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/types/known/timestamppb"
)
// Counter is a Metric that represents a single numerical value that only ever
@ -66,7 +67,7 @@ type CounterVecOpts struct {
CounterOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Contraint
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
@ -90,8 +91,12 @@ func NewCounter(opts CounterOpts) Counter {
nil,
opts.ConstLabels,
)
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: time.Now}
if opts.now == nil {
opts.now = time.Now
}
result := &counter{desc: desc, labelPairs: desc.constLabelPairs, now: opts.now}
result.init(result) // Init self-collection.
result.createdTs = timestamppb.New(opts.now())
return result
}
@ -106,10 +111,12 @@ type counter struct {
selfCollector
desc *Desc
createdTs *timestamppb.Timestamp
labelPairs []*dto.LabelPair
exemplar atomic.Value // Containing nil or a *dto.Exemplar.
now func() time.Time // To mock out time.Now() for testing.
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
func (c *counter) Desc() *Desc {
@ -159,8 +166,7 @@ func (c *counter) Write(out *dto.Metric) error {
exemplar = e.(*dto.Exemplar)
}
val := c.get()
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out)
return populateMetric(CounterValue, val, c.labelPairs, exemplar, out, c.createdTs)
}
func (c *counter) updateExemplar(v float64, l Labels) {
@ -200,13 +206,17 @@ func (v2) NewCounterVec(opts CounterVecOpts) *CounterVec {
opts.VariableLabels,
opts.ConstLabels,
)
if opts.now == nil {
opts.now = time.Now
}
return &CounterVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
if len(lvs) != len(desc.variableLabels.names) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: time.Now}
result := &counter{desc: desc, labelPairs: MakeLabelPairs(desc, lvs), now: opts.now}
result.init(result) // Init self-collection.
result.createdTs = timestamppb.New(opts.now())
return result
}),
}
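The counter.go change above threads an injectable now function through the constructors and records a created timestamp at construction time, which Write then copies into the protobuf Counter message (see the CreatedTimestamp field added to client_model further down). A small sketch of what that means for callers, with a made-up metric name:

package main

import (
	"fmt"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	c := prometheus.NewCounter(prometheus.CounterOpts{
		Name: "demo_requests_total",
		Help: "Example counter.",
	})
	c.Inc()

	var m dto.Metric
	if err := c.Write(&m); err != nil {
		panic(err)
	}
	// With this version of client_golang, the Counter message carries the time
	// the counter was created alongside its value.
	fmt.Println(m.GetCounter().GetValue(), m.GetCounter().GetCreatedTimestamp().AsTime())
}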

View File

@ -52,7 +52,7 @@ type Desc struct {
constLabelPairs []*dto.LabelPair
// variableLabels contains names of labels and normalization function for
// which the metric maintains variable values.
variableLabels ConstrainedLabels
variableLabels *compiledLabels
// id is a hash of the values of the ConstLabels and fqName. This
// must be unique among all registered descriptors and can therefore be
// used as an identifier of the descriptor.
@ -93,7 +93,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
d := &Desc{
fqName: fqName,
help: help,
variableLabels: variableLabels.constrainedLabels(),
variableLabels: variableLabels.compile(),
}
if !model.IsValidMetricName(model.LabelValue(fqName)) {
d.err = fmt.Errorf("%q is not a valid metric name", fqName)
@ -103,7 +103,7 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
// their sorted label names) plus the fqName (at position 0).
labelValues := make([]string, 1, len(constLabels)+1)
labelValues[0] = fqName
labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels))
labelNames := make([]string, 0, len(constLabels)+len(d.variableLabels.names))
labelNameSet := map[string]struct{}{}
// First add only the const label names and sort them...
for labelName := range constLabels {
@ -128,13 +128,13 @@ func (v2) NewDesc(fqName, help string, variableLabels ConstrainableLabels, const
// Now add the variable label names, but prefix them with something that
// cannot be in a regular label name. That prevents matching the label
// dimension with a different mix between preset and variable labels.
for _, label := range d.variableLabels {
if !checkLabelName(label.Name) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", label.Name, fqName)
for _, label := range d.variableLabels.names {
if !checkLabelName(label) {
d.err = fmt.Errorf("%q is not a valid label name for metric %q", label, fqName)
return d
}
labelNames = append(labelNames, "$"+label.Name)
labelNameSet[label.Name] = struct{}{}
labelNames = append(labelNames, "$"+label)
labelNameSet[label] = struct{}{}
}
if len(labelNames) != len(labelNameSet) {
d.err = fmt.Errorf("duplicate label names in constant and variable labels for metric %q", fqName)
@ -189,11 +189,19 @@ func (d *Desc) String() string {
fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()),
)
}
vlStrings := make([]string, 0, len(d.variableLabels.names))
for _, vl := range d.variableLabels.names {
if fn, ok := d.variableLabels.labelConstraints[vl]; ok && fn != nil {
vlStrings = append(vlStrings, fmt.Sprintf("c(%s)", vl))
} else {
vlStrings = append(vlStrings, vl)
}
}
return fmt.Sprintf(
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}",
"Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: {%s}}",
d.fqName,
d.help,
strings.Join(lpStrings, ","),
d.variableLabels,
strings.Join(vlStrings, ","),
)
}

View File

@ -48,7 +48,7 @@ func (e *expvarCollector) Collect(ch chan<- Metric) {
continue
}
var v interface{}
labels := make([]string, len(desc.variableLabels))
labels := make([]string, len(desc.variableLabels.names))
if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil {
ch <- NewInvalidMetric(desc, err)
continue

View File

@ -62,7 +62,7 @@ type GaugeVecOpts struct {
GaugeOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Contraint
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
@ -135,7 +135,7 @@ func (g *gauge) Sub(val float64) {
func (g *gauge) Write(out *dto.Metric) error {
val := math.Float64frombits(atomic.LoadUint64(&g.valBits))
return populateMetric(GaugeValue, val, g.labelPairs, nil, out)
return populateMetric(GaugeValue, val, g.labelPairs, nil, out, nil)
}
// GaugeVec is a Collector that bundles a set of Gauges that all share the same
@ -166,8 +166,8 @@ func (v2) NewGaugeVec(opts GaugeVecOpts) *GaugeVec {
)
return &GaugeVec{
MetricVec: NewMetricVec(desc, func(lvs ...string) Metric {
if len(lvs) != len(desc.variableLabels) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), lvs))
if len(lvs) != len(desc.variableLabels.names) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, lvs))
}
result := &gauge{desc: desc, labelPairs: MakeLabelPairs(desc, lvs)}
result.init(result) // Init self-collection.

View File

@ -25,6 +25,7 @@ import (
dto "github.com/prometheus/client_model/go"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// nativeHistogramBounds for the frac of observed values. Only relevant for
@ -391,7 +392,7 @@ type HistogramOpts struct {
// zero, it is replaced by default buckets. The default buckets are
// DefBuckets if no buckets for a native histogram (see below) are used,
// otherwise the default is no buckets. (In other words, if you want to
// use both reguler buckets and buckets for a native histogram, you have
// use both regular buckets and buckets for a native histogram, you have
// to define the regular buckets here explicitly.)
Buckets []float64
@ -413,8 +414,8 @@ type HistogramOpts struct {
// and 2, same as between 2 and 4, and 4 and 8, etc.).
//
// Details about the actually used factor: The factor is calculated as
// 2^(2^n), where n is an integer number between (and including) -8 and
// 4. n is chosen so that the resulting factor is the largest that is
// 2^(2^-n), where n is an integer number between (and including) -4 and
// 8. n is chosen so that the resulting factor is the largest that is
// still smaller or equal to NativeHistogramBucketFactor. Note that the
// smallest possible factor is therefore approx. 1.00271 (i.e. 2^(2^-8)
// ). If NativeHistogramBucketFactor is greater than 1 but smaller than
@ -428,12 +429,12 @@ type HistogramOpts struct {
// a major version bump.
NativeHistogramBucketFactor float64
// All observations with an absolute value of less or equal
// NativeHistogramZeroThreshold are accumulated into a “zero”
// bucket. For best results, this should be close to a bucket
// boundary. This is usually the case if picking a power of two. If
// NativeHistogramZeroThreshold are accumulated into a “zero” bucket.
// For best results, this should be close to a bucket boundary. This is
// usually the case if picking a power of two. If
// NativeHistogramZeroThreshold is left at zero,
// DefNativeHistogramZeroThreshold is used as the threshold. To configure
// a zero bucket with an actual threshold of zero (i.e. only
// DefNativeHistogramZeroThreshold is used as the threshold. To
// configure a zero bucket with an actual threshold of zero (i.e. only
// observations of precisely zero will go into the zero bucket), set
// NativeHistogramZeroThreshold to the NativeHistogramZeroThresholdZero
// constant (or any negative float value).
@ -446,26 +447,34 @@ type HistogramOpts struct {
// Histogram are sufficiently wide-spread. In particular, this could be
// used as a DoS attack vector. Where the observed values depend on
// external inputs, it is highly recommended to set a
// NativeHistogramMaxBucketNumber.) Once the set
// NativeHistogramMaxBucketNumber.) Once the set
// NativeHistogramMaxBucketNumber is exceeded, the following strategy is
// enacted: First, if the last reset (or the creation) of the histogram
// is at least NativeHistogramMinResetDuration ago, then the whole
// histogram is reset to its initial state (including regular
// buckets). If less time has passed, or if
// NativeHistogramMinResetDuration is zero, no reset is
// performed. Instead, the zero threshold is increased sufficiently to
// reduce the number of buckets to or below
// NativeHistogramMaxBucketNumber, but not to more than
// NativeHistogramMaxZeroThreshold. Thus, if
// NativeHistogramMaxZeroThreshold is already at or below the current
// zero threshold, nothing happens at this step. After that, if the
// number of buckets still exceeds NativeHistogramMaxBucketNumber, the
// resolution of the histogram is reduced by doubling the width of the
// sparse buckets (up to a growth factor between one bucket to the next
// of 2^(2^4) = 65536, see above).
// enacted:
// - First, if the last reset (or the creation) of the histogram is at
// least NativeHistogramMinResetDuration ago, then the whole
// histogram is reset to its initial state (including regular
// buckets).
// - If less time has passed, or if NativeHistogramMinResetDuration is
// zero, no reset is performed. Instead, the zero threshold is
// increased sufficiently to reduce the number of buckets to or below
// NativeHistogramMaxBucketNumber, but not to more than
// NativeHistogramMaxZeroThreshold. Thus, if
// NativeHistogramMaxZeroThreshold is already at or below the current
// zero threshold, nothing happens at this step.
// - After that, if the number of buckets still exceeds
// NativeHistogramMaxBucketNumber, the resolution of the histogram is
// reduced by doubling the width of the sparse buckets (up to a
// growth factor between one bucket to the next of 2^(2^4) = 65536,
// see above).
// - Any increased zero threshold or reduced resolution is reset back
// to their original values once NativeHistogramMinResetDuration has
// passed (since the last reset or the creation of the histogram).
NativeHistogramMaxBucketNumber uint32
NativeHistogramMinResetDuration time.Duration
NativeHistogramMaxZeroThreshold float64
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
// HistogramVecOpts bundles the options to create a HistogramVec metric.
@ -475,7 +484,7 @@ type HistogramVecOpts struct {
HistogramOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Contraint
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
@ -499,12 +508,12 @@ func NewHistogram(opts HistogramOpts) Histogram {
}
func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram {
if len(desc.variableLabels) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
if len(desc.variableLabels.names) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
for _, n := range desc.variableLabels {
if n.Name == bucketLabel {
for _, n := range desc.variableLabels.names {
if n == bucketLabel {
panic(errBucketLabelNotAllowed)
}
}
@ -514,6 +523,10 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
}
}
if opts.now == nil {
opts.now = time.Now
}
h := &histogram{
desc: desc,
upperBounds: opts.Buckets,
@ -521,8 +534,8 @@ func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogr
nativeHistogramMaxBuckets: opts.NativeHistogramMaxBucketNumber,
nativeHistogramMaxZeroThreshold: opts.NativeHistogramMaxZeroThreshold,
nativeHistogramMinResetDuration: opts.NativeHistogramMinResetDuration,
lastResetTime: time.Now(),
now: time.Now,
lastResetTime: opts.now(),
now: opts.now,
}
if len(h.upperBounds) == 0 && opts.NativeHistogramBucketFactor <= 1 {
h.upperBounds = DefBuckets
@ -701,9 +714,11 @@ type histogram struct {
nativeHistogramMaxZeroThreshold float64
nativeHistogramMaxBuckets uint32
nativeHistogramMinResetDuration time.Duration
lastResetTime time.Time // Protected by mtx.
// lastResetTime is protected by mtx. It is also used as created timestamp.
lastResetTime time.Time
now func() time.Time // To mock out time.Now() for testing.
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
func (h *histogram) Desc() *Desc {
@ -742,9 +757,10 @@ func (h *histogram) Write(out *dto.Metric) error {
waitForCooldown(count, coldCounts)
his := &dto.Histogram{
Bucket: make([]*dto.Bucket, len(h.upperBounds)),
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
Bucket: make([]*dto.Bucket, len(h.upperBounds)),
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
CreatedTimestamp: timestamppb.New(h.lastResetTime),
}
out.Histogram = his
out.Label = h.labelPairs
@ -782,6 +798,16 @@ func (h *histogram) Write(out *dto.Metric) error {
his.ZeroCount = proto.Uint64(zeroBucket)
his.NegativeSpan, his.NegativeDelta = makeBuckets(&coldCounts.nativeHistogramBucketsNegative)
his.PositiveSpan, his.PositiveDelta = makeBuckets(&coldCounts.nativeHistogramBucketsPositive)
// Add a no-op span to a histogram without observations and with
// a zero threshold of zero. Otherwise, a native histogram would
// look like a classic histogram to scrapers.
if *his.ZeroThreshold == 0 && *his.ZeroCount == 0 && len(his.PositiveSpan) == 0 && len(his.NegativeSpan) == 0 {
his.PositiveSpan = []*dto.BucketSpan{{
Offset: proto.Int32(0),
Length: proto.Uint32(0),
}}
}
}
addAndResetCounts(hotCounts, coldCounts)
return nil
@ -854,20 +880,23 @@ func (h *histogram) limitBuckets(counts *histogramCounts, value float64, bucket
h.doubleBucketWidth(hotCounts, coldCounts)
}
// maybeReset resests the whole histogram if at least h.nativeHistogramMinResetDuration
// maybeReset resets the whole histogram if at least h.nativeHistogramMinResetDuration
// has been passed. It returns true if the histogram has been reset. The caller
// must have locked h.mtx.
func (h *histogram) maybeReset(hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int) bool {
func (h *histogram) maybeReset(
hot, cold *histogramCounts, coldIdx uint64, value float64, bucket int,
) bool {
// We are using the possibly mocked h.now() rather than
// time.Since(h.lastResetTime) to enable testing.
if h.nativeHistogramMinResetDuration == 0 || h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
if h.nativeHistogramMinResetDuration == 0 ||
h.now().Sub(h.lastResetTime) < h.nativeHistogramMinResetDuration {
return false
}
// Completely reset coldCounts.
h.resetCounts(cold)
// Repeat the latest observation to not lose it completely.
cold.observe(value, bucket, true)
// Make coldCounts the new hot counts while ressetting countAndHotIdx.
// Make coldCounts the new hot counts while resetting countAndHotIdx.
n := atomic.SwapUint64(&h.countAndHotIdx, (coldIdx<<63)+1)
count := n & ((1 << 63) - 1)
waitForCooldown(count, hot)
@ -1176,6 +1205,7 @@ type constHistogram struct {
sum float64
buckets map[float64]uint64
labelPairs []*dto.LabelPair
createdTs *timestamppb.Timestamp
}
func (h *constHistogram) Desc() *Desc {
@ -1183,7 +1213,9 @@ func (h *constHistogram) Desc() *Desc {
}
func (h *constHistogram) Write(out *dto.Metric) error {
his := &dto.Histogram{}
his := &dto.Histogram{
CreatedTimestamp: h.createdTs,
}
buckets := make([]*dto.Bucket, 0, len(h.buckets))
@ -1230,7 +1262,7 @@ func NewConstHistogram(
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constHistogram{
@ -1324,7 +1356,7 @@ func makeBuckets(buckets *sync.Map) ([]*dto.BucketSpan, []int64) {
// Multiple spans with only small gaps in between are probably
// encoded more efficiently as one larger span with a few empty
// buckets. Needs some research to find the sweet spot. For now,
// we assume that gaps of one ore two buckets should not create
// we assume that gaps of one or two buckets should not create
// a new span.
iDelta := int32(i - nextI)
if n == 0 || iDelta > 2 {
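The rewritten HistogramOpts comments above spell out the bucket-limiting strategy for native histograms as a list of fallback steps. A hedged illustration of how those knobs are typically set (the field names come from the diff; the values are arbitrary):

package main

import (
	"time"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	h := prometheus.NewHistogram(prometheus.HistogramOpts{
		Name: "demo_request_duration_seconds",
		Help: "Example native histogram.",
		// Grow bucket boundaries by at most ~10% per bucket.
		NativeHistogramBucketFactor: 1.1,
		// Once more than 100 sparse buckets exist, apply the strategy described
		// above: reset (if the min reset duration has passed), otherwise widen
		// the zero threshold up to the max, otherwise double the bucket width.
		NativeHistogramMaxBucketNumber:  100,
		NativeHistogramMinResetDuration: time.Hour,
		NativeHistogramMaxZeroThreshold: 0.001,
	})
	prometheus.MustRegister(h)
	h.Observe(0.42)
}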

View File

@ -14,7 +14,7 @@
// It provides tools to compare sequences of strings and generate textual diffs.
//
// Maintaining `GetUnifiedDiffString` here because original repository
// (https://github.com/pmezard/go-difflib) is no loger maintained.
// (https://github.com/pmezard/go-difflib) is no longer maintained.
package internal
import (

View File

@ -32,19 +32,15 @@ import (
// create a Desc.
type Labels map[string]string
// LabelConstraint normalizes label values.
type LabelConstraint func(string) string
// ConstrainedLabels represents a label name and its constrain function
// to normalize label values. This type is commonly used when constructing
// metric vector Collectors.
type ConstrainedLabel struct {
Name string
Constraint func(string) string
}
func (cl ConstrainedLabel) Constrain(v string) string {
if cl.Constraint == nil {
return v
}
return cl.Constraint(v)
Constraint LabelConstraint
}
// ConstrainableLabels is an interface that allows creating of labels that can
@ -58,7 +54,7 @@ func (cl ConstrainedLabel) Constrain(v string) string {
// },
// })
type ConstrainableLabels interface {
constrainedLabels() ConstrainedLabels
compile() *compiledLabels
labelNames() []string
}
@ -67,8 +63,20 @@ type ConstrainableLabels interface {
// metric vector Collectors.
type ConstrainedLabels []ConstrainedLabel
func (cls ConstrainedLabels) constrainedLabels() ConstrainedLabels {
return cls
func (cls ConstrainedLabels) compile() *compiledLabels {
compiled := &compiledLabels{
names: make([]string, len(cls)),
labelConstraints: map[string]LabelConstraint{},
}
for i, label := range cls {
compiled.names[i] = label.Name
if label.Constraint != nil {
compiled.labelConstraints[label.Name] = label.Constraint
}
}
return compiled
}
func (cls ConstrainedLabels) labelNames() []string {
@ -92,18 +100,36 @@ func (cls ConstrainedLabels) labelNames() []string {
// }
type UnconstrainedLabels []string
func (uls UnconstrainedLabels) constrainedLabels() ConstrainedLabels {
constrainedLabels := make([]ConstrainedLabel, len(uls))
for i, l := range uls {
constrainedLabels[i] = ConstrainedLabel{Name: l}
func (uls UnconstrainedLabels) compile() *compiledLabels {
return &compiledLabels{
names: uls,
}
return constrainedLabels
}
func (uls UnconstrainedLabels) labelNames() []string {
return uls
}
type compiledLabels struct {
names []string
labelConstraints map[string]LabelConstraint
}
func (cls *compiledLabels) compile() *compiledLabels {
return cls
}
func (cls *compiledLabels) labelNames() []string {
return cls.names
}
func (cls *compiledLabels) constrain(labelName, value string) string {
if fn, ok := cls.labelConstraints[labelName]; ok && fn != nil {
return fn(value)
}
return value
}
// reservedLabelPrefix is a prefix which is not legal in user-supplied
// label names.
const reservedLabelPrefix = "__"
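The compiledLabels refactor above is internal: callers keep passing ConstrainedLabels or plain UnconstrainedLabels, and the library now compiles them once into a name slice plus a constraint map. A hedged caller-side sketch, assuming the exported prometheus.V2 entry point that exposes the v2 constructors referenced in this diff (CounterVecOpts, ConstrainedLabels):

package main

import (
	"strings"

	"github.com/prometheus/client_golang/prometheus"
)

func main() {
	requests := prometheus.V2.NewCounterVec(prometheus.CounterVecOpts{
		CounterOpts: prometheus.CounterOpts{
			Name: "demo_http_requests_total",
			Help: "Example counter vector with a constrained label.",
		},
		VariableLabels: prometheus.ConstrainedLabels{
			{Name: "method", Constraint: strings.ToUpper}, // normalize label values
			{Name: "path"},
		},
	})
	prometheus.MustRegister(requests)

	// Both observations land in the same method="GET" series thanks to the constraint.
	requests.WithLabelValues("get", "/healthz").Inc()
	requests.WithLabelValues("GET", "/healthz").Inc()
}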

View File

@ -92,6 +92,9 @@ type Opts struct {
// machine_role metric). See also
// https://prometheus.io/docs/instrumenting/writing_exporters/#target-labels-not-static-scraped-labels
ConstLabels Labels
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
// BuildFQName joins the given three name components by "_". Empty name

View File

@ -389,16 +389,13 @@ func isLabelCurried(c prometheus.Collector, label string) bool {
return true
}
// emptyLabels is a one-time allocation for non-partitioned metrics to avoid
// unnecessary allocations on each request.
var emptyLabels = prometheus.Labels{}
func labels(code, method bool, reqMethod string, status int, extraMethods ...string) prometheus.Labels {
if !(code || method) {
return emptyLabels
}
labels := prometheus.Labels{}
if !(code || method) {
return labels
}
if code {
labels["code"] = sanitizeCode(status)
}

View File

@ -548,7 +548,7 @@ func (r *Registry) Gather() ([]*dto.MetricFamily, error) {
goroutineBudget--
runtime.Gosched()
}
// Once both checkedMetricChan and uncheckdMetricChan are closed
// Once both checkedMetricChan and uncheckedMetricChan are closed
// and drained, the contraption above will nil out cmc and umc,
// and then we can leave the collect loop here.
if cmc == nil && umc == nil {
@ -963,9 +963,9 @@ func checkDescConsistency(
// Is the desc consistent with the content of the metric?
lpsFromDesc := make([]*dto.LabelPair, len(desc.constLabelPairs), len(dtoMetric.Label))
copy(lpsFromDesc, desc.constLabelPairs)
for _, l := range desc.variableLabels {
for _, l := range desc.variableLabels.names {
lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{
Name: proto.String(l.Name),
Name: proto.String(l),
})
}
if len(lpsFromDesc) != len(dtoMetric.Label) {

View File

@ -26,6 +26,7 @@ import (
"github.com/beorn7/perks/quantile"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/timestamppb"
)
// quantileLabel is used for the label that defines the quantile in a
@ -145,6 +146,9 @@ type SummaryOpts struct {
// is the internal buffer size of the underlying package
// "github.com/bmizerany/perks/quantile").
BufCap uint32
// now is for testing purposes, by default it's time.Now.
now func() time.Time
}
// SummaryVecOpts bundles the options to create a SummaryVec metric.
@ -154,7 +158,7 @@ type SummaryVecOpts struct {
SummaryOpts
// VariableLabels are used to partition the metric vector by the given set
// of labels. Each label value will be constrained with the optional Contraint
// of labels. Each label value will be constrained with the optional Constraint
// function, if provided.
VariableLabels ConstrainableLabels
}
@ -188,12 +192,12 @@ func NewSummary(opts SummaryOpts) Summary {
}
func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
if len(desc.variableLabels) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.labelNames(), labelValues))
if len(desc.variableLabels.names) != len(labelValues) {
panic(makeInconsistentCardinalityError(desc.fqName, desc.variableLabels.names, labelValues))
}
for _, n := range desc.variableLabels {
if n.Name == quantileLabel {
for _, n := range desc.variableLabels.names {
if n == quantileLabel {
panic(errQuantileLabelNotAllowed)
}
}
@ -222,6 +226,9 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
opts.BufCap = DefBufCap
}
if opts.now == nil {
opts.now = time.Now
}
if len(opts.Objectives) == 0 {
// Use the lock-free implementation of a Summary without objectives.
s := &noObjectivesSummary{
@ -230,6 +237,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
counts: [2]*summaryCounts{{}, {}},
}
s.init(s) // Init self-collection.
s.createdTs = timestamppb.New(opts.now())
return s
}
@ -245,7 +253,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
coldBuf: make([]float64, 0, opts.BufCap),
streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets),
}
s.headStreamExpTime = time.Now().Add(s.streamDuration)
s.headStreamExpTime = opts.now().Add(s.streamDuration)
s.hotBufExpTime = s.headStreamExpTime
for i := uint32(0); i < opts.AgeBuckets; i++ {
@ -259,6 +267,7 @@ func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary {
sort.Float64s(s.sortedObjectives)
s.init(s) // Init self-collection.
s.createdTs = timestamppb.New(opts.now())
return s
}
@ -286,6 +295,8 @@ type summary struct {
headStream *quantile.Stream
headStreamIdx int
headStreamExpTime, hotBufExpTime time.Time
createdTs *timestamppb.Timestamp
}
func (s *summary) Desc() *Desc {
@ -307,7 +318,9 @@ func (s *summary) Observe(v float64) {
}
func (s *summary) Write(out *dto.Metric) error {
sum := &dto.Summary{}
sum := &dto.Summary{
CreatedTimestamp: s.createdTs,
}
qs := make([]*dto.Quantile, 0, len(s.objectives))
s.bufMtx.Lock()
@ -440,6 +453,8 @@ type noObjectivesSummary struct {
counts [2]*summaryCounts
labelPairs []*dto.LabelPair
createdTs *timestamppb.Timestamp
}
func (s *noObjectivesSummary) Desc() *Desc {
@ -490,8 +505,9 @@ func (s *noObjectivesSummary) Write(out *dto.Metric) error {
}
sum := &dto.Summary{
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
SampleCount: proto.Uint64(count),
SampleSum: proto.Float64(math.Float64frombits(atomic.LoadUint64(&coldCounts.sumBits))),
CreatedTimestamp: s.createdTs,
}
out.Summary = sum
@ -681,6 +697,7 @@ type constSummary struct {
sum float64
quantiles map[float64]float64
labelPairs []*dto.LabelPair
createdTs *timestamppb.Timestamp
}
func (s *constSummary) Desc() *Desc {
@ -688,7 +705,9 @@ func (s *constSummary) Desc() *Desc {
}
func (s *constSummary) Write(out *dto.Metric) error {
sum := &dto.Summary{}
sum := &dto.Summary{
CreatedTimestamp: s.createdTs,
}
qs := make([]*dto.Quantile, 0, len(s.quantiles))
sum.SampleCount = proto.Uint64(s.count)
@ -737,7 +756,7 @@ func NewConstSummary(
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
return &constSummary{

View File

@ -14,6 +14,7 @@
package prometheus
import (
"errors"
"fmt"
"sort"
"time"
@ -91,7 +92,7 @@ func (v *valueFunc) Desc() *Desc {
}
func (v *valueFunc) Write(out *dto.Metric) error {
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out)
return populateMetric(v.valType, v.function(), v.labelPairs, nil, out, nil)
}
// NewConstMetric returns a metric with one fixed value that cannot be
@ -105,12 +106,12 @@ func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels)); err != nil {
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
metric := &dto.Metric{}
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric); err != nil {
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, nil); err != nil {
return nil, err
}
@ -130,6 +131,43 @@ func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelVal
return m
}
// NewConstMetricWithCreatedTimestamp does the same thing as NewConstMetric, but generates Counters
// with created timestamp set and returns an error for other metric types.
func NewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) (Metric, error) {
if desc.err != nil {
return nil, desc.err
}
if err := validateLabelValues(labelValues, len(desc.variableLabels.names)); err != nil {
return nil, err
}
switch valueType {
case CounterValue:
break
default:
return nil, errors.New("created timestamps are only supported for counters")
}
metric := &dto.Metric{}
if err := populateMetric(valueType, value, MakeLabelPairs(desc, labelValues), nil, metric, timestamppb.New(ct)); err != nil {
return nil, err
}
return &constMetric{
desc: desc,
metric: metric,
}, nil
}
// MustNewConstMetricWithCreatedTimestamp is a version of NewConstMetricWithCreatedTimestamp that panics where
// NewConstMetricWithCreatedTimestamp would have returned an error.
func MustNewConstMetricWithCreatedTimestamp(desc *Desc, valueType ValueType, value float64, ct time.Time, labelValues ...string) Metric {
m, err := NewConstMetricWithCreatedTimestamp(desc, valueType, value, ct, labelValues...)
if err != nil {
panic(err)
}
return m
}
type constMetric struct {
desc *Desc
metric *dto.Metric
@ -153,11 +191,12 @@ func populateMetric(
labelPairs []*dto.LabelPair,
e *dto.Exemplar,
m *dto.Metric,
ct *timestamppb.Timestamp,
) error {
m.Label = labelPairs
switch t {
case CounterValue:
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e}
m.Counter = &dto.Counter{Value: proto.Float64(v), Exemplar: e, CreatedTimestamp: ct}
case GaugeValue:
m.Gauge = &dto.Gauge{Value: proto.Float64(v)}
case UntypedValue:
@ -176,19 +215,19 @@ func populateMetric(
// This function is only needed for custom Metric implementations. See MetricVec
// example.
func MakeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair {
totalLen := len(desc.variableLabels) + len(desc.constLabelPairs)
totalLen := len(desc.variableLabels.names) + len(desc.constLabelPairs)
if totalLen == 0 {
// Super fast path.
return nil
}
if len(desc.variableLabels) == 0 {
if len(desc.variableLabels.names) == 0 {
// Moderately fast path.
return desc.constLabelPairs
}
labelPairs := make([]*dto.LabelPair, 0, totalLen)
for i, l := range desc.variableLabels {
for i, l := range desc.variableLabels.names {
labelPairs = append(labelPairs, &dto.LabelPair{
Name: proto.String(l.Name),
Name: proto.String(l),
Value: proto.String(labelValues[i]),
})
}
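NewConstMetricWithCreatedTimestamp and its Must variant, added above, are meant for custom collectors that mirror counters from an external system and know when those counters started. A minimal usage sketch with made-up names and values:

package main

import (
	"fmt"
	"time"

	"github.com/prometheus/client_golang/prometheus"
	dto "github.com/prometheus/client_model/go"
)

func main() {
	desc := prometheus.NewDesc(
		"demo_mirrored_events_total",
		"Counter mirrored from an external system.",
		[]string{"source"}, nil,
	)

	// Hypothetical start time reported by the external system.
	started := time.Now().Add(-2 * time.Hour)
	m := prometheus.MustNewConstMetricWithCreatedTimestamp(
		desc, prometheus.CounterValue, 1234, started, "backend-a",
	)

	var out dto.Metric
	if err := m.Write(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.GetCounter().GetValue(), out.GetCounter().GetCreatedTimestamp().AsTime())
}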

View File

@ -20,24 +20,6 @@ import (
"github.com/prometheus/common/model"
)
var labelsPool = &sync.Pool{
New: func() interface{} {
return make(Labels)
},
}
func getLabelsFromPool() Labels {
return labelsPool.Get().(Labels)
}
func putLabelsToPool(labels Labels) {
for k := range labels {
delete(labels, k)
}
labelsPool.Put(labels)
}
// MetricVec is a Collector to bundle metrics of the same name that differ in
// their label values. MetricVec is not used directly but as a building block
// for implementations of vectors of a given metric type, like GaugeVec,
@ -91,6 +73,7 @@ func NewMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec {
// See also the CounterVec example.
func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
lvs = constrainLabelValues(m.desc, lvs, m.curry)
h, err := m.hashLabelValues(lvs)
if err != nil {
return false
@ -110,8 +93,8 @@ func (m *MetricVec) DeleteLabelValues(lvs ...string) bool {
// This method is used for the same purpose as DeleteLabelValues(...string). See
// there for pros and cons of the two methods.
func (m *MetricVec) Delete(labels Labels) bool {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
labels, closer := constrainLabels(m.desc, labels)
defer closer()
h, err := m.hashLabels(labels)
if err != nil {
@ -128,8 +111,8 @@ func (m *MetricVec) Delete(labels Labels) bool {
// Note that curried labels will never be matched if deleting from the curried vector.
// To match curried labels with DeletePartialMatch, it must be called on the base vector.
func (m *MetricVec) DeletePartialMatch(labels Labels) int {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
labels, closer := constrainLabels(m.desc, labels)
defer closer()
return m.metricMap.deleteByLabels(labels, m.curry)
}
@ -169,11 +152,11 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
oldCurry = m.curry
iCurry int
)
for i, label := range m.desc.variableLabels {
val, ok := labels[label.Name]
for i, labelName := range m.desc.variableLabels.names {
val, ok := labels[labelName]
if iCurry < len(oldCurry) && oldCurry[iCurry].index == i {
if ok {
return nil, fmt.Errorf("label name %q is already curried", label.Name)
return nil, fmt.Errorf("label name %q is already curried", labelName)
}
newCurry = append(newCurry, oldCurry[iCurry])
iCurry++
@ -181,7 +164,10 @@ func (m *MetricVec) CurryWith(labels Labels) (*MetricVec, error) {
if !ok {
continue // Label stays uncurried.
}
newCurry = append(newCurry, curriedLabelValue{i, label.Constrain(val)})
newCurry = append(newCurry, curriedLabelValue{
i,
m.desc.variableLabels.constrain(labelName, val),
})
}
}
if l := len(oldCurry) + len(labels) - len(newCurry); l > 0 {
@ -250,8 +236,8 @@ func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) {
// around MetricVec, implementing a vector for a specific Metric implementation,
// for example GaugeVec.
func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
labels = constrainLabels(m.desc, labels)
defer putLabelsToPool(labels)
labels, closer := constrainLabels(m.desc, labels)
defer closer()
h, err := m.hashLabels(labels)
if err != nil {
@ -262,7 +248,7 @@ func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) {
}
func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
if err := validateLabelValues(vals, len(m.desc.variableLabels)-len(m.curry)); err != nil {
if err := validateLabelValues(vals, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@ -271,7 +257,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
curry = m.curry
iVals, iCurry int
)
for i := 0; i < len(m.desc.variableLabels); i++ {
for i := 0; i < len(m.desc.variableLabels.names); i++ {
if iCurry < len(curry) && curry[iCurry].index == i {
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
@ -285,7 +271,7 @@ func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) {
}
func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels)-len(m.curry)); err != nil {
if err := validateValuesInLabels(labels, len(m.desc.variableLabels.names)-len(m.curry)); err != nil {
return 0, err
}
@ -294,17 +280,17 @@ func (m *MetricVec) hashLabels(labels Labels) (uint64, error) {
curry = m.curry
iCurry int
)
for i, label := range m.desc.variableLabels {
val, ok := labels[label.Name]
for i, labelName := range m.desc.variableLabels.names {
val, ok := labels[labelName]
if iCurry < len(curry) && curry[iCurry].index == i {
if ok {
return 0, fmt.Errorf("label name %q is already curried", label.Name)
return 0, fmt.Errorf("label name %q is already curried", labelName)
}
h = m.hashAdd(h, curry[iCurry].value)
iCurry++
} else {
if !ok {
return 0, fmt.Errorf("label name %q missing in label map", label.Name)
return 0, fmt.Errorf("label name %q missing in label map", labelName)
}
h = m.hashAdd(h, val)
}
@ -482,7 +468,7 @@ func valueMatchesVariableOrCurriedValue(targetValue string, index int, values []
func matchPartialLabels(desc *Desc, values []string, labels Labels, curry []curriedLabelValue) bool {
for l, v := range labels {
// Check if the target label exists in our metrics and get the index.
varLabelIndex, validLabel := indexOf(l, desc.variableLabels.labelNames())
varLabelIndex, validLabel := indexOf(l, desc.variableLabels.names)
if validLabel {
// Check the value of that label against the target value.
// We don't consider curried values in partial matches.
@ -626,7 +612,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
return false
}
iCurry := 0
for i, k := range desc.variableLabels {
for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
if values[i] != curry[iCurry].value {
return false
@ -634,7 +620,7 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
iCurry++
continue
}
if values[i] != labels[k.Name] {
if values[i] != labels[k] {
return false
}
}
@ -644,13 +630,13 @@ func matchLabels(desc *Desc, values []string, labels Labels, curry []curriedLabe
func extractLabelValues(desc *Desc, labels Labels, curry []curriedLabelValue) []string {
labelValues := make([]string, len(labels)+len(curry))
iCurry := 0
for i, k := range desc.variableLabels {
for i, k := range desc.variableLabels.names {
if iCurry < len(curry) && curry[iCurry].index == i {
labelValues[i] = curry[iCurry].value
iCurry++
continue
}
labelValues[i] = labels[k.Name]
labelValues[i] = labels[k]
}
return labelValues
}
@ -670,20 +656,37 @@ func inlineLabelValues(lvs []string, curry []curriedLabelValue) []string {
return labelValues
}
func constrainLabels(desc *Desc, labels Labels) Labels {
constrainedLabels := getLabelsFromPool()
for l, v := range labels {
if i, ok := indexOf(l, desc.variableLabels.labelNames()); ok {
v = desc.variableLabels[i].Constrain(v)
}
var labelsPool = &sync.Pool{
New: func() interface{} {
return make(Labels)
},
}
constrainedLabels[l] = v
func constrainLabels(desc *Desc, labels Labels) (Labels, func()) {
if len(desc.variableLabels.labelConstraints) == 0 {
// Fast path when there's no constraints
return labels, func() {}
}
return constrainedLabels
constrainedLabels := labelsPool.Get().(Labels)
for l, v := range labels {
constrainedLabels[l] = desc.variableLabels.constrain(l, v)
}
return constrainedLabels, func() {
for k := range constrainedLabels {
delete(constrainedLabels, k)
}
labelsPool.Put(constrainedLabels)
}
}
func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) []string {
if len(desc.variableLabels.labelConstraints) == 0 {
// Fast path when there's no constraints
return lvs
}
constrainedValues := make([]string, len(lvs))
var iCurry, iLVs int
for i := 0; i < len(lvs)+len(curry); i++ {
@ -692,8 +695,11 @@ func constrainLabelValues(desc *Desc, lvs []string, curry []curriedLabelValue) [
continue
}
if i < len(desc.variableLabels) {
constrainedValues[iLVs] = desc.variableLabels[i].Constrain(lvs[iLVs])
if i < len(desc.variableLabels.names) {
constrainedValues[iLVs] = desc.variableLabels.constrain(
desc.variableLabels.names[i],
lvs[iLVs],
)
} else {
constrainedValues[iLVs] = lvs[iLVs]
}
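The constrainLabels rewrite above replaces the getLabelsFromPool/putLabelsToPool helpers with a single call that returns the label map together with a cleanup closure, so the unconstrained fast path can hand back the caller's map with a no-op closer while the slow path borrows from the sync.Pool and returns it on defer. A generic sketch of that pattern, independent of the prometheus internals (names here are hypothetical):

package main

import (
	"fmt"
	"strings"
	"sync"
)

var mapPool = sync.Pool{
	New: func() interface{} { return make(map[string]string) },
}

// normalized returns a map ready for use plus a closer the caller must defer.
// Without a transform there is nothing borrowed, so the input is returned as-is
// with a no-op closer; otherwise a pooled map is filled and later recycled.
func normalized(in map[string]string, transform func(string) string) (map[string]string, func()) {
	if transform == nil {
		return in, func() {} // fast path: nothing to clean up
	}
	out := mapPool.Get().(map[string]string)
	for k, v := range in {
		out[k] = transform(v)
	}
	return out, func() {
		for k := range out {
			delete(out, k)
		}
		mapPool.Put(out)
	}
}

func main() {
	labels, done := normalized(map[string]string{"method": "get"}, strings.ToUpper)
	defer done()
	fmt.Println(labels["method"]) // GET
}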

View File

@ -215,8 +215,9 @@ type Counter struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"`
Exemplar *Exemplar `protobuf:"bytes,2,opt,name=exemplar" json:"exemplar,omitempty"`
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,3,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
}
func (x *Counter) Reset() {
@ -265,6 +266,13 @@ func (x *Counter) GetExemplar() *Exemplar {
return nil
}
func (x *Counter) GetCreatedTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.CreatedTimestamp
}
return nil
}
type Quantile struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -325,9 +333,10 @@ type Summary struct {
sizeCache protoimpl.SizeCache
unknownFields protoimpl.UnknownFields
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count,json=sampleCount" json:"sample_count,omitempty"`
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"`
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,4,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
}
func (x *Summary) Reset() {
@ -383,6 +392,13 @@ func (x *Summary) GetQuantile() []*Quantile {
return nil
}
func (x *Summary) GetCreatedTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.CreatedTimestamp
}
return nil
}
type Untyped struct {
state protoimpl.MessageState
sizeCache protoimpl.SizeCache
@ -439,7 +455,8 @@ type Histogram struct {
SampleCountFloat *float64 `protobuf:"fixed64,4,opt,name=sample_count_float,json=sampleCountFloat" json:"sample_count_float,omitempty"` // Overrides sample_count if > 0.
SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum,json=sampleSum" json:"sample_sum,omitempty"`
// Buckets for the conventional histogram.
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` // Ordered in increasing order of upper_bound, +Inf bucket is optional.
CreatedTimestamp *timestamppb.Timestamp `protobuf:"bytes,15,opt,name=created_timestamp,json=createdTimestamp" json:"created_timestamp,omitempty"`
// schema defines the bucket schema. Currently, valid numbers are -4 <= n <= 8.
// They are all for base-2 bucket schemas, where 1 is a bucket boundary in each case, and
// then each power of two is divided into 2^n logarithmic buckets.
@ -457,6 +474,9 @@ type Histogram struct {
NegativeDelta []int64 `protobuf:"zigzag64,10,rep,name=negative_delta,json=negativeDelta" json:"negative_delta,omitempty"` // Count delta of each bucket compared to previous one (or to zero for 1st bucket).
NegativeCount []float64 `protobuf:"fixed64,11,rep,name=negative_count,json=negativeCount" json:"negative_count,omitempty"` // Absolute count of each bucket.
// Positive buckets for the native histogram.
// Use a no-op span (offset 0, length 0) for a native histogram without any
// observations yet and with a zero_threshold of 0. Otherwise, it would be
// indistinguishable from a classic histogram.
PositiveSpan []*BucketSpan `protobuf:"bytes,12,rep,name=positive_span,json=positiveSpan" json:"positive_span,omitempty"`
// Use either "positive_delta" or "positive_count", the former for
// regular histograms with integer counts, the latter for float
@ -525,6 +545,13 @@ func (x *Histogram) GetBucket() []*Bucket {
return nil
}
func (x *Histogram) GetCreatedTimestamp() *timestamppb.Timestamp {
if x != nil {
return x.CreatedTimestamp
}
return nil
}
func (x *Histogram) GetSchema() int32 {
if x != nil && x.Schema != nil {
return *x.Schema
@ -972,137 +999,151 @@ var file_io_prometheus_client_metrics_proto_rawDesc = []byte{
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x22, 0x1d, 0x0a, 0x05, 0x47, 0x61, 0x75, 0x67, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76,
0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x22, 0x5b, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a, 0x05,
0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c,
0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x02,
0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d,
0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c,
0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0x87, 0x01, 0x0a,
0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
0x65, 0x22, 0xa4, 0x01, 0x0a, 0x07, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x14, 0x0a,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61,
0x6c, 0x75, 0x65, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65,
0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12,
0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f,
0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d,
0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x3c, 0x0a, 0x08, 0x51, 0x75, 0x61, 0x6e,
0x74, 0x69, 0x6c, 0x65, 0x12, 0x1a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
0x18, 0x01, 0x20, 0x01, 0x28, 0x01, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xd0, 0x01, 0x0a, 0x07, 0x53, 0x75, 0x6d, 0x6d, 0x61,
0x72, 0x79, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75,
0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c,
0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65,
0x12, 0x47, 0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f,
0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74,
0x79, 0x70, 0x65, 0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20,
0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xac, 0x05, 0x0a, 0x09, 0x48,
0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b,
0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73,
0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52,
0x09, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x3a, 0x0a, 0x08, 0x71, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x51, 0x75, 0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x52, 0x08, 0x71, 0x75,
0x61, 0x6e, 0x74, 0x69, 0x6c, 0x65, 0x22, 0x1f, 0x0a, 0x07, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65,
0x64, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x01,
0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x22, 0xe3, 0x04, 0x0a, 0x09, 0x48, 0x69, 0x73, 0x74,
0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f,
0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0b, 0x73, 0x61, 0x6d,
0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04,
0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e,
0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65,
0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73, 0x61, 0x6d, 0x70,
0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x18,
0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63,
0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x73,
0x63, 0x68, 0x65, 0x6d, 0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68,
0x65, 0x6d, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65,
0x73, 0x68, 0x6f, 0x6c, 0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72,
0x6f, 0x54, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65,
0x72, 0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09,
0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72,
0x6f, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20,
0x01, 0x28, 0x01, 0x52, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c,
0x6f, 0x61, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f,
0x73, 0x70, 0x61, 0x6e, 0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e,
0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65,
0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65,
0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03,
0x28, 0x12, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74,
0x61, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x18, 0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74,
0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69,
0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32,
0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e,
0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61,
0x6e, 0x52, 0x0c, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12,
0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74,
0x61, 0x18, 0x0d, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76,
0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69,
0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d,
0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01,
0x0a, 0x06, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75,
0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01,
0x28, 0x04, 0x52, 0x0f, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f,
0x75, 0x6e, 0x74, 0x12, 0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76,
0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20,
0x01, 0x28, 0x01, 0x52, 0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70,
0x65, 0x72, 0x5f, 0x62, 0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a,
0x75, 0x70, 0x70, 0x65, 0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78,
0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78,
0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74,
0x53, 0x70, 0x61, 0x6e, 0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01,
0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06,
0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65,
0x6e, 0x67, 0x74, 0x68, 0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61,
0x72, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b,
0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69,
0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75,
0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38,
0x0a, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f,
0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x12, 0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03,
0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50,
0x61, 0x69, 0x72, 0x52, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61,
0x75, 0x67, 0x65, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x2e, 0x47, 0x61, 0x75, 0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a,
0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d,
0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72,
0x79, 0x18, 0x04, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53,
0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12,
0x37, 0x0a, 0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b,
0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52,
0x07, 0x75, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74,
0x6f, 0x67, 0x72, 0x61, 0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f,
0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x2c, 0x0a, 0x12, 0x73,
0x61, 0x6d, 0x70, 0x6c, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61,
0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52, 0x10, 0x73, 0x61, 0x6d, 0x70, 0x6c, 0x65, 0x43,
0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1d, 0x0a, 0x0a, 0x73, 0x61, 0x6d,
0x70, 0x6c, 0x65, 0x5f, 0x73, 0x75, 0x6d, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x09, 0x73,
0x61, 0x6d, 0x70, 0x6c, 0x65, 0x53, 0x75, 0x6d, 0x12, 0x34, 0x0a, 0x06, 0x62, 0x75, 0x63, 0x6b,
0x65, 0x74, 0x18, 0x03, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x52, 0x06, 0x62, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x12, 0x47,
0x0a, 0x11, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x5f, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74,
0x61, 0x6d, 0x70, 0x18, 0x0f, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e, 0x67, 0x6f, 0x6f, 0x67,
0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e, 0x54, 0x69, 0x6d, 0x65,
0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x10, 0x63, 0x72, 0x65, 0x61, 0x74, 0x65, 0x64, 0x54, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x12, 0x16, 0x0a, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d,
0x61, 0x18, 0x05, 0x20, 0x01, 0x28, 0x11, 0x52, 0x06, 0x73, 0x63, 0x68, 0x65, 0x6d, 0x61, 0x12,
0x25, 0x0a, 0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x74, 0x68, 0x72, 0x65, 0x73, 0x68, 0x6f, 0x6c,
0x64, 0x18, 0x06, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0d, 0x7a, 0x65, 0x72, 0x6f, 0x54, 0x68, 0x72,
0x65, 0x73, 0x68, 0x6f, 0x6c, 0x64, 0x12, 0x1d, 0x0a, 0x0a, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x07, 0x20, 0x01, 0x28, 0x04, 0x52, 0x09, 0x7a, 0x65, 0x72, 0x6f,
0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12, 0x28, 0x0a, 0x10, 0x7a, 0x65, 0x72, 0x6f, 0x5f, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x08, 0x20, 0x01, 0x28, 0x01, 0x52,
0x0e, 0x7a, 0x65, 0x72, 0x6f, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12,
0x45, 0x0a, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x73, 0x70, 0x61, 0x6e,
0x18, 0x09, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d,
0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x42, 0x75,
0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69,
0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0a, 0x20, 0x03, 0x28, 0x12, 0x52, 0x0d,
0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c, 0x74, 0x61, 0x12, 0x25, 0x0a,
0x0e, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18,
0x0b, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x6e, 0x65, 0x67, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43,
0x6f, 0x75, 0x6e, 0x74, 0x12, 0x45, 0x0a, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65,
0x5f, 0x73, 0x70, 0x61, 0x6e, 0x18, 0x0c, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x20, 0x2e, 0x69, 0x6f,
0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65,
0x6e, 0x74, 0x2e, 0x48, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69,
0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74,
0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d,
0x65, 0x74, 0x72, 0x69, 0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e,
0x61, 0x6d, 0x65, 0x18, 0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12,
0x12, 0x0a, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68,
0x65, 0x6c, 0x70, 0x12, 0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28,
0x0e, 0x32, 0x20, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75,
0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54,
0x79, 0x70, 0x65, 0x52, 0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x18, 0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70,
0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a,
0x62, 0x0a, 0x0a, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a,
0x07, 0x43, 0x4f, 0x55, 0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41,
0x55, 0x47, 0x45, 0x10, 0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59,
0x10, 0x02, 0x12, 0x0b, 0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12,
0x0d, 0x0a, 0x09, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13,
0x0a, 0x0f, 0x47, 0x41, 0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41,
0x4d, 0x10, 0x05, 0x42, 0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74,
0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74,
0x68, 0x75, 0x62, 0x2e, 0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65,
0x75, 0x73, 0x2f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f,
0x67, 0x6f, 0x3b, 0x69, 0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x5f, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74,
0x6e, 0x74, 0x2e, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e, 0x52, 0x0c, 0x70,
0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x53, 0x70, 0x61, 0x6e, 0x12, 0x25, 0x0a, 0x0e, 0x70,
0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x64, 0x65, 0x6c, 0x74, 0x61, 0x18, 0x0d, 0x20,
0x03, 0x28, 0x12, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x44, 0x65, 0x6c,
0x74, 0x61, 0x12, 0x25, 0x0a, 0x0e, 0x70, 0x6f, 0x73, 0x69, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63,
0x6f, 0x75, 0x6e, 0x74, 0x18, 0x0e, 0x20, 0x03, 0x28, 0x01, 0x52, 0x0d, 0x70, 0x6f, 0x73, 0x69,
0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x22, 0xc6, 0x01, 0x0a, 0x06, 0x42, 0x75,
0x63, 0x6b, 0x65, 0x74, 0x12, 0x29, 0x0a, 0x10, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69,
0x76, 0x65, 0x5f, 0x63, 0x6f, 0x75, 0x6e, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x04, 0x52, 0x0f,
0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x12,
0x34, 0x0a, 0x16, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x5f, 0x63, 0x6f,
0x75, 0x6e, 0x74, 0x5f, 0x66, 0x6c, 0x6f, 0x61, 0x74, 0x18, 0x04, 0x20, 0x01, 0x28, 0x01, 0x52,
0x14, 0x63, 0x75, 0x6d, 0x75, 0x6c, 0x61, 0x74, 0x69, 0x76, 0x65, 0x43, 0x6f, 0x75, 0x6e, 0x74,
0x46, 0x6c, 0x6f, 0x61, 0x74, 0x12, 0x1f, 0x0a, 0x0b, 0x75, 0x70, 0x70, 0x65, 0x72, 0x5f, 0x62,
0x6f, 0x75, 0x6e, 0x64, 0x18, 0x02, 0x20, 0x01, 0x28, 0x01, 0x52, 0x0a, 0x75, 0x70, 0x70, 0x65,
0x72, 0x42, 0x6f, 0x75, 0x6e, 0x64, 0x12, 0x3a, 0x0a, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
0x61, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1e, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72,
0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e,
0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x52, 0x08, 0x65, 0x78, 0x65, 0x6d, 0x70, 0x6c,
0x61, 0x72, 0x22, 0x3c, 0x0a, 0x0a, 0x42, 0x75, 0x63, 0x6b, 0x65, 0x74, 0x53, 0x70, 0x61, 0x6e,
0x12, 0x16, 0x0a, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x18, 0x01, 0x20, 0x01, 0x28, 0x11,
0x52, 0x06, 0x6f, 0x66, 0x66, 0x73, 0x65, 0x74, 0x12, 0x16, 0x0a, 0x06, 0x6c, 0x65, 0x6e, 0x67,
0x74, 0x68, 0x18, 0x02, 0x20, 0x01, 0x28, 0x0d, 0x52, 0x06, 0x6c, 0x65, 0x6e, 0x67, 0x74, 0x68,
0x22, 0x91, 0x01, 0x0a, 0x08, 0x45, 0x78, 0x65, 0x6d, 0x70, 0x6c, 0x61, 0x72, 0x12, 0x35, 0x0a,
0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52, 0x05, 0x6c,
0x61, 0x62, 0x65, 0x6c, 0x12, 0x14, 0x0a, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x18, 0x02, 0x20,
0x01, 0x28, 0x01, 0x52, 0x05, 0x76, 0x61, 0x6c, 0x75, 0x65, 0x12, 0x38, 0x0a, 0x09, 0x74, 0x69,
0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1a, 0x2e,
0x67, 0x6f, 0x6f, 0x67, 0x6c, 0x65, 0x2e, 0x70, 0x72, 0x6f, 0x74, 0x6f, 0x62, 0x75, 0x66, 0x2e,
0x54, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70, 0x52, 0x09, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x22, 0xff, 0x02, 0x0a, 0x06, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x12,
0x35, 0x0a, 0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x18, 0x01, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1f,
0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4c, 0x61, 0x62, 0x65, 0x6c, 0x50, 0x61, 0x69, 0x72, 0x52,
0x05, 0x6c, 0x61, 0x62, 0x65, 0x6c, 0x12, 0x31, 0x0a, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x18,
0x02, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1b, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x47, 0x61, 0x75,
0x67, 0x65, 0x52, 0x05, 0x67, 0x61, 0x75, 0x67, 0x65, 0x12, 0x37, 0x0a, 0x07, 0x63, 0x6f, 0x75,
0x6e, 0x74, 0x65, 0x72, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e,
0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e,
0x74, 0x2e, 0x43, 0x6f, 0x75, 0x6e, 0x74, 0x65, 0x72, 0x52, 0x07, 0x63, 0x6f, 0x75, 0x6e, 0x74,
0x65, 0x72, 0x12, 0x37, 0x0a, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x18, 0x04, 0x20,
0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68,
0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x53, 0x75, 0x6d, 0x6d, 0x61,
0x72, 0x79, 0x52, 0x07, 0x73, 0x75, 0x6d, 0x6d, 0x61, 0x72, 0x79, 0x12, 0x37, 0x0a, 0x07, 0x75,
0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x18, 0x05, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1d, 0x2e, 0x69,
0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74, 0x2e, 0x55, 0x6e, 0x74, 0x79, 0x70, 0x65, 0x64, 0x52, 0x07, 0x75, 0x6e, 0x74,
0x79, 0x70, 0x65, 0x64, 0x12, 0x3d, 0x0a, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61,
0x6d, 0x18, 0x07, 0x20, 0x01, 0x28, 0x0b, 0x32, 0x1f, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f,
0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x48,
0x69, 0x73, 0x74, 0x6f, 0x67, 0x72, 0x61, 0x6d, 0x52, 0x09, 0x68, 0x69, 0x73, 0x74, 0x6f, 0x67,
0x72, 0x61, 0x6d, 0x12, 0x21, 0x0a, 0x0c, 0x74, 0x69, 0x6d, 0x65, 0x73, 0x74, 0x61, 0x6d, 0x70,
0x5f, 0x6d, 0x73, 0x18, 0x06, 0x20, 0x01, 0x28, 0x03, 0x52, 0x0b, 0x74, 0x69, 0x6d, 0x65, 0x73,
0x74, 0x61, 0x6d, 0x70, 0x4d, 0x73, 0x22, 0xa2, 0x01, 0x0a, 0x0c, 0x4d, 0x65, 0x74, 0x72, 0x69,
0x63, 0x46, 0x61, 0x6d, 0x69, 0x6c, 0x79, 0x12, 0x12, 0x0a, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x18,
0x01, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x6e, 0x61, 0x6d, 0x65, 0x12, 0x12, 0x0a, 0x04, 0x68,
0x65, 0x6c, 0x70, 0x18, 0x02, 0x20, 0x01, 0x28, 0x09, 0x52, 0x04, 0x68, 0x65, 0x6c, 0x70, 0x12,
0x34, 0x0a, 0x04, 0x74, 0x79, 0x70, 0x65, 0x18, 0x03, 0x20, 0x01, 0x28, 0x0e, 0x32, 0x20, 0x2e,
0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c,
0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x52,
0x04, 0x74, 0x79, 0x70, 0x65, 0x12, 0x34, 0x0a, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x18,
0x04, 0x20, 0x03, 0x28, 0x0b, 0x32, 0x1c, 0x2e, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65,
0x74, 0x68, 0x65, 0x75, 0x73, 0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x2e, 0x4d, 0x65, 0x74,
0x72, 0x69, 0x63, 0x52, 0x06, 0x6d, 0x65, 0x74, 0x72, 0x69, 0x63, 0x2a, 0x62, 0x0a, 0x0a, 0x4d,
0x65, 0x74, 0x72, 0x69, 0x63, 0x54, 0x79, 0x70, 0x65, 0x12, 0x0b, 0x0a, 0x07, 0x43, 0x4f, 0x55,
0x4e, 0x54, 0x45, 0x52, 0x10, 0x00, 0x12, 0x09, 0x0a, 0x05, 0x47, 0x41, 0x55, 0x47, 0x45, 0x10,
0x01, 0x12, 0x0b, 0x0a, 0x07, 0x53, 0x55, 0x4d, 0x4d, 0x41, 0x52, 0x59, 0x10, 0x02, 0x12, 0x0b,
0x0a, 0x07, 0x55, 0x4e, 0x54, 0x59, 0x50, 0x45, 0x44, 0x10, 0x03, 0x12, 0x0d, 0x0a, 0x09, 0x48,
0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x04, 0x12, 0x13, 0x0a, 0x0f, 0x47, 0x41,
0x55, 0x47, 0x45, 0x5f, 0x48, 0x49, 0x53, 0x54, 0x4f, 0x47, 0x52, 0x41, 0x4d, 0x10, 0x05, 0x42,
0x52, 0x0a, 0x14, 0x69, 0x6f, 0x2e, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73,
0x2e, 0x63, 0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5a, 0x3a, 0x67, 0x69, 0x74, 0x68, 0x75, 0x62, 0x2e,
0x63, 0x6f, 0x6d, 0x2f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x2f, 0x63,
0x6c, 0x69, 0x65, 0x6e, 0x74, 0x5f, 0x6d, 0x6f, 0x64, 0x65, 0x6c, 0x2f, 0x67, 0x6f, 0x3b, 0x69,
0x6f, 0x5f, 0x70, 0x72, 0x6f, 0x6d, 0x65, 0x74, 0x68, 0x65, 0x75, 0x73, 0x5f, 0x63, 0x6c, 0x69,
0x65, 0x6e, 0x74,
}
var (
@ -1137,26 +1178,29 @@ var file_io_prometheus_client_metrics_proto_goTypes = []interface{}{
}
var file_io_prometheus_client_metrics_proto_depIdxs = []int32{
10, // 0: io.prometheus.client.Counter.exemplar:type_name -> io.prometheus.client.Exemplar
4, // 1: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
8, // 2: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
9, // 3: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
9, // 4: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
10, // 5: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
1, // 6: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
13, // 7: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
1, // 8: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
2, // 9: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
3, // 10: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
5, // 11: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
6, // 12: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
7, // 13: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
0, // 14: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
11, // 15: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
16, // [16:16] is the sub-list for method output_type
16, // [16:16] is the sub-list for method input_type
16, // [16:16] is the sub-list for extension type_name
16, // [16:16] is the sub-list for extension extendee
0, // [0:16] is the sub-list for field type_name
13, // 1: io.prometheus.client.Counter.created_timestamp:type_name -> google.protobuf.Timestamp
4, // 2: io.prometheus.client.Summary.quantile:type_name -> io.prometheus.client.Quantile
13, // 3: io.prometheus.client.Summary.created_timestamp:type_name -> google.protobuf.Timestamp
8, // 4: io.prometheus.client.Histogram.bucket:type_name -> io.prometheus.client.Bucket
13, // 5: io.prometheus.client.Histogram.created_timestamp:type_name -> google.protobuf.Timestamp
9, // 6: io.prometheus.client.Histogram.negative_span:type_name -> io.prometheus.client.BucketSpan
9, // 7: io.prometheus.client.Histogram.positive_span:type_name -> io.prometheus.client.BucketSpan
10, // 8: io.prometheus.client.Bucket.exemplar:type_name -> io.prometheus.client.Exemplar
1, // 9: io.prometheus.client.Exemplar.label:type_name -> io.prometheus.client.LabelPair
13, // 10: io.prometheus.client.Exemplar.timestamp:type_name -> google.protobuf.Timestamp
1, // 11: io.prometheus.client.Metric.label:type_name -> io.prometheus.client.LabelPair
2, // 12: io.prometheus.client.Metric.gauge:type_name -> io.prometheus.client.Gauge
3, // 13: io.prometheus.client.Metric.counter:type_name -> io.prometheus.client.Counter
5, // 14: io.prometheus.client.Metric.summary:type_name -> io.prometheus.client.Summary
6, // 15: io.prometheus.client.Metric.untyped:type_name -> io.prometheus.client.Untyped
7, // 16: io.prometheus.client.Metric.histogram:type_name -> io.prometheus.client.Histogram
0, // 17: io.prometheus.client.MetricFamily.type:type_name -> io.prometheus.client.MetricType
11, // 18: io.prometheus.client.MetricFamily.metric:type_name -> io.prometheus.client.Metric
19, // [19:19] is the sub-list for method output_type
19, // [19:19] is the sub-list for method input_type
19, // [19:19] is the sub-list for extension type_name
19, // [19:19] is the sub-list for extension extendee
0, // [0:19] is the sub-list for field type_name
}
func init() { file_io_prometheus_client_metrics_proto_init() }
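The regenerated descriptor and dependency indexes above add a `created_timestamp` field (a `google.protobuf.Timestamp`) to Counter, Summary and Histogram. A minimal sketch of reading it from a decoded family, assuming the standard protoc-gen-go getters (`GetCounter`, `GetCreatedTimestamp`) that generated code of this shape provides:

```go
package example

import (
	"fmt"

	dto "github.com/prometheus/client_model/go"
)

// printCreated prints the creation timestamp of counter metrics, when present.
// Sketch only: GetCounter/GetCreatedTimestamp are the usual generated getters.
func printCreated(mf *dto.MetricFamily) {
	for _, m := range mf.GetMetric() {
		if c := m.GetCounter(); c != nil && c.GetCreatedTimestamp() != nil {
			fmt.Printf("%s created at %s\n", mf.GetName(), c.GetCreatedTimestamp().AsTime())
		}
	}
}
```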

View File

@ -132,7 +132,10 @@ func (d *textDecoder) Decode(v *dto.MetricFamily) error {
}
// Pick off one MetricFamily per Decode until there's nothing left.
for key, fam := range d.fams {
*v = *fam
v.Name = fam.Name
v.Help = fam.Help
v.Type = fam.Type
v.Metric = fam.Metric
delete(d.fams, key)
return nil
}
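The `Decode` change above copies `Name`, `Help`, `Type` and `Metric` individually instead of assigning `*v = *fam`, so the caller's value never receives the message's internal protobuf state. A usage sketch of the text decoder, assuming the exported `expfmt.NewDecoder` (the `FmtText` constant appears later in this diff):

```go
package example

import (
	"io"
	"strings"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// decodeAll picks off one MetricFamily per Decode call until io.EOF.
func decodeAll(text string) ([]*dto.MetricFamily, error) {
	dec := expfmt.NewDecoder(strings.NewReader(text), expfmt.FmtText)
	var fams []*dto.MetricFamily
	for {
		mf := &dto.MetricFamily{} // fresh value each iteration; Decode fills Name/Help/Type/Metric
		if err := dec.Decode(mf); err != nil {
			if err == io.EOF {
				return fams, nil
			}
			return nil, err
		}
		fams = append(fams, mf)
	}
}
```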

View File

@ -18,9 +18,9 @@ import (
"io"
"net/http"
"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/matttproud/golang_protobuf_extensions/pbutil"
"github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg"
"google.golang.org/protobuf/encoding/prototext"
dto "github.com/prometheus/client_model/go"
)
@ -99,8 +99,11 @@ func NegotiateIncludingOpenMetrics(h http.Header) Format {
if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") {
return FmtText
}
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion || ver == "") {
return FmtOpenMetrics
if ac.Type+"/"+ac.SubType == OpenMetricsType && (ver == OpenMetricsVersion_0_0_1 || ver == OpenMetricsVersion_1_0_0 || ver == "") {
if ver == OpenMetricsVersion_1_0_0 {
return FmtOpenMetrics_1_0_0
}
return FmtOpenMetrics_0_0_1
}
}
return FmtText
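With the version split, negotiation can now return either OpenMetrics content type. A small sketch, assuming the exported `expfmt.NegotiateIncludingOpenMetrics` named in this hunk:

```go
package example

import (
	"net/http"

	"github.com/prometheus/common/expfmt"
)

// negotiatedFormat picks the wire format for a scrape request's Accept header.
func negotiatedFormat(r *http.Request) expfmt.Format {
	// An Accept of "application/openmetrics-text; version=1.0.0" yields FmtOpenMetrics_1_0_0,
	// version=0.0.1 (or no version) yields FmtOpenMetrics_0_0_1, and anything else
	// falls back to FmtText, as in the hunk above.
	return expfmt.NegotiateIncludingOpenMetrics(r.Header)
}
```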
@ -133,7 +136,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
case FmtProtoText:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := fmt.Fprintln(w, proto.MarshalTextString(v))
_, err := fmt.Fprintln(w, prototext.Format(v))
return err
},
close: func() error { return nil },
@ -146,7 +149,7 @@ func NewEncoder(w io.Writer, format Format) Encoder {
},
close: func() error { return nil },
}
case FmtOpenMetrics:
case FmtOpenMetrics_0_0_1, FmtOpenMetrics_1_0_0:
return encoderCloser{
encode: func(v *dto.MetricFamily) error {
_, err := MetricFamilyToOpenMetrics(w, v)
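A matching encoding sketch, assuming `expfmt.NewEncoder` from this hunk and the package's `expfmt.Closer` interface (closing is what emits the trailing `# EOF` line for OpenMetrics):

```go
package example

import (
	"io"

	dto "github.com/prometheus/client_model/go"
	"github.com/prometheus/common/expfmt"
)

// writeOpenMetrics encodes families as OpenMetrics 0.0.1 and finalizes the stream.
func writeOpenMetrics(w io.Writer, fams []*dto.MetricFamily) error {
	enc := expfmt.NewEncoder(w, expfmt.FmtOpenMetrics_0_0_1)
	for _, mf := range fams {
		if err := enc.Encode(mf); err != nil {
			return err
		}
	}
	if c, ok := enc.(expfmt.Closer); ok {
		return c.Close() // assumed Closer interface; OpenMetrics encoders need it for "# EOF"
	}
	return nil
}
```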

View File

@ -19,20 +19,22 @@ type Format string
// Constants to assemble the Content-Type values for the different wire protocols.
const (
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion = "0.0.1"
TextVersion = "0.0.4"
ProtoType = `application/vnd.google.protobuf`
ProtoProtocol = `io.prometheus.client.MetricFamily`
ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";"
OpenMetricsType = `application/openmetrics-text`
OpenMetricsVersion_0_0_1 = "0.0.1"
OpenMetricsVersion_1_0_0 = "1.0.0"
// The Content-Type values for the different wire protocols.
FmtUnknown Format = `<unknown>`
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
FmtOpenMetrics Format = OpenMetricsType + `; version=` + OpenMetricsVersion + `; charset=utf-8`
FmtUnknown Format = `<unknown>`
FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8`
FmtProtoDelim Format = ProtoFmt + ` encoding=delimited`
FmtProtoText Format = ProtoFmt + ` encoding=text`
FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text`
FmtOpenMetrics_1_0_0 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_1_0_0 + `; charset=utf-8`
FmtOpenMetrics_0_0_1 Format = OpenMetricsType + `; version=` + OpenMetricsVersion_0_0_1 + `; charset=utf-8`
)
const (

View File

@ -24,8 +24,8 @@ import (
dto "github.com/prometheus/client_model/go"
"github.com/golang/protobuf/proto" //nolint:staticcheck // Ignore SA1019. Need to keep deprecated package for compatibility.
"github.com/prometheus/common/model"
"google.golang.org/protobuf/proto"
)
// A stateFn is a function that represents a state in a state machine. By

View File

@ -2,6 +2,7 @@
linters:
enable:
- godot
- misspell
- revive
linter-settings:
@ -10,3 +11,5 @@ linter-settings:
exclude:
# Ignore "See: URL"
- 'See:'
misspell:
locale: US

View File

@ -49,19 +49,19 @@ endif
GOTEST := $(GO) test
GOTEST_DIR :=
ifneq ($(CIRCLE_JOB),)
ifneq ($(shell which gotestsum),)
ifneq ($(shell command -v gotestsum > /dev/null),)
GOTEST_DIR := test-results
GOTEST := gotestsum --junitfile $(GOTEST_DIR)/unit-tests.xml --
endif
endif
PROMU_VERSION ?= 0.14.0
PROMU_VERSION ?= 0.15.0
PROMU_URL := https://github.com/prometheus/promu/releases/download/v$(PROMU_VERSION)/promu-$(PROMU_VERSION).$(GO_BUILD_PLATFORM).tar.gz
SKIP_GOLANGCI_LINT :=
GOLANGCI_LINT :=
GOLANGCI_LINT_OPTS ?=
GOLANGCI_LINT_VERSION ?= v1.51.2
GOLANGCI_LINT_VERSION ?= v1.54.2
# golangci-lint only supports linux, darwin and windows platforms on i386/amd64.
# windows isn't included here because of the path separator being different.
ifeq ($(GOHOSTOS),$(filter $(GOHOSTOS),linux darwin))
@ -178,7 +178,7 @@ endif
.PHONY: common-yamllint
common-yamllint:
@echo ">> running yamllint on all YAML files in the repository"
ifeq (, $(shell which yamllint))
ifeq (, $(shell command -v yamllint > /dev/null))
@echo "yamllint not installed so skipping"
else
yamllint .

View File

@ -51,11 +51,11 @@ ensure the `fixtures` directory is up to date by removing the existing directory
extracting the ttar file using `make fixtures/.unpacked` or just `make test`.
```bash
rm -rf fixtures
rm -rf testdata/fixtures
make test
```
Next, make the required changes to the extracted files in the `fixtures` directory. When
the changes are complete, run `make update_fixtures` to create a new `fixtures.ttar` file
based on the updated `fixtures` directory. And finally, verify the changes using
`git diff fixtures.ttar`.
`git diff testdata/fixtures.ttar`.
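Put together, the renamed paths give roughly this fixture workflow (a sketch assembled from the commands in this README excerpt; make targets assumed unchanged):

```bash
rm -rf testdata/fixtures          # drop the previously extracted tree
make test                         # re-extracts the fixtures from the ttar archive
# ...edit the extracted files under testdata/fixtures...
make update_fixtures              # rebuild the ttar from the updated directory
git diff testdata/fixtures.ttar   # review the packed changes
```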

View File

@ -55,7 +55,7 @@ type ARPEntry struct {
func (fs FS) GatherARPEntries() ([]ARPEntry, error) {
data, err := os.ReadFile(fs.proc.Path("net/arp"))
if err != nil {
return nil, fmt.Errorf("error reading arp %q: %w", fs.proc.Path("net/arp"), err)
return nil, fmt.Errorf("%s: error reading arp %s: %w", ErrFileRead, fs.proc.Path("net/arp"), err)
}
return parseARPEntries(data)
@ -78,11 +78,11 @@ func parseARPEntries(data []byte) ([]ARPEntry, error) {
} else if width == expectedDataWidth {
entry, err := parseARPEntry(columns)
if err != nil {
return []ARPEntry{}, fmt.Errorf("failed to parse ARP entry: %w", err)
return []ARPEntry{}, fmt.Errorf("%s: Failed to parse ARP entry: %v: %w", ErrFileParse, entry, err)
}
entries = append(entries, entry)
} else {
return []ARPEntry{}, fmt.Errorf("%d columns were detected, but %d were expected", width, expectedDataWidth)
return []ARPEntry{}, fmt.Errorf("%s: %d columns found, but expected %d: %w", ErrFileParse, width, expectedDataWidth, err)
}
}

View File

@ -55,7 +55,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
parts := strings.Fields(line)
if len(parts) < 4 {
return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo")
return nil, fmt.Errorf("%w: Invalid number of fields, found: %v", ErrFileParse, parts)
}
node := strings.TrimRight(parts[1], ",")
@ -66,7 +66,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
bucketCount = arraySize
} else {
if bucketCount != arraySize {
return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize)
return nil, fmt.Errorf("%w: mismatch in number of buddyinfo buckets, previous count %d, new count %d", ErrFileParse, bucketCount, arraySize)
}
}
@ -74,7 +74,7 @@ func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) {
for i := 0; i < arraySize; i++ {
sizes[i], err = strconv.ParseFloat(parts[i+4], 64)
if err != nil {
return nil, fmt.Errorf("invalid value in buddyinfo: %w", err)
return nil, fmt.Errorf("%s: Invalid valid in buddyinfo: %f: %w", ErrFileParse, sizes[i], err)
}
}

View File

@ -79,7 +79,7 @@ func parseCPUInfoX86(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@ -192,9 +192,10 @@ func parseCPUInfoARM(info []byte) ([]CPUInfo, error) {
scanner := bufio.NewScanner(bytes.NewReader(info))
firstLine := firstNonEmptyLine(scanner)
match, _ := regexp.MatchString("^[Pp]rocessor", firstLine)
match, err := regexp.MatchString("^[Pp]rocessor", firstLine)
if !match || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%s: Cannot parse line: %q: %w", ErrFileParse, firstLine, err)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@ -258,7 +259,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "vendor_id") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: Cannot parse line: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@ -283,7 +284,7 @@ func parseCPUInfoS390X(info []byte) ([]CPUInfo, error) {
if strings.HasPrefix(line, "processor") {
match := cpuinfoS390XProcessorRegexp.FindStringSubmatch(line)
if len(match) < 2 {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
cpu := commonCPUInfo
v, err := strconv.ParseUint(match[1], 0, 32)
@ -343,7 +344,7 @@ func parseCPUInfoMips(info []byte) ([]CPUInfo, error) {
// find the first "processor" line
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "system type") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
cpuinfo := []CPUInfo{}
@ -421,7 +422,7 @@ func parseCPUInfoPPC(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)
@ -466,7 +467,7 @@ func parseCPUInfoRISCV(info []byte) ([]CPUInfo, error) {
firstLine := firstNonEmptyLine(scanner)
if !strings.HasPrefix(firstLine, "processor") || !strings.Contains(firstLine, ":") {
return nil, fmt.Errorf("invalid cpuinfo file: %q", firstLine)
return nil, fmt.Errorf("%w: %q", ErrFileParse, firstLine)
}
field := strings.SplitN(firstLine, ": ", 2)
v, err := strconv.ParseUint(field[1], 0, 32)

View File

@ -55,12 +55,13 @@ func (fs FS) Crypto() ([]Crypto, error) {
path := fs.proc.Path("crypto")
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, fmt.Errorf("error reading crypto %q: %w", path, err)
return nil, fmt.Errorf("%s: Cannot read file %v: %w", ErrFileRead, b, err)
}
crypto, err := parseCrypto(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("error parsing crypto %q: %w", path, err)
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, crypto, err)
}
return crypto, nil
@ -83,7 +84,7 @@ func parseCrypto(r io.Reader) ([]Crypto, error) {
kv := strings.Split(text, ":")
if len(kv) != 2 {
return nil, fmt.Errorf("malformed crypto line: %q", text)
return nil, fmt.Errorf("%w: Cannot parae line: %q", ErrFileParse, text)
}
k := strings.TrimSpace(kv[0])

View File

@ -20,8 +20,8 @@ import (
// FS represents the pseudo-filesystem sys, which provides an interface to
// kernel data structures.
type FS struct {
proc fs.FS
real bool
proc fs.FS
isReal bool
}
// DefaultMountPoint is the common mount point of the proc filesystem.
@ -41,10 +41,10 @@ func NewFS(mountPoint string) (FS, error) {
return FS{}, err
}
real, err := isRealProc(mountPoint)
isReal, err := isRealProc(mountPoint)
if err != nil {
return FS{}, err
}
return FS{fs, real}, nil
return FS{fs, isReal}, nil
}

View File

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build netbsd || openbsd || solaris || windows
// +build netbsd openbsd solaris windows
//go:build !freebsd && !linux
// +build !freebsd,!linux
package procfs

View File

@ -11,8 +11,8 @@
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !netbsd && !openbsd && !solaris && !windows
// +build !netbsd,!openbsd,!solaris,!windows
//go:build freebsd || linux
// +build freebsd linux
package procfs

View File

@ -236,7 +236,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
m, err := parseFscacheinfo(bytes.NewReader(b))
if err != nil {
return Fscacheinfo{}, fmt.Errorf("failed to parse Fscacheinfo: %w", err)
return Fscacheinfo{}, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, m, err)
}
return *m, nil
@ -245,7 +245,7 @@ func (fs FS) Fscacheinfo() (Fscacheinfo, error) {
func setFSCacheFields(fields []string, setFields ...*uint64) error {
var err error
if len(fields) < len(setFields) {
return fmt.Errorf("Insufficient number of fields, expected %v, got %v", len(setFields), len(fields))
return fmt.Errorf("%s: Expected %d, but got %d: %w", ErrFileParse, len(setFields), len(fields), err)
}
for i := range setFields {
@ -263,7 +263,7 @@ func parseFscacheinfo(r io.Reader) (*Fscacheinfo, error) {
for s.Scan() {
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("malformed Fscacheinfo line: %q", s.Text())
return nil, fmt.Errorf("%w: malformed Fscacheinfo line: %q", ErrFileParse, s.Text())
}
switch fields[0] {

View File

@ -221,15 +221,16 @@ func parseIPPort(s string) (net.IP, uint16, error) {
case 46:
ip = net.ParseIP(s[1:40])
if ip == nil {
return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40])
return nil, 0, fmt.Errorf("%s: Invalid IPv6 addr %s: %w", ErrFileParse, s[1:40], err)
}
default:
return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s)
return nil, 0, fmt.Errorf("%s: Unexpected IP:Port %s: %w", ErrFileParse, s, err)
}
portString := s[len(s)-4:]
if len(portString) != 4 {
return nil, 0, fmt.Errorf("unexpected port string format: %s", portString)
return nil, 0,
fmt.Errorf("%s: Unexpected port string format %s: %w", ErrFileParse, portString, err)
}
port, err := strconv.ParseUint(portString, 16, 16)
if err != nil {

View File

@ -44,14 +44,14 @@ func parseLoad(loadavgBytes []byte) (*LoadAvg, error) {
loads := make([]float64, 3)
parts := strings.Fields(string(loadavgBytes))
if len(parts) < 3 {
return nil, fmt.Errorf("malformed loadavg line: too few fields in loadavg string: %q", string(loadavgBytes))
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, string(loadavgBytes))
}
var err error
for i, load := range parts[0:3] {
loads[i], err = strconv.ParseFloat(load, 64)
if err != nil {
return nil, fmt.Errorf("could not parse load %q: %w", load, err)
return nil, fmt.Errorf("%s: Cannot parse load: %f: %w", ErrFileParse, loads[i], err)
}
}
return &LoadAvg{

View File

@ -70,7 +70,7 @@ func (fs FS) MDStat() ([]MDStat, error) {
}
mdstat, err := parseMDStat(data)
if err != nil {
return nil, fmt.Errorf("error parsing mdstat %q: %w", fs.proc.Path("mdstat"), err)
return nil, fmt.Errorf("%s: Cannot parse %v: %w", ErrFileParse, fs.proc.Path("mdstat"), err)
}
return mdstat, nil
}
@ -90,13 +90,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
deviceFields := strings.Fields(line)
if len(deviceFields) < 3 {
return nil, fmt.Errorf("not enough fields in mdline (expected at least 3): %s", line)
return nil, fmt.Errorf("%s: Expected 3+ lines, got %q", ErrFileParse, line)
}
mdName := deviceFields[0] // mdx
state := deviceFields[2] // active or inactive
if len(lines) <= i+3 {
return nil, fmt.Errorf("error parsing %q: too few lines for md device", mdName)
return nil, fmt.Errorf("%w: Too few lines for md device: %q", ErrFileParse, mdName)
}
// Failed disks have the suffix (F) & Spare disks have the suffix (S).
@ -105,7 +105,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
active, total, down, size, err := evalStatusLine(lines[i], lines[i+1])
if err != nil {
return nil, fmt.Errorf("error parsing md device lines: %w", err)
return nil, fmt.Errorf("%s: Cannot parse md device lines: %v: %w", ErrFileParse, active, err)
}
syncLineIdx := i + 2
@ -140,7 +140,7 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
} else {
syncedBlocks, pct, finish, speed, err = evalRecoveryLine(lines[syncLineIdx])
if err != nil {
return nil, fmt.Errorf("error parsing sync line in md device %q: %w", mdName, err)
return nil, fmt.Errorf("%s: Cannot parse sync line in md device: %q: %w", ErrFileParse, mdName, err)
}
}
}
@ -168,13 +168,13 @@ func parseMDStat(mdStatData []byte) ([]MDStat, error) {
func evalStatusLine(deviceLine, statusLine string) (active, total, down, size int64, err error) {
statusFields := strings.Fields(statusLine)
if len(statusFields) < 1 {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q", statusLine)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
sizeStr := statusFields[0]
size, err = strconv.ParseInt(sizeStr, 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
if strings.Contains(deviceLine, "raid0") || strings.Contains(deviceLine, "linear") {
@ -189,17 +189,17 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
matches := statusLineRE.FindStringSubmatch(statusLine)
if len(matches) != 5 {
return 0, 0, 0, 0, fmt.Errorf("couldn't find all the substring matches: %s", statusLine)
return 0, 0, 0, 0, fmt.Errorf("%s: Could not fild all substring matches %s: %w", ErrFileParse, statusLine, err)
}
total, err = strconv.ParseInt(matches[2], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected statusline %q: %w", ErrFileParse, statusLine, err)
}
active, err = strconv.ParseInt(matches[3], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("unexpected statusLine %q: %w", statusLine, err)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected active %d: %w", ErrFileParse, active, err)
}
down = int64(strings.Count(matches[4], "_"))
@ -209,42 +209,42 @@ func evalStatusLine(deviceLine, statusLine string) (active, total, down, size in
func evalRecoveryLine(recoveryLine string) (syncedBlocks int64, pct float64, finish float64, speed float64, err error) {
matches := recoveryLineBlocksRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return 0, 0, 0, 0, fmt.Errorf("unexpected recoveryLine: %s", recoveryLine)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected recoveryLine %s: %w", ErrFileParse, recoveryLine, err)
}
syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64)
if err != nil {
return 0, 0, 0, 0, fmt.Errorf("error parsing int from recoveryLine %q: %w", recoveryLine, err)
return 0, 0, 0, 0, fmt.Errorf("%s: Unexpected parsing of recoveryLine %q: %w", ErrFileParse, recoveryLine, err)
}
// Get percentage complete
matches = recoveryLinePctRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, 0, 0, 0, fmt.Errorf("unexpected recoveryLine matching percentage: %s", recoveryLine)
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching percentage %s", ErrFileParse, recoveryLine)
}
pct, err = strconv.ParseFloat(strings.TrimSpace(matches[1]), 64)
if err != nil {
return syncedBlocks, 0, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
return syncedBlocks, 0, 0, 0, fmt.Errorf("%w: Error parsing float from recoveryLine %q", ErrFileParse, recoveryLine)
}
// Get time expected left to complete
matches = recoveryLineFinishRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, 0, 0, fmt.Errorf("unexpected recoveryLine matching est. finish time: %s", recoveryLine)
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unexpected recoveryLine matching est. finish time: %s", ErrFileParse, recoveryLine)
}
finish, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, 0, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
return syncedBlocks, pct, 0, 0, fmt.Errorf("%w: Unable to parse float from recoveryLine: %q", ErrFileParse, recoveryLine)
}
// Get recovery speed
matches = recoveryLineSpeedRE.FindStringSubmatch(recoveryLine)
if len(matches) != 2 {
return syncedBlocks, pct, finish, 0, fmt.Errorf("unexpected recoveryLine matching speed: %s", recoveryLine)
return syncedBlocks, pct, finish, 0, fmt.Errorf("%w: Unexpected recoveryLine value: %s", ErrFileParse, recoveryLine)
}
speed, err = strconv.ParseFloat(matches[1], 64)
if err != nil {
return syncedBlocks, pct, finish, 0, fmt.Errorf("error parsing float from recoveryLine %q: %w", recoveryLine, err)
return syncedBlocks, pct, finish, 0, fmt.Errorf("%s: Error parsing float from recoveryLine: %q: %w", ErrFileParse, recoveryLine, err)
}
return syncedBlocks, pct, finish, speed, nil

View File

@ -152,7 +152,7 @@ func (fs FS) Meminfo() (Meminfo, error) {
m, err := parseMemInfo(bytes.NewReader(b))
if err != nil {
return Meminfo{}, fmt.Errorf("failed to parse meminfo: %w", err)
return Meminfo{}, fmt.Errorf("%s: %w", ErrFileParse, err)
}
return *m, nil
@ -165,7 +165,7 @@ func parseMemInfo(r io.Reader) (*Meminfo, error) {
// Each line has at least a name and value; we ignore the unit.
fields := strings.Fields(s.Text())
if len(fields) < 2 {
return nil, fmt.Errorf("malformed meminfo line: %q", s.Text())
return nil, fmt.Errorf("%w: Malformed line %q", ErrFileParse, s.Text())
}
v, err := strconv.ParseUint(fields[1], 0, 64)

View File

@ -78,11 +78,11 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mountInfo := strings.Split(mountString, " ")
mountInfoLength := len(mountInfo)
if mountInfoLength < 10 {
return nil, fmt.Errorf("couldn't find enough fields in mount string: %s", mountString)
return nil, fmt.Errorf("%w: Too few fields in mount string: %s", ErrFileParse, mountString)
}
if mountInfo[mountInfoLength-4] != "-" {
return nil, fmt.Errorf("couldn't find separator in expected field: %s", mountInfo[mountInfoLength-4])
return nil, fmt.Errorf("%w: couldn't find separator in expected field: %s", ErrFileParse, mountInfo[mountInfoLength-4])
}
mount := &MountInfo{
@ -98,18 +98,18 @@ func parseMountInfoString(mountString string) (*MountInfo, error) {
mount.MountID, err = strconv.Atoi(mountInfo[0])
if err != nil {
return nil, fmt.Errorf("failed to parse mount ID")
return nil, fmt.Errorf("%w: mount ID: %q", ErrFileParse, mount.MountID)
}
mount.ParentID, err = strconv.Atoi(mountInfo[1])
if err != nil {
return nil, fmt.Errorf("failed to parse parent ID")
return nil, fmt.Errorf("%w: parent ID: %q", ErrFileParse, mount.ParentID)
}
// Has optional fields, which is a space separated list of values.
// Example: shared:2 master:7
if mountInfo[6] != "" {
mount.OptionalFields, err = mountOptionsParseOptionalFields(mountInfo[6 : mountInfoLength-4])
if err != nil {
return nil, err
return nil, fmt.Errorf("%s: %w", ErrFileParse, err)
}
}
return mount, nil

View File

@ -44,6 +44,14 @@ const (
fieldTransport11TCPLen = 13
fieldTransport11UDPLen = 10
// kernel version >= 4.14 MaxLen
// See: https://elixir.bootlin.com/linux/v6.4.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L393
fieldTransport11RDMAMaxLen = 28
// kernel version <= 4.2 MinLen
// See: https://elixir.bootlin.com/linux/v4.2.8/source/net/sunrpc/xprtrdma/xprt_rdma.h#L331
fieldTransport11RDMAMinLen = 20
)
// A Mount is a device mount parsed from /proc/[pid]/mountstats.
@ -233,6 +241,33 @@ type NFSTransportStats struct {
// A running counter, incremented on each request as the current size of the
// pending queue.
CumulativePendingQueue uint64
// Stats below only available with stat version 1.1.
// Transport over RDMA
// accessed when sending a call
ReadChunkCount uint64
WriteChunkCount uint64
ReplyChunkCount uint64
TotalRdmaRequest uint64
// rarely accessed error counters
PullupCopyCount uint64
HardwayRegisterCount uint64
FailedMarshalCount uint64
BadReplyCount uint64
MrsRecovered uint64
MrsOrphaned uint64
MrsAllocated uint64
EmptySendctxQ uint64
// accessed when receiving a reply
TotalRdmaReply uint64
FixupCopyCount uint64
ReplyWaitsForSend uint64
LocalInvNeeded uint64
NomsgCallCount uint64
BcallCount uint64
}
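These new RDMA counters surface through the same mountstats readers as the existing transport fields. A minimal sketch, assuming the package-level `procfs.Self` helper, `Proc.MountStats`, and the `*procfs.MountStatsNFS` concrete type behind `Mount.Stats` (none of which appear in this hunk):

```go
package example

import (
	"fmt"

	"github.com/prometheus/procfs"
)

// printRDMAStats prints a few of the new RDMA transport counters for NFS mounts.
func printRDMAStats() error {
	p, err := procfs.Self()
	if err != nil {
		return err
	}
	mounts, err := p.MountStats()
	if err != nil {
		return err
	}
	for _, m := range mounts {
		nfs, ok := m.Stats.(*procfs.MountStatsNFS)
		if !ok || nfs.Transport.Protocol != "rdma" {
			continue // only NFS mounts on the rdma transport report these counters
		}
		fmt.Printf("%s: rdma requests=%d replies=%d read chunks=%d\n",
			m.Mount, nfs.Transport.TotalRdmaRequest, nfs.Transport.TotalRdmaReply, nfs.Transport.ReadChunkCount)
	}
	return nil
}
```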
// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice
@ -266,7 +301,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
if len(ss) > deviceEntryLen {
// Only NFSv3 and v4 are supported for parsing statistics
if m.Type != nfs3Type && m.Type != nfs4Type {
return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type)
return nil, fmt.Errorf("%w: Cannot parse MountStats for %q", ErrFileParse, m.Type)
}
statVersion := strings.TrimPrefix(ss[8], statVersionPrefix)
@ -290,7 +325,7 @@ func parseMountStats(r io.Reader) ([]*Mount, error) {
// device [device] mounted on [mount] with fstype [type]
func parseMount(ss []string) (*Mount, error) {
if len(ss) < deviceEntryLen {
return nil, fmt.Errorf("invalid device entry: %v", ss)
return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
}
// Check for specific words appearing at specific indices to ensure
@ -308,7 +343,7 @@ func parseMount(ss []string) (*Mount, error) {
for _, f := range format {
if ss[f.i] != f.s {
return nil, fmt.Errorf("invalid device entry: %v", ss)
return nil, fmt.Errorf("%w: Invalid device %q", ErrFileParse, ss)
}
}
@ -345,7 +380,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
switch ss[0] {
case fieldOpts:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
if stats.Opts == nil {
stats.Opts = map[string]string{}
@ -360,7 +395,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
}
case fieldAge:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
// Age integer is in seconds
d, err := time.ParseDuration(ss[1] + "s")
@ -371,7 +406,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Age = d
case fieldBytes:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
return nil, fmt.Errorf("%w: Incomplete information for NFS stats: %v", ErrFileParse, ss)
}
bstats, err := parseNFSBytesStats(ss[1:])
if err != nil {
@ -381,7 +416,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Bytes = *bstats
case fieldEvents:
if len(ss) < 2 {
return nil, fmt.Errorf("not enough information for NFS stats: %v", ss)
return nil, fmt.Errorf("%w: Incomplete information for NFS events: %v", ErrFileParse, ss)
}
estats, err := parseNFSEventsStats(ss[1:])
if err != nil {
@ -391,7 +426,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
stats.Events = *estats
case fieldTransport:
if len(ss) < 3 {
return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss)
return nil, fmt.Errorf("%w: Incomplete information for NFS transport stats: %v", ErrFileParse, ss)
}
tstats, err := parseNFSTransportStats(ss[1:], statVersion)
@ -430,7 +465,7 @@ func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, e
// integer fields.
func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
if len(ss) != fieldBytesLen {
return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss)
return nil, fmt.Errorf("%w: Invalid NFS bytes stats: %v", ErrFileParse, ss)
}
ns := make([]uint64, 0, fieldBytesLen)
@ -459,7 +494,7 @@ func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) {
// integer fields.
func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) {
if len(ss) != fieldEventsLen {
return nil, fmt.Errorf("invalid NFS events stats: %v", ss)
return nil, fmt.Errorf("%w: invalid NFS events stats: %v", ErrFileParse, ss)
}
ns := make([]uint64, 0, fieldEventsLen)
@ -523,7 +558,7 @@ func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) {
}
if len(ss) < minFields {
return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss)
return nil, fmt.Errorf("%w: invalid NFS per-operations stats: %v", ErrFileParse, ss)
}
// Skip string operation name for integers
@ -576,10 +611,10 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
} else if protocol == "udp" {
expectedLength = fieldTransport10UDPLen
} else {
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.0 statement: %v", protocol, ss)
return nil, fmt.Errorf("%w: Invalid NFS protocol \"%s\" in stats 1.0 statement: %v", ErrFileParse, protocol, ss)
}
if len(ss) != expectedLength {
return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss)
return nil, fmt.Errorf("%w: Invalid NFS transport stats 1.0 statement: %v", ErrFileParse, ss)
}
case statVersion11:
var expectedLength int
@ -587,14 +622,17 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
expectedLength = fieldTransport11TCPLen
} else if protocol == "udp" {
expectedLength = fieldTransport11UDPLen
} else if protocol == "rdma" {
expectedLength = fieldTransport11RDMAMinLen
} else {
return nil, fmt.Errorf("invalid NFS protocol \"%s\" in stats 1.1 statement: %v", protocol, ss)
return nil, fmt.Errorf("%w: invalid NFS protocol \"%s\" in stats 1.1 statement: %v", ErrFileParse, protocol, ss)
}
if len(ss) != expectedLength {
return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss)
if (len(ss) != expectedLength && (protocol == "tcp" || protocol == "udp")) ||
(protocol == "rdma" && len(ss) < expectedLength) {
return nil, fmt.Errorf("%w: invalid NFS transport stats 1.1 statement: %v, protocol: %v", ErrFileParse, ss, protocol)
}
default:
return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion)
return nil, fmt.Errorf("%s: Unrecognized NFS transport stats version: %q, protocol: %v", ErrFileParse, statVersion, protocol)
}
// Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay
@ -604,7 +642,9 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// Note: slice length must be set to length of v1.1 stats to avoid a panic when
// only v1.0 stats are present.
// See: https://github.com/prometheus/node_exporter/issues/571.
ns := make([]uint64, fieldTransport11TCPLen)
//
// Note: NFS Over RDMA slice length is fieldTransport11RDMAMaxLen
ns := make([]uint64, fieldTransport11RDMAMaxLen+3)
for i, s := range ss {
n, err := strconv.ParseUint(s, 10, 64)
if err != nil {
@ -622,9 +662,14 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
// we set them to 0 here.
if protocol == "udp" {
ns = append(ns[:2], append(make([]uint64, 3), ns[2:]...)...)
} else if protocol == "tcp" {
ns = append(ns[:fieldTransport11TCPLen], make([]uint64, fieldTransport11RDMAMaxLen-fieldTransport11TCPLen+3)...)
} else if protocol == "rdma" {
ns = append(ns[:fieldTransport10TCPLen], append(make([]uint64, 3), ns[fieldTransport10TCPLen:]...)...)
}
return &NFSTransportStats{
// NFS xprt over tcp or udp
Protocol: protocol,
Port: ns[0],
Bind: ns[1],
@ -636,8 +681,32 @@ func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats
BadTransactionIDs: ns[7],
CumulativeActiveRequests: ns[8],
CumulativeBacklog: ns[9],
MaximumRPCSlotsUsed: ns[10],
CumulativeSendingQueue: ns[11],
CumulativePendingQueue: ns[12],
// NFS xprt over tcp or udp
// And statVersion 1.1
MaximumRPCSlotsUsed: ns[10],
CumulativeSendingQueue: ns[11],
CumulativePendingQueue: ns[12],
// NFS xprt over rdma
// And stat Version 1.1
ReadChunkCount: ns[13],
WriteChunkCount: ns[14],
ReplyChunkCount: ns[15],
TotalRdmaRequest: ns[16],
PullupCopyCount: ns[17],
HardwayRegisterCount: ns[18],
FailedMarshalCount: ns[19],
BadReplyCount: ns[20],
MrsRecovered: ns[21],
MrsOrphaned: ns[22],
MrsAllocated: ns[23],
EmptySendctxQ: ns[24],
TotalRdmaReply: ns[25],
FixupCopyCount: ns[26],
ReplyWaitsForSend: ns[27],
LocalInvNeeded: ns[28],
NomsgCallCount: ns[29],
BcallCount: ns[30],
}, nil
}

View File

@ -58,7 +58,7 @@ func readConntrackStat(path string) ([]ConntrackStatEntry, error) {
stat, err := parseConntrackStat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read conntrack stats from %q: %w", path, err)
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, path, err)
}
return stat, nil
@ -86,11 +86,12 @@ func parseConntrackStat(r io.Reader) ([]ConntrackStatEntry, error) {
func parseConntrackStatEntry(fields []string) (*ConntrackStatEntry, error) {
entries, err := util.ParseHexUint64s(fields)
if err != nil {
return nil, fmt.Errorf("invalid conntrackstat entry, couldn't parse fields: %s", err)
return nil, fmt.Errorf("%s: Cannot parse entry: %d: %w", ErrFileParse, entries, err)
}
numEntries := len(entries)
if numEntries < 16 || numEntries > 17 {
return nil, fmt.Errorf("invalid conntrackstat entry, invalid number of fields: %d", numEntries)
return nil,
fmt.Errorf("%w: invalid conntrackstat entry, invalid number of fields: %d", ErrFileParse, numEntries)
}
stats := &ConntrackStatEntry{

View File

@ -130,7 +130,7 @@ func parseIP(hexIP string) (net.IP, error) {
var byteIP []byte
byteIP, err := hex.DecodeString(hexIP)
if err != nil {
return nil, fmt.Errorf("cannot parse address field in socket line %q", hexIP)
return nil, fmt.Errorf("%s: Cannot parse socket field in %q: %w", ErrFileParse, hexIP, err)
}
switch len(byteIP) {
case 4:
@ -144,7 +144,7 @@ func parseIP(hexIP string) (net.IP, error) {
}
return i, nil
default:
return nil, fmt.Errorf("Unable to parse IP %s", hexIP)
return nil, fmt.Errorf("%s: Unable to parse IP %s: %w", ErrFileParse, hexIP, nil)
}
}
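For context, the hex addresses handled here come from files such as /proc/net/tcp, where an IPv4 `local_address` like `0100007F:0050` is the 32-bit address in kernel (host) byte order followed by a big-endian hex port. A standalone sketch of the same decoding on a little-endian host (names are illustrative, not part of the package):

```go
package example

import (
	"encoding/hex"
	"fmt"
	"net"
	"strconv"
)

// decodeHexIPv4Port turns "0100007F:0050" into 127.0.0.1:80 on little-endian hosts.
func decodeHexIPv4Port(s string) (net.IP, uint16, error) {
	if len(s) != 13 || s[8] != ':' {
		return nil, 0, fmt.Errorf("unexpected address format %q", s)
	}
	b, err := hex.DecodeString(s[:8])
	if err != nil {
		return nil, 0, fmt.Errorf("bad address %q: %w", s, err)
	}
	ip := net.IPv4(b[3], b[2], b[1], b[0]) // reverse out of host byte order
	port, err := strconv.ParseUint(s[9:], 16, 16)
	if err != nil {
		return nil, 0, fmt.Errorf("bad port %q: %w", s, err)
	}
	return ip, uint16(port), nil
}
```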
@ -153,7 +153,8 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
line := &netIPSocketLine{}
if len(fields) < 10 {
return nil, fmt.Errorf(
"cannot parse net socket line as it has less then 10 columns %q",
"%w: Less than 10 columns found %q",
ErrFileParse,
strings.Join(fields, " "),
)
}
@ -162,64 +163,65 @@ func parseNetIPSocketLine(fields []string) (*netIPSocketLine, error) {
// sl
s := strings.Split(fields[0], ":")
if len(s) != 2 {
return nil, fmt.Errorf("cannot parse sl field in socket line %q", fields[0])
return nil, fmt.Errorf("%w: Unable to parse sl field in line %q", ErrFileParse, fields[0])
}
if line.Sl, err = strconv.ParseUint(s[0], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse sl value in socket line: %w", err)
return nil, fmt.Errorf("%s: Unable to parse sl field in %q: %w", ErrFileParse, line.Sl, err)
}
// local_address
l := strings.Split(fields[1], ":")
if len(l) != 2 {
return nil, fmt.Errorf("cannot parse local_address field in socket line %q", fields[1])
return nil, fmt.Errorf("%w: Unable to parse local_address field in %q", ErrFileParse, fields[1])
}
if line.LocalAddr, err = parseIP(l[0]); err != nil {
return nil, err
}
if line.LocalPort, err = strconv.ParseUint(l[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse local_address port value in socket line: %w", err)
return nil, fmt.Errorf("%s: Unable to parse local_address port value line %q: %w", ErrFileParse, line.LocalPort, err)
}
// remote_address
r := strings.Split(fields[2], ":")
if len(r) != 2 {
return nil, fmt.Errorf("cannot parse rem_address field in socket line %q", fields[1])
return nil, fmt.Errorf("%w: Unable to parse rem_address field in %q", ErrFileParse, fields[1])
}
if line.RemAddr, err = parseIP(r[0]); err != nil {
return nil, err
}
if line.RemPort, err = strconv.ParseUint(r[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rem_address port value in socket line: %w", err)
return nil, fmt.Errorf("%s: Cannot parse rem_address port value in %q: %w", ErrFileParse, line.RemPort, err)
}
// st
if line.St, err = strconv.ParseUint(fields[3], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse st value in socket line: %w", err)
return nil, fmt.Errorf("%s: Cannot parse st value in %q: %w", ErrFileParse, line.St, err)
}
// tx_queue and rx_queue
q := strings.Split(fields[4], ":")
if len(q) != 2 {
return nil, fmt.Errorf(
"cannot parse tx/rx queues in socket line as it has a missing colon %q",
"%w: Missing colon for tx/rx queues in socket line %q",
ErrFileParse,
fields[4],
)
}
if line.TxQueue, err = strconv.ParseUint(q[0], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse tx_queue value in socket line: %w", err)
return nil, fmt.Errorf("%s: Cannot parse tx_queue value in %q: %w", ErrFileParse, line.TxQueue, err)
}
if line.RxQueue, err = strconv.ParseUint(q[1], 16, 64); err != nil {
return nil, fmt.Errorf("cannot parse rx_queue value in socket line: %w", err)
return nil, fmt.Errorf("%s: Cannot parse trx_queue value in %q: %w", ErrFileParse, line.RxQueue, err)
}
// uid
if line.UID, err = strconv.ParseUint(fields[7], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse uid value in socket line: %w", err)
return nil, fmt.Errorf("%s: Cannot parse UID value in %q: %w", ErrFileParse, line.UID, err)
}
// inode
if line.Inode, err = strconv.ParseUint(fields[9], 0, 64); err != nil {
return nil, fmt.Errorf("cannot parse inode value in socket line: %w", err)
return nil, fmt.Errorf("%s: Cannot parse inode value in %q: %w", ErrFileParse, line.Inode, err)
}
return line, nil


@ -131,7 +131,7 @@ func (ps NetProtocolStats) parseLine(rawLine string) (*NetProtocolStatLine, erro
} else if fields[6] == disabled {
line.Slab = false
} else {
return nil, fmt.Errorf("unable to parse capability for protocol: %s", line.Name)
return nil, fmt.Errorf("%w: capability for protocol: %s", ErrFileParse, line.Name)
}
line.ModuleName = fields[7]
@ -173,7 +173,7 @@ func (pc *NetProtocolCapabilities) parseCapabilities(capabilities []string) erro
} else if capabilities[i] == "n" {
*capabilityFields[i] = false
} else {
return fmt.Errorf("unable to parse capability block for protocol: position %d", i)
return fmt.Errorf("%w: capability block for protocol: position %d", ErrFileParse, i)
}
}
return nil

vendor/github.com/prometheus/procfs/net_route.go (new vendored file, 143 lines)

@ -0,0 +1,143 @@
// Copyright 2023 The Prometheus Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package procfs
import (
"bufio"
"bytes"
"fmt"
"io"
"strconv"
"strings"
"github.com/prometheus/procfs/internal/util"
)
const (
blackholeRepresentation string = "*"
blackholeIfaceName string = "blackhole"
routeLineColumns int = 11
)
// A NetRouteLine represents one line from net/route.
type NetRouteLine struct {
Iface string
Destination uint32
Gateway uint32
Flags uint32
RefCnt uint32
Use uint32
Metric uint32
Mask uint32
MTU uint32
Window uint32
IRTT uint32
}
func (fs FS) NetRoute() ([]NetRouteLine, error) {
return readNetRoute(fs.proc.Path("net", "route"))
}
func readNetRoute(path string) ([]NetRouteLine, error) {
b, err := util.ReadFileNoStat(path)
if err != nil {
return nil, err
}
routelines, err := parseNetRoute(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read net route from %s: %w", path, err)
}
return routelines, nil
}
func parseNetRoute(r io.Reader) ([]NetRouteLine, error) {
var routelines []NetRouteLine
scanner := bufio.NewScanner(r)
scanner.Scan()
for scanner.Scan() {
fields := strings.Fields(scanner.Text())
routeline, err := parseNetRouteLine(fields)
if err != nil {
return nil, err
}
routelines = append(routelines, *routeline)
}
return routelines, nil
}
func parseNetRouteLine(fields []string) (*NetRouteLine, error) {
if len(fields) != routeLineColumns {
return nil, fmt.Errorf("invalid routeline, num of digits: %d", len(fields))
}
iface := fields[0]
if iface == blackholeRepresentation {
iface = blackholeIfaceName
}
destination, err := strconv.ParseUint(fields[1], 16, 32)
if err != nil {
return nil, err
}
gateway, err := strconv.ParseUint(fields[2], 16, 32)
if err != nil {
return nil, err
}
flags, err := strconv.ParseUint(fields[3], 10, 32)
if err != nil {
return nil, err
}
refcnt, err := strconv.ParseUint(fields[4], 10, 32)
if err != nil {
return nil, err
}
use, err := strconv.ParseUint(fields[5], 10, 32)
if err != nil {
return nil, err
}
metric, err := strconv.ParseUint(fields[6], 10, 32)
if err != nil {
return nil, err
}
mask, err := strconv.ParseUint(fields[7], 16, 32)
if err != nil {
return nil, err
}
mtu, err := strconv.ParseUint(fields[8], 10, 32)
if err != nil {
return nil, err
}
window, err := strconv.ParseUint(fields[9], 10, 32)
if err != nil {
return nil, err
}
irtt, err := strconv.ParseUint(fields[10], 10, 32)
if err != nil {
return nil, err
}
routeline := &NetRouteLine{
Iface: iface,
Destination: uint32(destination),
Gateway: uint32(gateway),
Flags: uint32(flags),
RefCnt: uint32(refcnt),
Use: uint32(use),
Metric: uint32(metric),
Mask: uint32(mask),
MTU: uint32(mtu),
Window: uint32(window),
IRTT: uint32(irtt),
}
return routeline, nil
}
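
The new net_route.go file above adds an exported NetRoute accessor that parses /proc/net/route. A minimal sketch of how a consumer of the vendored package could use it; the "/proc" mount point and the fields chosen for printing are illustrative assumptions, not part of this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	// Open the procfs mount; "/proc" is an assumption for this sketch.
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatalf("open /proc: %v", err)
	}

	// NetRoute parses /proc/net/route into one NetRouteLine per entry.
	routes, err := fs.NetRoute()
	if err != nil {
		log.Fatalf("read routes: %v", err)
	}
	for _, r := range routes {
		// Destination, Gateway and Mask hold the raw hex-decoded uint32
		// values from the kernel routing table.
		fmt.Printf("%-10s dst=%08x gw=%08x mask=%08x metric=%d\n",
			r.Iface, r.Destination, r.Gateway, r.Mask, r.Metric)
	}
}
```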


@ -16,7 +16,6 @@ package procfs
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"strings"
@ -70,7 +69,7 @@ func readSockstat(name string) (*NetSockstat, error) {
stat, err := parseSockstat(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to read sockstats from %q: %w", name, err)
return nil, fmt.Errorf("%s: sockstats from %q: %w", ErrFileRead, name, err)
}
return stat, nil
@ -84,13 +83,13 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// Expect a minimum of a protocol and one key/value pair.
fields := strings.Split(s.Text(), " ")
if len(fields) < 3 {
return nil, fmt.Errorf("malformed sockstat line: %q", s.Text())
return nil, fmt.Errorf("%w: Malformed sockstat line: %q", ErrFileParse, s.Text())
}
// The remaining fields are key/value pairs.
kvs, err := parseSockstatKVs(fields[1:])
if err != nil {
return nil, fmt.Errorf("error parsing sockstat key/value pairs from %q: %w", s.Text(), err)
return nil, fmt.Errorf("%s: sockstat key/value pairs from %q: %w", ErrFileParse, s.Text(), err)
}
// The first field is the protocol. We must trim its colon suffix.
@ -119,7 +118,7 @@ func parseSockstat(r io.Reader) (*NetSockstat, error) {
// parseSockstatKVs parses a string slice into a map of key/value pairs.
func parseSockstatKVs(kvs []string) (map[string]int, error) {
if len(kvs)%2 != 0 {
return nil, errors.New("odd number of fields in key/value pairs")
return nil, fmt.Errorf("%w:: Odd number of fields in key/value pairs %q", ErrFileParse, kvs)
}
// Iterate two values at a time to gather key/value pairs.


@ -64,7 +64,7 @@ func (fs FS) NetSoftnetStat() ([]SoftnetStat, error) {
entries, err := parseSoftnet(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/softnet_stat: %w", err)
return nil, fmt.Errorf("%s: /proc/net/softnet_stat: %w", ErrFileParse, err)
}
return entries, nil
@ -83,7 +83,7 @@ func parseSoftnet(r io.Reader) ([]SoftnetStat, error) {
softnetStat := SoftnetStat{}
if width < minColumns {
return nil, fmt.Errorf("%d columns were detected, but at least %d were expected", width, minColumns)
return nil, fmt.Errorf("%w: detected %d columns, but expected at least %d", ErrFileParse, width, minColumns)
}
// Linux 2.6.23 https://elixir.bootlin.com/linux/v2.6.23/source/net/core/dev.c#L2347


@ -108,14 +108,14 @@ func parseNetUNIX(r io.Reader) (*NetUNIX, error) {
line := s.Text()
item, err := nu.parseLine(line, hasInode, minFields)
if err != nil {
return nil, fmt.Errorf("failed to parse /proc/net/unix data %q: %w", line, err)
return nil, fmt.Errorf("%s: /proc/net/unix encountered data %q: %w", ErrFileParse, line, err)
}
nu.Rows = append(nu.Rows, item)
}
if err := s.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/unix data: %w", err)
return nil, fmt.Errorf("%s: /proc/net/unix encountered data: %w", ErrFileParse, err)
}
return &nu, nil
@ -126,7 +126,7 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
l := len(fields)
if l < min {
return nil, fmt.Errorf("expected at least %d fields but got %d", min, l)
return nil, fmt.Errorf("%w: expected at least %d fields but got %d", ErrFileParse, min, l)
}
// Field offsets are as follows:
@ -136,29 +136,29 @@ func (u *NetUNIX) parseLine(line string, hasInode bool, min int) (*NetUNIXLine,
users, err := u.parseUsers(fields[1])
if err != nil {
return nil, fmt.Errorf("failed to parse ref count %q: %w", fields[1], err)
return nil, fmt.Errorf("%s: ref count %q: %w", ErrFileParse, fields[1], err)
}
flags, err := u.parseFlags(fields[3])
if err != nil {
return nil, fmt.Errorf("failed to parse flags %q: %w", fields[3], err)
return nil, fmt.Errorf("%s: Unable to parse flags %q: %w", ErrFileParse, fields[3], err)
}
typ, err := u.parseType(fields[4])
if err != nil {
return nil, fmt.Errorf("failed to parse type %q: %w", fields[4], err)
return nil, fmt.Errorf("%s: Failed to parse type %q: %w", ErrFileParse, fields[4], err)
}
state, err := u.parseState(fields[5])
if err != nil {
return nil, fmt.Errorf("failed to parse state %q: %w", fields[5], err)
return nil, fmt.Errorf("%s: Failed to parse state %q: %w", ErrFileParse, fields[5], err)
}
var inode uint64
if hasInode {
inode, err = u.parseInode(fields[6])
if err != nil {
return nil, fmt.Errorf("failed to parse inode %q: %w", fields[6], err)
return nil, fmt.Errorf("%s failed to parse inode %q: %w", ErrFileParse, fields[6], err)
}
}


@ -68,7 +68,7 @@ func (fs FS) Wireless() ([]*Wireless, error) {
m, err := parseWireless(bytes.NewReader(b))
if err != nil {
return nil, fmt.Errorf("failed to parse wireless: %w", err)
return nil, fmt.Errorf("%s: wireless: %w", ErrFileParse, err)
}
return m, nil
@ -97,64 +97,64 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
parts := strings.Split(line, ":")
if len(parts) != 2 {
return nil, fmt.Errorf("expected 2 parts after splitting line by ':', got %d for line %q", len(parts), line)
return nil, fmt.Errorf("%w: expected 2 parts after splitting line by ':', got %d for line %q", ErrFileParse, len(parts), line)
}
name := strings.TrimSpace(parts[0])
stats := strings.Fields(parts[1])
if len(stats) < 10 {
return nil, fmt.Errorf("invalid number of fields in line %d, expected at least 10, got %d: %q", n, len(stats), line)
return nil, fmt.Errorf("%w: invalid number of fields in line %d, expected 10+, got %d: %q", ErrFileParse, n, len(stats), line)
}
status, err := strconv.ParseUint(stats[0], 16, 16)
if err != nil {
return nil, fmt.Errorf("invalid status in line %d: %q", n, line)
return nil, fmt.Errorf("%w: invalid status in line %d: %q", ErrFileParse, n, line)
}
qlink, err := strconv.Atoi(strings.TrimSuffix(stats[1], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:link as integer %q: %w", qlink, err)
return nil, fmt.Errorf("%s: parse Quality:link as integer %q: %w", ErrFileParse, qlink, err)
}
qlevel, err := strconv.Atoi(strings.TrimSuffix(stats[2], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:level as integer %q: %w", qlevel, err)
return nil, fmt.Errorf("%s: Quality:level as integer %q: %w", ErrFileParse, qlevel, err)
}
qnoise, err := strconv.Atoi(strings.TrimSuffix(stats[3], "."))
if err != nil {
return nil, fmt.Errorf("failed to parse Quality:noise as integer %q: %w", qnoise, err)
return nil, fmt.Errorf("%s: Quality:noise as integer %q: %w", ErrFileParse, qnoise, err)
}
dnwid, err := strconv.Atoi(stats[4])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:nwid as integer %q: %w", dnwid, err)
return nil, fmt.Errorf("%s: Discarded:nwid as integer %q: %w", ErrFileParse, dnwid, err)
}
dcrypt, err := strconv.Atoi(stats[5])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:crypt as integer %q: %w", dcrypt, err)
return nil, fmt.Errorf("%s: Discarded:crypt as integer %q: %w", ErrFileParse, dcrypt, err)
}
dfrag, err := strconv.Atoi(stats[6])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:frag as integer %q: %w", dfrag, err)
return nil, fmt.Errorf("%s: Discarded:frag as integer %q: %w", ErrFileParse, dfrag, err)
}
dretry, err := strconv.Atoi(stats[7])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:retry as integer %q: %w", dretry, err)
return nil, fmt.Errorf("%s: Discarded:retry as integer %q: %w", ErrFileParse, dretry, err)
}
dmisc, err := strconv.Atoi(stats[8])
if err != nil {
return nil, fmt.Errorf("failed to parse Discarded:misc as integer %q: %w", dmisc, err)
return nil, fmt.Errorf("%s: Discarded:misc as integer %q: %w", ErrFileParse, dmisc, err)
}
mbeacon, err := strconv.Atoi(stats[9])
if err != nil {
return nil, fmt.Errorf("failed to parse Missed:beacon as integer %q: %w", mbeacon, err)
return nil, fmt.Errorf("%s: Missed:beacon as integer %q: %w", ErrFileParse, mbeacon, err)
}
w := &Wireless{
@ -175,7 +175,7 @@ func parseWireless(r io.Reader) ([]*Wireless, error) {
}
if err := scanner.Err(); err != nil {
return nil, fmt.Errorf("failed to scan /proc/net/wireless: %w", err)
return nil, fmt.Errorf("%s: Failed to scan /proc/net/wireless: %w", ErrFileRead, err)
}
return interfaces, nil


@ -115,7 +115,7 @@ func (fs FS) NewXfrmStat() (XfrmStat, error) {
fields := strings.Fields(s.Text())
if len(fields) != 2 {
return XfrmStat{}, fmt.Errorf("couldn't parse %q line %q", file.Name(), s.Text())
return XfrmStat{}, fmt.Errorf("%w: %q line %q", ErrFileParse, file.Name(), s.Text())
}
name := fields[0]


@ -15,6 +15,7 @@ package procfs
import (
"bytes"
"errors"
"fmt"
"io"
"os"
@ -35,6 +36,12 @@ type Proc struct {
// Procs represents a list of Proc structs.
type Procs []Proc
var (
ErrFileParse = errors.New("Error Parsing File")
ErrFileRead = errors.New("Error Reading File")
ErrMountPoint = errors.New("Error Accessing Mount point")
)
func (p Procs) Len() int { return len(p) }
func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] }
func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
@ -42,7 +49,7 @@ func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID }
// Self returns a process for the current process read via /proc/self.
func Self() (Proc, error) {
fs, err := NewFS(DefaultMountPoint)
if err != nil {
if err != nil || errors.Unwrap(err) == ErrMountPoint {
return Proc{}, err
}
return fs.Self()
@ -104,7 +111,7 @@ func (fs FS) AllProcs() (Procs, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return Procs{}, fmt.Errorf("could not read %q: %w", d.Name(), err)
return Procs{}, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
}
p := Procs{}
@ -205,7 +212,7 @@ func (p Proc) FileDescriptors() ([]uintptr, error) {
for i, n := range names {
fd, err := strconv.ParseInt(n, 10, 32)
if err != nil {
return nil, fmt.Errorf("could not parse fd %q: %w", n, err)
return nil, fmt.Errorf("%s: Cannot parse line: %v: %w", ErrFileParse, i, err)
}
fds[i] = uintptr(fd)
}
@ -237,7 +244,7 @@ func (p Proc) FileDescriptorTargets() ([]string, error) {
// a process.
func (p Proc) FileDescriptorsLen() (int, error) {
// Use fast path if available (Linux v6.2): https://github.com/torvalds/linux/commit/f1f1f2569901
if p.fs.real {
if p.fs.isReal {
stat, err := os.Stat(p.path("fd"))
if err != nil {
return 0, err
@ -290,7 +297,7 @@ func (p Proc) fileDescriptors() ([]string, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("could not read %q: %w", d.Name(), err)
return nil, fmt.Errorf("%s: Cannot read file: %v: %w", ErrFileRead, names, err)
}
return names, nil
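
The proc.go hunks above introduce the package-level sentinels ErrFileParse, ErrFileRead and ErrMountPoint, and most error strings in this vendor bump now reference them. A hedged sketch of how downstream code could branch on them; note that only call sites wrapping with %w (rather than formatting with %s) will satisfy errors.Is, and NetSockstat is used here only as a convenient example accessor:

```go
package main

import (
	"errors"
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatalf("open /proc: %v", err)
	}

	if _, err := fs.NetSockstat(); err != nil {
		switch {
		case errors.Is(err, procfs.ErrFileParse):
			// The file was readable but a line could not be parsed.
			fmt.Println("malformed sockstat:", err)
		case errors.Is(err, procfs.ErrFileRead):
			// The file could not be read at all.
			fmt.Println("unreadable sockstat:", err)
		default:
			fmt.Println("other error:", err)
		}
	}
}
```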


@ -51,7 +51,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
fields := strings.SplitN(cgroupStr, ":", 3)
if len(fields) < 3 {
return nil, fmt.Errorf("at least 3 fields required, found %d fields in cgroup string: %s", len(fields), cgroupStr)
return nil, fmt.Errorf("%w: 3+ fields required, found %d fields in cgroup string: %s", ErrFileParse, len(fields), cgroupStr)
}
cgroup := &Cgroup{
@ -60,7 +60,7 @@ func parseCgroupString(cgroupStr string) (*Cgroup, error) {
}
cgroup.HierarchyID, err = strconv.Atoi(fields[0])
if err != nil {
return nil, fmt.Errorf("failed to parse hierarchy ID")
return nil, fmt.Errorf("%w: hierarchy ID: %q", ErrFileParse, cgroup.HierarchyID)
}
if fields[1] != "" {
ssNames := strings.Split(fields[1], ",")


@ -46,7 +46,7 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
fields := strings.Fields(CgroupSummaryStr)
// require at least 4 fields
if len(fields) < 4 {
return nil, fmt.Errorf("at least 4 fields required, found %d fields in cgroup info string: %s", len(fields), CgroupSummaryStr)
return nil, fmt.Errorf("%w: 4+ fields required, found %d fields in cgroup info string: %s", ErrFileParse, len(fields), CgroupSummaryStr)
}
CgroupSummary := &CgroupSummary{
@ -54,15 +54,15 @@ func parseCgroupSummaryString(CgroupSummaryStr string) (*CgroupSummary, error) {
}
CgroupSummary.Hierarchy, err = strconv.Atoi(fields[1])
if err != nil {
return nil, fmt.Errorf("failed to parse hierarchy ID")
return nil, fmt.Errorf("%w: Unable to parse hierarchy ID from %q", ErrFileParse, fields[1])
}
CgroupSummary.Cgroups, err = strconv.Atoi(fields[2])
if err != nil {
return nil, fmt.Errorf("failed to parse Cgroup Num")
return nil, fmt.Errorf("%w: Unable to parse Cgroup Num from %q", ErrFileParse, fields[2])
}
CgroupSummary.Enabled, err = strconv.Atoi(fields[3])
if err != nil {
return nil, fmt.Errorf("failed to parse Enabled")
return nil, fmt.Errorf("%w: Unable to parse Enabled from %q", ErrFileParse, fields[3])
}
return CgroupSummary, nil
}


@ -26,6 +26,7 @@ var (
rPos = regexp.MustCompile(`^pos:\s+(\d+)$`)
rFlags = regexp.MustCompile(`^flags:\s+(\d+)$`)
rMntID = regexp.MustCompile(`^mnt_id:\s+(\d+)$`)
rIno = regexp.MustCompile(`^ino:\s+(\d+)$`)
rInotify = regexp.MustCompile(`^inotify`)
rInotifyParts = regexp.MustCompile(`^inotify\s+wd:([0-9a-f]+)\s+ino:([0-9a-f]+)\s+sdev:([0-9a-f]+)(?:\s+mask:([0-9a-f]+))?`)
)
@ -40,6 +41,8 @@ type ProcFDInfo struct {
Flags string
// Mount point ID
MntID string
// Inode number
Ino string
// List of inotify lines (structured) in the fdinfo file (kernel 3.8+ only)
InotifyInfos []InotifyInfo
}
@ -51,7 +54,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
return nil, err
}
var text, pos, flags, mntid string
var text, pos, flags, mntid, ino string
var inotify []InotifyInfo
scanner := bufio.NewScanner(bytes.NewReader(data))
@ -63,6 +66,8 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
flags = rFlags.FindStringSubmatch(text)[1]
} else if rMntID.MatchString(text) {
mntid = rMntID.FindStringSubmatch(text)[1]
} else if rIno.MatchString(text) {
ino = rIno.FindStringSubmatch(text)[1]
} else if rInotify.MatchString(text) {
newInotify, err := parseInotifyInfo(text)
if err != nil {
@ -77,6 +82,7 @@ func (p Proc) FDInfo(fd string) (*ProcFDInfo, error) {
Pos: pos,
Flags: flags,
MntID: mntid,
Ino: ino,
InotifyInfos: inotify,
}
@ -111,7 +117,7 @@ func parseInotifyInfo(line string) (*InotifyInfo, error) {
}
return i, nil
}
return nil, fmt.Errorf("invalid inode entry: %q", line)
return nil, fmt.Errorf("%w: invalid inode entry: %q", ErrFileParse, line)
}
// ProcFDInfos represents a list of ProcFDInfo structs.
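
The fdinfo hunks above add an ino: matcher and a corresponding Ino field to ProcFDInfo. A small sketch of reading it for the current process; iterating FileDescriptors and formatting each fd back to a string is an assumed calling pattern, not something shown in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatalf("open /proc: %v", err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatalf("self: %v", err)
	}

	fds, err := p.FileDescriptors()
	if err != nil {
		log.Fatalf("fds: %v", err)
	}
	for _, fd := range fds {
		info, err := p.FDInfo(fmt.Sprint(fd))
		if err != nil {
			continue // the fd may have been closed between the two reads
		}
		// Ino is the new field populated from the "ino:" line of fdinfo.
		fmt.Printf("fd %d: pos=%s flags=%s ino=%s\n", fd, info.Pos, info.Flags, info.Ino)
	}
}
```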


@ -66,7 +66,7 @@ func parseInterrupts(r io.Reader) (Interrupts, error) {
continue
}
if len(parts) < 2 {
return nil, fmt.Errorf("not enough fields in interrupts (expected at least 2 fields but got %d): %s", len(parts), parts)
return nil, fmt.Errorf("%w: Not enough fields in interrupts (expected 2+ fields but got %d): %s", ErrFileParse, len(parts), parts)
}
intName := parts[0][:len(parts[0])-1] // remove trailing :


@ -103,7 +103,7 @@ func (p Proc) Limits() (ProcLimits, error) {
//fields := limitsMatch.Split(s.Text(), limitsFields)
fields := limitsMatch.FindStringSubmatch(s.Text())
if len(fields) != limitsFields {
return ProcLimits{}, fmt.Errorf("couldn't parse %q line %q", f.Name(), s.Text())
return ProcLimits{}, fmt.Errorf("%w: couldn't parse %q line %q", ErrFileParse, f.Name(), s.Text())
}
switch fields[1] {
@ -154,7 +154,7 @@ func parseUint(s string) (uint64, error) {
}
i, err := strconv.ParseUint(s, 10, 64)
if err != nil {
return 0, fmt.Errorf("couldn't parse value %q: %w", s, err)
return 0, fmt.Errorf("%s: couldn't parse value %q: %w", ErrFileParse, s, err)
}
return i, nil
}


@ -63,17 +63,17 @@ type ProcMap struct {
// parseDevice parses the device token of a line and converts it to a dev_t
// (mkdev) like structure.
func parseDevice(s string) (uint64, error) {
toks := strings.Split(s, ":")
if len(toks) < 2 {
return 0, fmt.Errorf("unexpected number of fields")
i := strings.Index(s, ":")
if i == -1 {
return 0, fmt.Errorf("%w: expected separator `:` in %s", ErrFileParse, s)
}
major, err := strconv.ParseUint(toks[0], 16, 0)
major, err := strconv.ParseUint(s[0:i], 16, 0)
if err != nil {
return 0, err
}
minor, err := strconv.ParseUint(toks[1], 16, 0)
minor, err := strconv.ParseUint(s[i+1:], 16, 0)
if err != nil {
return 0, err
}
@ -93,17 +93,17 @@ func parseAddress(s string) (uintptr, error) {
// parseAddresses parses the start-end address.
func parseAddresses(s string) (uintptr, uintptr, error) {
toks := strings.Split(s, "-")
if len(toks) < 2 {
return 0, 0, fmt.Errorf("invalid address")
idx := strings.Index(s, "-")
if idx == -1 {
return 0, 0, fmt.Errorf("%w: expected separator `-` in %s", ErrFileParse, s)
}
saddr, err := parseAddress(toks[0])
saddr, err := parseAddress(s[0:idx])
if err != nil {
return 0, 0, err
}
eaddr, err := parseAddress(toks[1])
eaddr, err := parseAddress(s[idx+1:])
if err != nil {
return 0, 0, err
}
@ -114,7 +114,7 @@ func parseAddresses(s string) (uintptr, uintptr, error) {
// parsePermissions parses a token and returns any that are set.
func parsePermissions(s string) (*ProcMapPermissions, error) {
if len(s) < 4 {
return nil, fmt.Errorf("invalid permissions token")
return nil, fmt.Errorf("%w: invalid permissions token", ErrFileParse)
}
perms := ProcMapPermissions{}
@ -141,7 +141,7 @@ func parsePermissions(s string) (*ProcMapPermissions, error) {
func parseProcMap(text string) (*ProcMap, error) {
fields := strings.Fields(text)
if len(fields) < 5 {
return nil, fmt.Errorf("truncated procmap entry")
return nil, fmt.Errorf("%w: truncated procmap entry", ErrFileParse)
}
saddr, eaddr, err := parseAddresses(fields[0])


@ -195,8 +195,8 @@ func parseProcNetstat(r io.Reader, fileName string) (ProcNetstat, error) {
// Remove trailing :.
protocol := strings.TrimSuffix(nameParts[0], ":")
if len(nameParts) != len(valueParts) {
return procNetstat, fmt.Errorf("mismatch field count mismatch in %s: %s",
fileName, protocol)
return procNetstat, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
ErrFileParse, fileName, protocol)
}
for i := 1; i < len(nameParts); i++ {
value, err := strconv.ParseFloat(valueParts[i], 64)


@ -40,7 +40,7 @@ func (p Proc) Namespaces() (Namespaces, error) {
names, err := d.Readdirnames(-1)
if err != nil {
return nil, fmt.Errorf("failed to read contents of ns dir: %w", err)
return nil, fmt.Errorf("%s: failed to read contents of ns dir: %w", ErrFileRead, err)
}
ns := make(Namespaces, len(names))
@ -52,13 +52,13 @@ func (p Proc) Namespaces() (Namespaces, error) {
fields := strings.SplitN(target, ":", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("failed to parse namespace type and inode from %q", target)
return nil, fmt.Errorf("%w: namespace type and inode from %q", ErrFileParse, target)
}
typ := fields[0]
inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32)
if err != nil {
return nil, fmt.Errorf("failed to parse inode from %q: %w", fields[1], err)
return nil, fmt.Errorf("%s: inode from %q: %w", ErrFileParse, fields[1], err)
}
ns[name] = Namespace{typ, uint32(inode)}


@ -61,14 +61,14 @@ type PSIStats struct {
func (fs FS) PSIStatsForResource(resource string) (PSIStats, error) {
data, err := util.ReadFileNoStat(fs.proc.Path(fmt.Sprintf("%s/%s", "pressure", resource)))
if err != nil {
return PSIStats{}, fmt.Errorf("psi_stats: unavailable for %q: %w", resource, err)
return PSIStats{}, fmt.Errorf("%s: psi_stats: unavailable for %q: %w", ErrFileRead, resource, err)
}
return parsePSIStats(resource, bytes.NewReader(data))
return parsePSIStats(bytes.NewReader(data))
}
// parsePSIStats parses the specified file for pressure stall information.
func parsePSIStats(resource string, r io.Reader) (PSIStats, error) {
func parsePSIStats(r io.Reader) (PSIStats, error) {
psiStats := PSIStats{}
scanner := bufio.NewScanner(r)
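
The pressure-stall hunks above only simplify the internal parsePSIStats signature; the exported PSIStatsForResource entry point is unchanged. For context, a hedged sketch of the caller side: the "cpu" resource name and the Some/Avg10/Total fields are assumed from the package's existing API and are not shown in this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatalf("open /proc: %v", err)
	}

	// "cpu" is one of the kernel pressure resources (cpu, memory, io).
	stats, err := fs.PSIStatsForResource("cpu")
	if err != nil {
		log.Fatalf("psi: %v", err)
	}
	if stats.Some != nil {
		fmt.Printf("cpu some avg10=%.2f total=%d\n", stats.Some.Avg10, stats.Some.Total)
	}
}
```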


@ -135,12 +135,12 @@ func (s *ProcSMapsRollup) parseLine(line string) error {
}
vBytes := vKBytes * 1024
s.addValue(k, v, vKBytes, vBytes)
s.addValue(k, vBytes)
return nil
}
func (s *ProcSMapsRollup) addValue(k string, vString string, vUint uint64, vUintBytes uint64) {
func (s *ProcSMapsRollup) addValue(k string, vUintBytes uint64) {
switch k {
case "Rss":
s.Rss += vUintBytes


@ -159,8 +159,8 @@ func parseSnmp(r io.Reader, fileName string) (ProcSnmp, error) {
// Remove trailing :.
protocol := strings.TrimSuffix(nameParts[0], ":")
if len(nameParts) != len(valueParts) {
return procSnmp, fmt.Errorf("mismatch field count mismatch in %s: %s",
fileName, protocol)
return procSnmp, fmt.Errorf("%w: mismatch field count mismatch in %s: %s",
ErrFileParse, fileName, protocol)
}
for i := 1; i < len(nameParts); i++ {
value, err := strconv.ParseFloat(valueParts[i], 64)


@ -138,7 +138,7 @@ func (p Proc) Stat() (ProcStat, error) {
)
if l < 0 || r < 0 {
return ProcStat{}, fmt.Errorf("unexpected format, couldn't extract comm %q", data)
return ProcStat{}, fmt.Errorf("%w: unexpected format, couldn't extract comm %q", ErrFileParse, data)
}
s.Comm = string(data[l+1 : r])


@ -23,7 +23,7 @@ import (
)
// ProcStatus provides status information about the process,
// read from /proc/[pid]/stat.
// read from /proc/[pid]/status.
type ProcStatus struct {
// The process ID.
PID int
@ -32,6 +32,8 @@ type ProcStatus struct {
// Thread group ID.
TGID int
// List of Pid namespace.
NSpids []uint64
// Peak virtual memory size.
VmPeak uint64 // nolint:revive
@ -127,6 +129,8 @@ func (s *ProcStatus) fillStatus(k string, vString string, vUint uint64, vUintByt
copy(s.UIDs[:], strings.Split(vString, "\t"))
case "Gid":
copy(s.GIDs[:], strings.Split(vString, "\t"))
case "NSpid":
s.NSpids = calcNSPidsList(vString)
case "VmPeak":
s.VmPeak = vUintBytes
case "VmSize":
@ -200,3 +204,18 @@ func calcCpusAllowedList(cpuString string) []uint64 {
sort.Slice(g, func(i, j int) bool { return g[i] < g[j] })
return g
}
func calcNSPidsList(nspidsString string) []uint64 {
s := strings.Split(nspidsString, " ")
var nspids []uint64
for _, nspid := range s {
nspid, _ := strconv.ParseUint(nspid, 10, 64)
if nspid == 0 {
continue
}
nspids = append(nspids, nspid)
}
return nspids
}
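
The proc_status.go hunks above add an NSpids field populated from the NSpid status line via the new calcNSPidsList helper. A hedged sketch of reading it; the NewStatus accessor and the use of fs.Self come from the package's existing API and are assumptions, not part of this diff:

```go
package main

import (
	"fmt"
	"log"

	"github.com/prometheus/procfs"
)

func main() {
	fs, err := procfs.NewFS("/proc")
	if err != nil {
		log.Fatalf("open /proc: %v", err)
	}
	p, err := fs.Self()
	if err != nil {
		log.Fatalf("self: %v", err)
	}

	status, err := p.NewStatus() // assumed existing accessor for /proc/<pid>/status
	if err != nil {
		log.Fatalf("status: %v", err)
	}
	// NSpids holds the process ID as seen from each nested PID namespace,
	// outermost first; it is empty on kernels without the NSpid field.
	fmt.Println("NSpid:", status.NSpids)
}
```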


@ -44,7 +44,7 @@ func (fs FS) SysctlInts(sysctl string) ([]int, error) {
vp := util.NewValueParser(f)
values[i] = vp.Int()
if err := vp.Err(); err != nil {
return nil, fmt.Errorf("field %d in sysctl %s is not a valid int: %w", i, sysctl, err)
return nil, fmt.Errorf("%s: field %d in sysctl %s is not a valid int: %w", ErrFileParse, i, sysctl, err)
}
}
return values, nil


@ -68,7 +68,7 @@ func parseV21SlabEntry(line string) (*Slab, error) {
l := slabSpace.ReplaceAllString(line, " ")
s := strings.Split(l, " ")
if len(s) != 16 {
return nil, fmt.Errorf("unable to parse: %q", line)
return nil, fmt.Errorf("%w: unable to parse: %q", ErrFileParse, line)
}
var err error
i := &Slab{Name: s[0]}

Some files were not shown because too many files have changed in this diff.