vendor: github.com/docker/cli v28.0.4

This removes the Notary / Docker Content Trust-related (indirect)
dependencies:

Before:

    ls -l bin/build/
    total 131200
    -rwxr-xr-x  1 thajeztah  staff  67039266 Mar 21 09:20 buildx*

    ls -lh bin/build/
    total 131200
    -rwxr-xr-x  1 thajeztah  staff    64M Mar 21 09:20 buildx*

After:

    ls -l bin/build/
    total 127288
    -rwxr-xr-x  1 thajeztah  staff  65168450 Mar 21 09:22 buildx*

    ls -lh bin/build/
    total 127288
    -rwxr-xr-x  1 thajeztah  staff    62M Mar 21 09:22 buildx*

Difference: `67039266 - 65168450 = 1870816` (1.87 MB)

full diff: https://github.com/docker/cli/compare/v28.0.2...v28.0.4

Signed-off-by: Sebastiaan van Stijn <github@gone.nl>
Sebastiaan van Stijn 2025-03-01 02:05:39 +01:00
parent 86e4e77ac1
commit d25e260d2e
146 changed files with 8 additions and 27391 deletions

go.mod (7 changes)

@@ -17,7 +17,7 @@ require (
 github.com/creack/pty v1.1.24
 github.com/davecgh/go-spew v1.1.1
 github.com/distribution/reference v0.6.0
-github.com/docker/cli v28.0.2+incompatible
+github.com/docker/cli v28.0.4+incompatible
 github.com/docker/cli-docs-tool v0.9.0
 github.com/docker/docker v28.0.4+incompatible
 github.com/docker/go-units v0.5.0
@@ -71,7 +71,6 @@
 require (
 github.com/AdaLogics/go-fuzz-headers v0.0.0-20240806141605-e8a1dd7889d6 // indirect
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c // indirect
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d // indirect
 github.com/agext/levenshtein v1.2.3 // indirect
 github.com/apparentlymart/go-cidr v1.0.1 // indirect
 github.com/apparentlymart/go-textseg/v15 v15.0.0 // indirect
@@ -96,10 +95,8 @@
 github.com/cpuguy83/go-md2man/v2 v2.0.6 // indirect
 github.com/docker/distribution v2.8.3+incompatible // indirect
 github.com/docker/docker-credential-helpers v0.9.3 // indirect
-github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c // indirect
 github.com/docker/go-connections v0.5.0 // indirect
 github.com/docker/go-metrics v0.0.1 // indirect
-github.com/docker/libtrust v0.0.0-20160708172513-aabc10ec26b7 // indirect
 github.com/emicklei/go-restful/v3 v3.11.0 // indirect
 github.com/felixge/httpsnoop v1.0.4 // indirect
 github.com/fvbommel/sortorder v1.0.1 // indirect
@@ -127,9 +126,7 @@
 github.com/mailru/easyjson v0.7.7 // indirect
 github.com/mattn/go-runewidth v0.0.15 // indirect
 github.com/mattn/go-shellwords v1.0.12 // indirect
-github.com/miekg/pkcs11 v1.1.1 // indirect
 github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7 // indirect
-github.com/mitchellh/mapstructure v1.5.0 // indirect
 github.com/moby/docker-image-spec v1.3.1 // indirect
 github.com/moby/locker v1.0.1 // indirect
 github.com/moby/patternmatcher v0.6.0 // indirect

go.sum (25 changes)

@@ -4,7 +4,6 @@ github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2 h1:dIS
 github.com/AdamKorcz/go-118-fuzz-build v0.0.0-20231105174938-2b5cbb29f3e2/go.mod h1:gCLVsLfv1egrcZu+GoJATN5ts75F2s62ih/457eWzOw=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c h1:udKWzYgxTojEKWjV8V+WSxDXJ4NFATAsZjh8iIbsQIg=
 github.com/Azure/go-ansiterm v0.0.0-20250102033503-faa5f7b0171c/go.mod h1:xomTg63KZ2rFqZQzSB4Vz2SUXa1BpHTVz9L5PTmPC4E=
-github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
 github.com/Masterminds/semver/v3 v3.2.1 h1:RN9w6+7QoMeJVGyfmbcgs28Br8cvmnucEXnY0rYXWg0=
 github.com/Masterminds/semver/v3 v3.2.1/go.mod h1:qvl/7zhW3nngYb5+80sSMF+FG2BjYrf8m9wsX0PNOMQ=
@@ -13,8 +12,6 @@ github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA
 github.com/Microsoft/hcsshim v0.12.9 h1:2zJy5KA+l0loz1HzEGqyNnjd3fyZA31ZBCGKacp6lLg=
 github.com/Microsoft/hcsshim v0.12.9/go.mod h1:fJ0gkFAna6ukt0bLdKB8djt4XIJhF/vEPuoIWYVvZ8Y=
 github.com/Shopify/logrus-bugsnag v0.0.0-20170309145241-6dbc35f2c30d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d h1:UrqY+r/OJnIp5u0s1SbQ8dVfLCZJsnvazdBP5hS4iRs=
-github.com/Shopify/logrus-bugsnag v0.0.0-20171204204709-577dee27f20d/go.mod h1:HI8ITrYtUY+O+ZhtlqUnD8+KwNPOyugEhfP9fdUIaEQ=
 github.com/agext/levenshtein v1.2.3 h1:YB2fHEn0UJagG8T1rrWknE3ZQzWM06O8AMAatNn7lmo=
 github.com/agext/levenshtein v1.2.3/go.mod h1:JEDfjyjHDjOF/1e4FlBE/PkbqA9OfWu2ki2W0IB5558=
 github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
@@ -63,17 +60,13 @@ github.com/bitly/go-hostpool v0.1.0/go.mod h1:4gOCgp6+NZnVqlKyZ/iBZFTAJKembaVENU
 github.com/bitly/go-simplejson v0.5.0/go.mod h1:cXHtHw4XUPsvGaxgjIAn8PhEWG9NfngEKAMDJEczWVA=
 github.com/bmatcuk/doublestar v1.1.5/go.mod h1:wiQtGV+rzVYxB7WIlirSN++5HPtPlXEo9MEoZQC/PmE=
 github.com/bmizerany/assert v0.0.0-20160611221934-b7ed37b82869/go.mod h1:Ekp36dRnpXw/yCqJaO+ZrUyxD+3VXMFFr56k5XYrpB4=
-github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0 h1:s7+5BfS4WFJoVF9pnB8kBk03S7pZXRdKamnV0FOl5Sc=
 github.com/bugsnag/bugsnag-go v1.0.5-0.20150529004307-13fd6b8acda0/go.mod h1:2oa8nejYd4cQ/b0hMIopN0lCRxU0bueqREvZLWFrtK8=
-github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b h1:otBG+dV+YK+Soembjv71DPz3uX/V/6MMlSyD9JBQ6kQ=
 github.com/bugsnag/osext v0.0.0-20130617224835-0dd3f918b21b/go.mod h1:obH5gd0BsqsP2LwDJ9aOkm/6J86V6lyAXCoQWGw3K50=
-github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0 h1:nvj0OLI3YqYXer/kZD8Ri1aaunCxIEsOst1BVJswV0o=
 github.com/bugsnag/panicwrap v0.0.0-20151223152923-e2c28503fcd0/go.mod h1:D/8v3kj0zr8ZAKg1AQ6crr+5VwKN5eIywRkfhyM/+dE=
 github.com/cenkalti/backoff/v4 v4.3.0 h1:MyRJ/UdXutAwSAT+s3wNd7MfTIcy71VQueUuFK343L8=
 github.com/cenkalti/backoff/v4 v4.3.0/go.mod h1:Y3VNntkOUPxTVeUxJ/G5vcM//AlwfmyYozVcomhLiZE=
 github.com/cespare/xxhash/v2 v2.3.0 h1:UL815xU9SqsFlibzuggzjXhog7bL6oX9BbNZnL2UFvs=
 github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
-github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004 h1:lkAMpLVBDaj17e85keuznYcH5rqI438v41pKcBl4ZxQ=
 github.com/cloudflare/cfssl v0.0.0-20180223231731-4e2dcbde5004/go.mod h1:yMWuSON2oQp+43nFtAV/uvKQIFpSPerB57DCt9t8sSA=
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb h1:EDmT6Q9Zs+SbUoc7Ik9EfrFqcylYqgPZ9ANSbTAntnE=
 github.com/codahale/rfc6979 v0.0.0-20141003034818-6a90f24967eb/go.mod h1:ZjrT6AXHbDs86ZSdt/osfBi5qfexBrKUdONk989Wnk4=
@@ -121,8 +114,8 @@ github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSs
 github.com/denisenkom/go-mssqldb v0.0.0-20191128021309-1d7a30a10f73/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
-github.com/docker/cli v28.0.2+incompatible h1:cRPZ77FK3/IXTAIQQj1vmhlxiLS5m+MIUDwS6f57lrE=
-github.com/docker/cli v28.0.2+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
+github.com/docker/cli v28.0.4+incompatible h1:pBJSJeNd9QeIWPjRcV91RVJihd/TXB77q1ef64XEu4A=
+github.com/docker/cli v28.0.4+incompatible/go.mod h1:JLrzqnKDaYBop7H2jaqPtU4hHvMKP+vjCwu2uszcLI8=
 github.com/docker/cli-docs-tool v0.9.0 h1:CVwQbE+ZziwlPqrJ7LRyUF6GvCA+6gj7MTCsayaK9t0=
 github.com/docker/cli-docs-tool v0.9.0/go.mod h1:ClrwlNW+UioiRyH9GiAOe1o3J/TsY3Tr1ipoypjAUtc=
 github.com/docker/distribution v2.7.1+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
@@ -170,7 +163,6 @@ github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En
 github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
 github.com/go-openapi/swag v0.22.4 h1:QLMzNJnMGPRNDCbySlcj1x01tzU8/9LTTL9hZZZogBU=
 github.com/go-openapi/swag v0.22.4/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14=
-github.com/go-sql-driver/mysql v1.3.0 h1:pgwjLi/dvffoP9aabwkT3AKpXQM93QARkjFhDDqC1UE=
 github.com/go-sql-driver/mysql v1.3.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
 github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
 github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI=
@@ -195,7 +187,6 @@ github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5y
 github.com/golang/protobuf v1.3.4/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw=
 github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
 github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
-github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93 h1:jc2UWq7CbdszqeH6qu1ougXMIUBfSy8Pbh/anURYbGI=
 github.com/google/certificate-transparency-go v1.0.10-0.20180222191210-5ab67e519c93/go.mod h1:QeJfpSbVSfYc7RgB3gJFj9cbuQMMchQxrWXz8Ruopmg=
 github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I=
 github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U=
@@ -221,7 +212,6 @@ github.com/gorilla/websocket v1.5.0 h1:PPwGk2jz7EePpoHN/+ClbZu8SPxiqlu12wZP/3sWm
 github.com/gorilla/websocket v1.5.0/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0 h1:asbCHRVmodnJTuQ3qamDwqVOIjwqUPTYmYuemVOx+Ys=
 github.com/grpc-ecosystem/grpc-gateway/v2 v2.22.0/go.mod h1:ggCgvZ2r7uOoQjOyu2Y1NhHmEPPzzuhWgcza5M1Ji1I=
-github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed h1:5upAirOpQc1Q53c0bnx2ufif5kANL7bfZWcc6VJWJd8=
 github.com/hailocab/go-hostpool v0.0.0-20160125115350-e80d13ce29ed/go.mod h1:tMWxXQ9wFIaZeTI9F+hmhFiGpFmhOHzyShyFUhRm0H4=
 github.com/hashicorp/errwrap v1.0.0/go.mod h1:YH+1FKiLXxHSkmPseP+kNlulaMuP3n2brvKWEqk/Jc4=
 github.com/hashicorp/errwrap v1.1.0 h1:OxrOeh75EUXMY8TBjag2fzXGZ40LB6IKw45YeGUDY2I=
@@ -242,9 +232,7 @@ github.com/in-toto/in-toto-golang v0.5.0/go.mod h1:/Rq0IZHLV7Ku5gielPT4wPHJfH1Gd
 github.com/inconshreveable/mousetrap v1.0.0/go.mod h1:PxqpIevigyE2G7u3NXJIT2ANytuPF1OarO4DADm73n8=
 github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
 github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
-github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8 h1:CZkYfurY6KGhVtlalI4QwQ6T0Cu6iuY3e0x5RLu96WE=
 github.com/jinzhu/gorm v0.0.0-20170222002820-5409931a1bb8/go.mod h1:Vla75njaFJ8clLU1W44h34PjIkijhjHIYnZxMqCdxqo=
-github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d h1:jRQLvyVGL+iVtDElaEIDdKwpPqUIZJfzkNLV34htpEc=
 github.com/jinzhu/inflection v0.0.0-20170102125226-1c35d901db3d/go.mod h1:h+uFLlag+Qp1Va5pdKtLDYj+kHp5pxUVkryuEj+Srlc=
 github.com/jinzhu/now v1.1.1/go.mod h1:d3SSVoowX0Lcu0IBviAWJpolVfI5UJVZZ7cO71lE/z8=
 github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY=
@@ -273,7 +261,6 @@ github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
 github.com/kylelemons/godebug v1.1.0 h1:RPNrshWIDI6G2gRW9EHilWtl7Z6Sb1BR0xunSBf0SNc=
 github.com/kylelemons/godebug v1.1.0/go.mod h1:9/0rRGxNHcop5bhtWyNeEfOS8JIWk580+fNqagV/RAw=
 github.com/lib/pq v0.0.0-20150723085316-0dad96c0b94f/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
-github.com/magiconair/properties v1.5.3 h1:C8fxWnhYyME3n0klPOhVM7PtYUB3eV1W3DeFmN3j53Y=
 github.com/magiconair/properties v1.5.3/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
 github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0=
 github.com/mailru/easyjson v0.7.7/go.mod h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc=
@@ -292,8 +279,6 @@ github.com/mitchellh/go-wordwrap v0.0.0-20150314170334-ad45545899c7/go.mod h1:ZX
 github.com/mitchellh/hashstructure/v2 v2.0.2 h1:vGKWl0YJqUNxE8d+h8f6NJLcCJrgbhC4NcD46KavDd4=
 github.com/mitchellh/hashstructure/v2 v2.0.2/go.mod h1:MG3aRVU/N29oo/V/IhBX8GR/zz4kQkprJgF2EVszyDE=
 github.com/mitchellh/mapstructure v0.0.0-20150613213606-2caf8efc9366/go.mod h1:FVVH3fgwuzCH5S8UJGiWEs2h04kUh9fWfEaFds41c1Y=
-github.com/mitchellh/mapstructure v1.5.0 h1:jeMsZIYE/09sWLaz43PL7Gy6RuMjD2eJVyuac5Z2hdY=
-github.com/mitchellh/mapstructure v1.5.0/go.mod h1:bFUtVrKA4DC2yAKiSyO/QUcy7e+RRV2QTWOzhPopBRo=
 github.com/moby/buildkit v0.20.2 h1:qIeR47eQ1tzI1rwz0on3Xx2enRw/1CKjFhoONVcTlMA=
 github.com/moby/buildkit v0.20.2/go.mod h1:DhaF82FjwOElTftl0JUAJpH/SUIUx4UvcFncLeOtlDI=
 github.com/moby/docker-image-spec v1.3.1 h1:jMKff3w6PgbfSa69GfNg+zN/XLhfXJGnEx3Nl2EsFP0=
@@ -350,7 +335,6 @@ github.com/opencontainers/runtime-spec v1.2.0 h1:z97+pHb3uELt/yiAWD691HNHQIF07bE
 github.com/opencontainers/runtime-spec v1.2.0/go.mod h1:jwyrGlmzljRJv/Fgzds9SsS/C5hL+LL3ko9hs6T5lQ0=
 github.com/opencontainers/selinux v1.11.1 h1:nHFvthhM0qY8/m+vfhJylliSshm8G1jJ2jDMcgULaH8=
 github.com/opencontainers/selinux v1.11.1/go.mod h1:E5dMC3VPuVvVHDYmi78qvhJp8+M586T4DlDRYpFkyec=
-github.com/opentracing/opentracing-go v1.1.0 h1:pWlfV3Bxv7k65HYwkikxat0+s3pV4bsqf19k25Ur8rU=
 github.com/opentracing/opentracing-go v1.1.0/go.mod h1:UkNAQd3GIcIGf0SeVgPpRdFStlNbqXla1AfSYxPUl2o=
 github.com/pelletier/go-toml v1.9.5 h1:4yBQzkHv+7BHq2PQUZF3Mx0IYxG7LsP222s7Agd3ve8=
 github.com/pelletier/go-toml v1.9.5/go.mod h1:u1nR/EPcESfeI/szUZKdtJ0xRNbUoANCkoOuaOx1Y+c=
@@ -405,17 +389,14 @@ github.com/sirupsen/logrus v1.9.3 h1:dueUQJ1C2q9oE3F7wvmSGAaVtTmUizReu6fjN8uqzbQ
 github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
 github.com/spdx/tools-golang v0.5.3 h1:ialnHeEYUC4+hkm5vJm4qz2x+oEJbS0mAMFrNXdQraY=
 github.com/spdx/tools-golang v0.5.3/go.mod h1:/ETOahiAo96Ob0/RAIBmFZw6XN0yTnyr/uFZm2NTMhI=
-github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94 h1:JmfC365KywYwHB946TTiQWEb8kqPY+pybPLoGE9GgVk=
 github.com/spf13/cast v0.0.0-20150508191742-4d07383ffe94/go.mod h1:r2rcYCSwa1IExKTDiTfzaxqT2FNHs8hODu4LnUfgKEg=
 github.com/spf13/cobra v0.0.1/go.mod h1:1l0Ry5zgKvJasoi3XT1TypsSe7PqH0Sj9dhYf7v3XqQ=
 github.com/spf13/cobra v1.9.1 h1:CXSaggrXdbHK9CF+8ywj8Amf7PBRmPCOJugH954Nnlo=
 github.com/spf13/cobra v1.9.1/go.mod h1:nDyEzZ8ogv936Cinf6g1RU9MRY64Ir93oCnqb9wxYW0=
-github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431 h1:XTHrT015sxHyJ5FnQ0AeemSspZWaDq7DoTRW0EVsDCE=
 github.com/spf13/jwalterweatherman v0.0.0-20141219030609-3d60171a6431/go.mod h1:cQK4TGJAtQXfYWX+Ddv3mKDzgVb68N+wFjFa4jdeBTo=
 github.com/spf13/pflag v1.0.0/go.mod h1:DYY7MBk1bdzusC3SYhjObp+wFpr4gzcvqqNjLnInEg4=
 github.com/spf13/pflag v1.0.6 h1:jFzHGLGAlb3ruxLB8MhbI6A8+AQX/2eW4qeyNZXNp2o=
 github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
-github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c h1:2EejZtjFjKJGk71ANb+wtFK5EjUzUkEM3R0xnp559xg=
 github.com/spf13/viper v0.0.0-20150530192845-be5ff3e4840c/go.mod h1:A8kyI5cUJhb8N+3pkfONlcEcZbueH6nhAm0Fq7SrnBM=
 github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
 github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
@@ -584,7 +565,6 @@ google.golang.org/protobuf v1.35.2 h1:8Ar7bF+apOIoThw1EdZl0p1oWvMqTHmpA2fRTyZO8i
 google.golang.org/protobuf v1.35.2/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
 gopkg.in/airbrake/gobrake.v2 v2.0.9/go.mod h1:/h5ZAUhDkGaJfjzjKLSjv6zCL6O0LLBxU4K+aSYdM/U=
 gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
-gopkg.in/cenkalti/backoff.v2 v2.2.1 h1:eJ9UAg01/HIHG987TwxvnzK2MgxXq97YY6rYDpY9aII=
 gopkg.in/cenkalti/backoff.v2 v2.2.1/go.mod h1:S0QdOvT2AlerfSBkp0O+dk+bbIMaNbEmVk876gPCthU=
 gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
 gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
@@ -595,7 +575,6 @@ gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMy
 gopkg.in/gemnasium/logrus-airbrake-hook.v2 v2.1.2/go.mod h1:Xk6kEKp8OKb+X14hQBKWaSkCsqBpgog8nAV2xsGOxlo=
 gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc=
 gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw=
-gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1 h1:d4KQkxAaAiRY2h5Zqis161Pv91A37uZyJOx73duwUwM=
 gopkg.in/rethinkdb/rethinkdb-go.v6 v6.2.1/go.mod h1:WbjuEoo1oadwzQ4apSDU+JTvmllEHtsNHS6y7vFc7iw=
 gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
 gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
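
The h1: values above are base64-encoded SHA-256 hashes in Go's dirhash scheme, covering a module's file tree (or, for the /go.mod entries, just its go.mod file); dropping a module's pair of lines means it is no longer downloaded or verified at all. A sketch of how such a hash can be recomputed with golang.org/x/mod, which is not a dependency of this repository; the module-cache path is hypothetical:

package main

import (
	"fmt"

	"golang.org/x/mod/sumdb/dirhash"
)

func main() {
	// Hypothetical path to an extracted module in the local module cache.
	const mod = "github.com/docker/cli@v28.0.4+incompatible"
	const dir = "/go/pkg/mod/" + mod
	// HashDir hashes the file tree the way the go command does for go.sum,
	// using module@version as the path prefix inside the hash.
	h, err := dirhash.HashDir(dir, mod, dirhash.Hash1)
	if err != nil {
		panic(err)
	}
	fmt.Println(h) // should match the h1: value pinned in go.sum
}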


@@ -56,8 +56,6 @@ type Cli interface {
 CurrentContext() string
 DockerEndpoint() docker.Endpoint
 TelemetryClient
-DeprecatedNotaryClient
-DeprecatedManifestClient
 }

 // DockerCli is an instance the docker command line client.


@@ -1,56 +0,0 @@
package command
import (
"context"
"path/filepath"
"github.com/docker/cli/cli/config"
manifeststore "github.com/docker/cli/cli/manifest/store"
registryclient "github.com/docker/cli/cli/registry/client"
"github.com/docker/cli/cli/trust"
"github.com/docker/docker/api/types/registry"
notaryclient "github.com/theupdateframework/notary/client"
)
type DeprecatedNotaryClient interface {
// NotaryClient provides a Notary Repository to interact with signed metadata for an image
//
// Deprecated: use [trust.GetNotaryRepository] instead. This method is no longer used and will be removed in the next release.
NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error)
}
type DeprecatedManifestClient interface {
// ManifestStore returns a store for local manifests
//
// Deprecated: use [manifeststore.NewStore] instead. This method is no longer used and will be removed in the next release.
ManifestStore() manifeststore.Store
// RegistryClient returns a client for communicating with a Docker distribution
// registry.
//
// Deprecated: use [registryclient.NewRegistryClient]. This method is no longer used and will be removed in the next release.
RegistryClient(bool) registryclient.RegistryClient
}
// NotaryClient provides a Notary Repository to interact with signed metadata for an image
func (cli *DockerCli) NotaryClient(imgRefAndAuth trust.ImageRefAndAuth, actions []string) (notaryclient.Repository, error) {
return trust.GetNotaryRepository(cli.In(), cli.Out(), UserAgent(), imgRefAndAuth.RepoInfo(), imgRefAndAuth.AuthConfig(), actions...)
}
// ManifestStore returns a store for local manifests
//
// Deprecated: use [manifeststore.NewStore] instead. This method is no longer used and will be removed in the next release.
func (*DockerCli) ManifestStore() manifeststore.Store {
return manifeststore.NewStore(filepath.Join(config.Dir(), "manifests"))
}
// RegistryClient returns a client for communicating with a Docker distribution
// registry
//
// Deprecated: use [registryclient.NewRegistryClient]. This method is no longer used and will be removed in the next release.
func (cli *DockerCli) RegistryClient(allowInsecure bool) registryclient.RegistryClient {
resolver := func(ctx context.Context, index *registry.IndexInfo) registry.AuthConfig {
return ResolveAuthConfig(cli.ConfigFile(), index)
}
return registryclient.NewRegistryClient(resolver, UserAgent(), allowInsecure)
}
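
For code that still called the removed methods, the deprecation comments above already name the replacements. A minimal migration sketch, using the import paths from the deleted file; the user-agent string and the anonymous resolver are placeholders:

package main

import (
	"context"
	"path/filepath"

	"github.com/docker/cli/cli/config"
	manifeststore "github.com/docker/cli/cli/manifest/store"
	registryclient "github.com/docker/cli/cli/registry/client"
	"github.com/docker/docker/api/types/registry"
)

func main() {
	// Was: dockerCli.ManifestStore()
	store := manifeststore.NewStore(filepath.Join(config.Dir(), "manifests"))
	_ = store

	// Was: dockerCli.RegistryClient(allowInsecure). This resolver returns
	// anonymous auth; a real caller would resolve credentials per index,
	// as ResolveAuthConfig did in the deleted implementation.
	resolver := func(ctx context.Context, index *registry.IndexInfo) registry.AuthConfig {
		return registry.AuthConfig{}
	}
	rc := registryclient.NewRegistryClient(resolver, "example-client/1.0", false)
	_ = rc
}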


@@ -58,7 +58,7 @@ func resetConfigDir() {
 // getHomeDir is a copy of [pkg/homedir.Get] to prevent adding docker/docker
 // as dependency for consumers that only need to read the config-file.
 //
-// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v26.1.4+incompatible/pkg/homedir#Get
+// [pkg/homedir.Get]: https://pkg.go.dev/github.com/docker/docker@v28.0.3+incompatible/pkg/homedir#Get
 func getHomeDir() string {
 home, _ := os.UserHomeDir()
 if home == "" && runtime.GOOS != "windows" {


@@ -1,68 +0,0 @@
package jsonstream
import (
"context"
"io"
"github.com/docker/docker/pkg/jsonmessage"
)
type (
Stream = jsonmessage.Stream
JSONMessage = jsonmessage.JSONMessage
JSONError = jsonmessage.JSONError
JSONProgress = jsonmessage.JSONProgress
)
type ctxReader struct {
err chan error
r io.Reader
}
func (r *ctxReader) Read(p []byte) (n int, err error) {
select {
case err = <-r.err:
return 0, err
default:
return r.r.Read(p)
}
}
type Options func(*options)
type options struct {
AuxCallback func(JSONMessage)
}
func WithAuxCallback(cb func(JSONMessage)) Options {
return func(o *options) {
o.AuxCallback = cb
}
}
// Display prints the JSON messages from the given reader to the given stream.
//
// It wraps the [jsonmessage.DisplayJSONMessagesStream] function to make it
// "context aware" and appropriately returns why the function was canceled.
//
// It returns an error if the context is canceled, but not if the input reader / stream is closed.
func Display(ctx context.Context, in io.Reader, stream Stream, opts ...Options) error {
if ctx.Err() != nil {
return ctx.Err()
}
reader := &ctxReader{err: make(chan error, 1), r: in}
stopFunc := context.AfterFunc(ctx, func() { reader.err <- ctx.Err() })
defer stopFunc()
o := options{}
for _, opt := range opts {
opt(&o)
}
if err := jsonmessage.DisplayJSONMessagesStream(reader, stream, stream.FD(), stream.IsTerminal(), o.AuxCallback); err != nil {
return err
}
return ctx.Err()
}
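
Aside from wrapping jsonmessage, the interesting piece above is the ctxReader/context.AfterFunc pairing, which retrofits context cancelation onto a blocking io.Reader. A self-contained sketch of the same pattern, requiring Go 1.21+ for context.AfterFunc:

package main

import (
	"context"
	"fmt"
	"io"
	"strings"
	"time"
)

// ctxReader mirrors the wrapper above: once the context is done, Read
// reports the pending error instead of touching the inner reader again.
type ctxReader struct {
	err chan error
	r   io.Reader
}

func (r *ctxReader) Read(p []byte) (int, error) {
	select {
	case err := <-r.err:
		return 0, err
	default:
		return r.r.Read(p)
	}
}

func main() {
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Millisecond)
	defer cancel()

	reader := &ctxReader{err: make(chan error, 1), r: strings.NewReader("payload")}
	stop := context.AfterFunc(ctx, func() { reader.err <- ctx.Err() })
	defer stop()

	time.Sleep(20 * time.Millisecond) // let the deadline expire first
	_, err := io.ReadAll(reader)
	fmt.Println(err) // context.DeadlineExceeded
}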


@@ -1,178 +0,0 @@
package store
import (
"encoding/json"
"os"
"path/filepath"
"strings"
"github.com/distribution/reference"
"github.com/docker/cli/cli/manifest/types"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// Store manages local storage of image distribution manifests
type Store interface {
Remove(listRef reference.Reference) error
Get(listRef reference.Reference, manifest reference.Reference) (types.ImageManifest, error)
GetList(listRef reference.Reference) ([]types.ImageManifest, error)
Save(listRef reference.Reference, manifest reference.Reference, image types.ImageManifest) error
}
// fsStore manages manifest files stored on the local filesystem
type fsStore struct {
root string
}
// NewStore returns a new store for a local file path
func NewStore(root string) Store {
return &fsStore{root: root}
}
// Remove a manifest list from local storage
func (s *fsStore) Remove(listRef reference.Reference) error {
path := filepath.Join(s.root, makeFilesafeName(listRef.String()))
return os.RemoveAll(path)
}
// Get returns the local manifest
func (s *fsStore) Get(listRef reference.Reference, manifest reference.Reference) (types.ImageManifest, error) {
filename := manifestToFilename(s.root, listRef.String(), manifest.String())
return s.getFromFilename(manifest, filename)
}
func (*fsStore) getFromFilename(ref reference.Reference, filename string) (types.ImageManifest, error) {
bytes, err := os.ReadFile(filename)
switch {
case os.IsNotExist(err):
return types.ImageManifest{}, newNotFoundError(ref.String())
case err != nil:
return types.ImageManifest{}, err
}
var manifestInfo struct {
types.ImageManifest
// Deprecated Fields, replaced by Descriptor
Digest digest.Digest
Platform *manifestlist.PlatformSpec
}
if err := json.Unmarshal(bytes, &manifestInfo); err != nil {
return types.ImageManifest{}, err
}
// Compatibility with image manifests created before
// descriptor, newer versions omit Digest and Platform
if manifestInfo.Digest != "" {
mediaType, raw, err := manifestInfo.Payload()
if err != nil {
return types.ImageManifest{}, err
}
if dgst := digest.FromBytes(raw); dgst != manifestInfo.Digest {
return types.ImageManifest{}, errors.Errorf("invalid manifest file %v: image manifest digest mismatch (%v != %v)", filename, manifestInfo.Digest, dgst)
}
manifestInfo.ImageManifest.Descriptor = ocispec.Descriptor{
Digest: manifestInfo.Digest,
Size: int64(len(raw)),
MediaType: mediaType,
Platform: types.OCIPlatform(manifestInfo.Platform),
}
}
return manifestInfo.ImageManifest, nil
}
// GetList returns all the local manifests for a transaction
func (s *fsStore) GetList(listRef reference.Reference) ([]types.ImageManifest, error) {
filenames, err := s.listManifests(listRef.String())
switch {
case err != nil:
return nil, err
case filenames == nil:
return nil, newNotFoundError(listRef.String())
}
manifests := []types.ImageManifest{}
for _, filename := range filenames {
filename = filepath.Join(s.root, makeFilesafeName(listRef.String()), filename)
manifest, err := s.getFromFilename(listRef, filename)
if err != nil {
return nil, err
}
manifests = append(manifests, manifest)
}
return manifests, nil
}
// listManifests stored in a transaction
func (s *fsStore) listManifests(transaction string) ([]string, error) {
transactionDir := filepath.Join(s.root, makeFilesafeName(transaction))
fileInfos, err := os.ReadDir(transactionDir)
switch {
case os.IsNotExist(err):
return nil, nil
case err != nil:
return nil, err
}
filenames := make([]string, 0, len(fileInfos))
for _, info := range fileInfos {
filenames = append(filenames, info.Name())
}
return filenames, nil
}
// Save a manifest as part of a local manifest list
func (s *fsStore) Save(listRef reference.Reference, manifest reference.Reference, image types.ImageManifest) error {
if err := s.createManifestListDirectory(listRef.String()); err != nil {
return err
}
filename := manifestToFilename(s.root, listRef.String(), manifest.String())
bytes, err := json.Marshal(image)
if err != nil {
return err
}
return os.WriteFile(filename, bytes, 0o644)
}
func (s *fsStore) createManifestListDirectory(transaction string) error {
path := filepath.Join(s.root, makeFilesafeName(transaction))
return os.MkdirAll(path, 0o755)
}
func manifestToFilename(root, manifestList, manifest string) string {
return filepath.Join(root, makeFilesafeName(manifestList), makeFilesafeName(manifest))
}
func makeFilesafeName(ref string) string {
fileName := strings.ReplaceAll(ref, ":", "-")
return strings.ReplaceAll(fileName, "/", "_")
}
type notFoundError struct {
object string
}
func newNotFoundError(ref string) *notFoundError {
return &notFoundError{object: ref}
}
func (n *notFoundError) Error() string {
return "No such manifest: " + n.object
}
// NotFound interface
func (*notFoundError) NotFound() {}
// IsNotFound returns true if the error is a not found error
func IsNotFound(err error) bool {
_, ok := err.(notFound)
return ok
}
type notFound interface {
NotFound()
}
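
A usage sketch for this store as it existed before the removal, with a hypothetical root directory and illustrative references; note that makeFilesafeName maps ':' to '-' and '/' to '_' for the on-disk names:

package main

import (
	"fmt"

	"github.com/distribution/reference"
	manifeststore "github.com/docker/cli/cli/manifest/store"
	"github.com/docker/cli/cli/manifest/types"
)

func main() {
	store := manifeststore.NewStore("/tmp/manifests") // hypothetical root

	// The list reference names the "transaction"; the manifest reference
	// names one entry inside it. Both are illustrative.
	listRef, _ := reference.ParseNormalizedNamed("example.com/app:latest")
	manifestRef, _ := reference.ParseNormalizedNamed("example.com/app:latest-amd64")

	if err := store.Save(listRef, manifestRef, types.ImageManifest{}); err != nil {
		panic(err)
	}

	all, err := store.GetList(listRef)
	fmt.Println(len(all), err) // 1 <nil>

	_, err = store.Get(listRef, manifestRef)
	fmt.Println(manifeststore.IsNotFound(err)) // false: the entry exists
}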


@@ -1,154 +0,0 @@
package types
import (
"encoding/json"
"github.com/distribution/reference"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/ocischema"
"github.com/docker/distribution/manifest/schema2"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
)
// ImageManifest contains info to output for a manifest object.
type ImageManifest struct {
Ref *SerializableNamed
Descriptor ocispec.Descriptor
Raw []byte `json:",omitempty"`
// SchemaV2Manifest is used for inspection
SchemaV2Manifest *schema2.DeserializedManifest `json:",omitempty"`
// OCIManifest is used for inspection
OCIManifest *ocischema.DeserializedManifest `json:",omitempty"`
}
// OCIPlatform creates an OCI platform from a manifest list platform spec
func OCIPlatform(ps *manifestlist.PlatformSpec) *ocispec.Platform {
if ps == nil {
return nil
}
return &ocispec.Platform{
Architecture: ps.Architecture,
OS: ps.OS,
OSVersion: ps.OSVersion,
OSFeatures: ps.OSFeatures,
Variant: ps.Variant,
}
}
// PlatformSpecFromOCI creates a platform spec from OCI platform
func PlatformSpecFromOCI(p *ocispec.Platform) *manifestlist.PlatformSpec {
if p == nil {
return nil
}
return &manifestlist.PlatformSpec{
Architecture: p.Architecture,
OS: p.OS,
OSVersion: p.OSVersion,
OSFeatures: p.OSFeatures,
Variant: p.Variant,
}
}
// Blobs returns the digests for all the blobs referenced by this manifest
func (i ImageManifest) Blobs() []digest.Digest {
var digests []digest.Digest
switch {
case i.SchemaV2Manifest != nil:
refs := i.SchemaV2Manifest.References()
digests = make([]digest.Digest, 0, len(refs))
for _, descriptor := range refs {
digests = append(digests, descriptor.Digest)
}
case i.OCIManifest != nil:
refs := i.OCIManifest.References()
digests = make([]digest.Digest, 0, len(refs))
for _, descriptor := range refs {
digests = append(digests, descriptor.Digest)
}
}
return digests
}
// Payload returns the media type and bytes for the manifest
func (i ImageManifest) Payload() (string, []byte, error) {
// TODO: If available, read content from a content store by digest
switch {
case i.SchemaV2Manifest != nil:
return i.SchemaV2Manifest.Payload()
case i.OCIManifest != nil:
return i.OCIManifest.Payload()
default:
return "", nil, errors.Errorf("%s has no payload", i.Ref)
}
}
// References implements the distribution.Manifest interface. It delegates to
// the underlying manifest.
func (i ImageManifest) References() []distribution.Descriptor {
switch {
case i.SchemaV2Manifest != nil:
return i.SchemaV2Manifest.References()
case i.OCIManifest != nil:
return i.OCIManifest.References()
default:
return nil
}
}
// NewImageManifest returns a new ImageManifest object. The values for Platform
// are initialized from those in the image
func NewImageManifest(ref reference.Named, desc ocispec.Descriptor, manifest *schema2.DeserializedManifest) ImageManifest {
raw, err := manifest.MarshalJSON()
if err != nil {
raw = nil
}
return ImageManifest{
Ref: &SerializableNamed{Named: ref},
Descriptor: desc,
Raw: raw,
SchemaV2Manifest: manifest,
}
}
// NewOCIImageManifest returns a new ImageManifest object. The values for
// Platform are initialized from those in the image
func NewOCIImageManifest(ref reference.Named, desc ocispec.Descriptor, manifest *ocischema.DeserializedManifest) ImageManifest {
raw, err := manifest.MarshalJSON()
if err != nil {
raw = nil
}
return ImageManifest{
Ref: &SerializableNamed{Named: ref},
Descriptor: desc,
Raw: raw,
OCIManifest: manifest,
}
}
// SerializableNamed is a reference.Named that can be serialized and deserialized
// from JSON
type SerializableNamed struct {
reference.Named
}
// UnmarshalJSON loads the Named reference from JSON bytes
func (s *SerializableNamed) UnmarshalJSON(b []byte) error {
var raw string
if err := json.Unmarshal(b, &raw); err != nil {
return errors.Wrapf(err, "invalid named reference bytes: %s", b)
}
var err error
s.Named, err = reference.ParseNamed(raw)
return err
}
// MarshalJSON returns the JSON bytes representation
func (s *SerializableNamed) MarshalJSON() ([]byte, error) {
return json.Marshal(s.String())
}
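
SerializableNamed exists so a reference round-trips through JSON as a plain string; a small sketch (the image name is arbitrary):

package main

import (
	"encoding/json"
	"fmt"

	"github.com/distribution/reference"
	"github.com/docker/cli/cli/manifest/types"
)

func main() {
	named, err := reference.ParseNamed("docker.io/library/alpine:3.21")
	if err != nil {
		panic(err)
	}

	// Marshals to the plain string form of the reference...
	in := &types.SerializableNamed{Named: named}
	b, _ := json.Marshal(in)
	fmt.Println(string(b)) // "docker.io/library/alpine:3.21"

	// ...and parses back into a usable reference.Named.
	var out types.SerializableNamed
	if err := json.Unmarshal(b, &out); err != nil {
		panic(err)
	}
	fmt.Println(reference.Domain(&out), reference.Path(&out)) // docker.io library/alpine
}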


@@ -1,197 +0,0 @@
package client
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/distribution/reference"
manifesttypes "github.com/docker/cli/cli/manifest/types"
"github.com/docker/distribution"
distributionclient "github.com/docker/distribution/registry/client"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// RegistryClient is a client used to communicate with a Docker distribution
// registry
type RegistryClient interface {
GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error)
GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error)
MountBlob(ctx context.Context, source reference.Canonical, target reference.Named) error
PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error)
}
// NewRegistryClient returns a new RegistryClient with a resolver
func NewRegistryClient(resolver AuthConfigResolver, userAgent string, insecure bool) RegistryClient {
return &client{
authConfigResolver: resolver,
insecureRegistry: insecure,
userAgent: userAgent,
}
}
// AuthConfigResolver returns Auth Configuration for an index
type AuthConfigResolver func(ctx context.Context, index *registrytypes.IndexInfo) registrytypes.AuthConfig
type client struct {
authConfigResolver AuthConfigResolver
insecureRegistry bool
userAgent string
}
// ErrBlobCreated returned when a blob mount request was created
type ErrBlobCreated struct {
From reference.Named
Target reference.Named
}
func (err ErrBlobCreated) Error() string {
return fmt.Sprintf("blob mounted from: %v to: %v",
err.From, err.Target)
}
// httpProtoError returned if attempting to use TLS with a non-TLS registry
type httpProtoError struct {
cause error
}
func (e httpProtoError) Error() string {
return e.cause.Error()
}
var _ RegistryClient = &client{}
// MountBlob into the registry, so it can be referenced by a manifest
func (c *client) MountBlob(ctx context.Context, sourceRef reference.Canonical, targetRef reference.Named) error {
repoEndpoint, err := newDefaultRepositoryEndpoint(targetRef, c.insecureRegistry)
if err != nil {
return err
}
repoEndpoint.actions = []string{"pull", "push"}
repo, err := c.getRepositoryForReference(ctx, targetRef, repoEndpoint)
if err != nil {
return err
}
lu, err := repo.Blobs(ctx).Create(ctx, distributionclient.WithMountFrom(sourceRef))
switch err.(type) {
case distribution.ErrBlobMounted:
logrus.Debugf("mount of blob %s succeeded", sourceRef)
return nil
case nil:
default:
return errors.Wrapf(err, "failed to mount blob %s to %s", sourceRef, targetRef)
}
lu.Cancel(ctx)
logrus.Debugf("mount of blob %s created", sourceRef)
return ErrBlobCreated{From: sourceRef, Target: targetRef}
}
// PutManifest sends the manifest to a registry and returns the new digest
func (c *client) PutManifest(ctx context.Context, ref reference.Named, manifest distribution.Manifest) (digest.Digest, error) {
repoEndpoint, err := newDefaultRepositoryEndpoint(ref, c.insecureRegistry)
if err != nil {
return "", err
}
repoEndpoint.actions = []string{"pull", "push"}
repo, err := c.getRepositoryForReference(ctx, ref, repoEndpoint)
if err != nil {
return "", err
}
manifestService, err := repo.Manifests(ctx)
if err != nil {
return "", err
}
_, opts, err := getManifestOptionsFromReference(ref)
if err != nil {
return "", err
}
dgst, err := manifestService.Put(ctx, manifest, opts...)
if err != nil {
return dgst, errors.Wrapf(err, "failed to put manifest %s", ref)
}
return dgst, nil
}
func (c *client) getRepositoryForReference(ctx context.Context, ref reference.Named, repoEndpoint repositoryEndpoint) (distribution.Repository, error) {
repoName, err := reference.WithName(repoEndpoint.Name())
if err != nil {
return nil, errors.Wrapf(err, "failed to parse repo name from %s", ref)
}
httpTransport, err := c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint)
if err != nil {
if !strings.Contains(err.Error(), "server gave HTTP response to HTTPS client") {
return nil, err
}
if !repoEndpoint.endpoint.TLSConfig.InsecureSkipVerify {
return nil, httpProtoError{cause: err}
}
// --insecure was set; fall back to plain HTTP
if url := repoEndpoint.endpoint.URL; url != nil && url.Scheme == "https" {
url.Scheme = "http"
httpTransport, err = c.getHTTPTransportForRepoEndpoint(ctx, repoEndpoint)
if err != nil {
return nil, err
}
}
}
return distributionclient.NewRepository(repoName, repoEndpoint.BaseURL(), httpTransport)
}
func (c *client) getHTTPTransportForRepoEndpoint(ctx context.Context, repoEndpoint repositoryEndpoint) (http.RoundTripper, error) {
httpTransport, err := getHTTPTransport(
c.authConfigResolver(ctx, repoEndpoint.info.Index),
repoEndpoint.endpoint,
repoEndpoint.Name(),
c.userAgent,
repoEndpoint.actions,
)
if err != nil {
return nil, errors.Wrap(err, "failed to configure transport")
}
return httpTransport, nil
}
// GetManifest returns an ImageManifest for the reference
func (c *client) GetManifest(ctx context.Context, ref reference.Named) (manifesttypes.ImageManifest, error) {
var result manifesttypes.ImageManifest
fetch := func(ctx context.Context, repo distribution.Repository, ref reference.Named) (bool, error) {
var err error
result, err = fetchManifest(ctx, repo, ref)
return result.Ref != nil, err
}
err := c.iterateEndpoints(ctx, ref, fetch)
return result, err
}
// GetManifestList returns a list of ImageManifest for the reference
func (c *client) GetManifestList(ctx context.Context, ref reference.Named) ([]manifesttypes.ImageManifest, error) {
result := []manifesttypes.ImageManifest{}
fetch := func(ctx context.Context, repo distribution.Repository, ref reference.Named) (bool, error) {
var err error
result, err = fetchList(ctx, repo, ref)
return len(result) > 0, err
}
err := c.iterateEndpoints(ctx, ref, fetch)
return result, err
}
func getManifestOptionsFromReference(ref reference.Named) (digest.Digest, []distribution.ManifestServiceOption, error) {
if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
tag := tagged.Tag()
return "", []distribution.ManifestServiceOption{distribution.WithTag(tag)}, nil
}
if digested, isDigested := ref.(reference.Canonical); isDigested {
return digested.Digest(), []distribution.ManifestServiceOption{}, nil
}
return "", nil, errors.Errorf("%s no tag or digest", ref)
}
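
Putting the removed client together: a sketch that resolves anonymous credentials, instantiates the client, and lists per-platform manifests. The user-agent string is a placeholder and network access to Docker Hub is assumed:

package main

import (
	"context"
	"fmt"

	"github.com/distribution/reference"
	registryclient "github.com/docker/cli/cli/registry/client"
	registrytypes "github.com/docker/docker/api/types/registry"
)

func main() {
	// Anonymous pulls; a real caller would look up stored credentials here.
	resolver := func(ctx context.Context, index *registrytypes.IndexInfo) registrytypes.AuthConfig {
		return registrytypes.AuthConfig{}
	}
	client := registryclient.NewRegistryClient(resolver, "example-client/1.0", false)

	ref, err := reference.ParseNormalizedNamed("docker.io/library/alpine:3.21")
	if err != nil {
		panic(err)
	}

	manifests, err := client.GetManifestList(context.Background(), ref)
	if err != nil {
		panic(err)
	}
	for _, m := range manifests {
		if p := m.Descriptor.Platform; p != nil {
			fmt.Println(p.OS+"/"+p.Architecture, m.Descriptor.Digest)
		}
	}
}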


@@ -1,121 +0,0 @@
package client
import (
"net"
"net/http"
"time"
"github.com/distribution/reference"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/client/transport"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/registry"
"github.com/pkg/errors"
)
type repositoryEndpoint struct {
info *registry.RepositoryInfo
endpoint registry.APIEndpoint
actions []string
}
// Name returns the repository name
func (r repositoryEndpoint) Name() string {
return reference.Path(r.info.Name)
}
// BaseURL returns the endpoint url
func (r repositoryEndpoint) BaseURL() string {
return r.endpoint.URL.String()
}
func newDefaultRepositoryEndpoint(ref reference.Named, insecure bool) (repositoryEndpoint, error) {
repoInfo, _ := registry.ParseRepositoryInfo(ref)
endpoint, err := getDefaultEndpointFromRepoInfo(repoInfo)
if err != nil {
return repositoryEndpoint{}, err
}
if insecure {
endpoint.TLSConfig.InsecureSkipVerify = true
}
return repositoryEndpoint{info: repoInfo, endpoint: endpoint}, nil
}
func getDefaultEndpointFromRepoInfo(repoInfo *registry.RepositoryInfo) (registry.APIEndpoint, error) {
var err error
options := registry.ServiceOptions{}
registryService, err := registry.NewService(options)
if err != nil {
return registry.APIEndpoint{}, err
}
endpoints, err := registryService.LookupPushEndpoints(reference.Domain(repoInfo.Name))
if err != nil {
return registry.APIEndpoint{}, err
}
// Default to the highest priority endpoint to return
endpoint := endpoints[0]
if !repoInfo.Index.Secure {
for _, ep := range endpoints {
if ep.URL.Scheme == "http" {
endpoint = ep
}
}
}
return endpoint, nil
}
// getHTTPTransport builds a transport for use in communicating with a registry
func getHTTPTransport(authConfig registrytypes.AuthConfig, endpoint registry.APIEndpoint, repoName, userAgent string, actions []string) (http.RoundTripper, error) {
// get the http transport, this will be used in a client to upload manifest
base := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: endpoint.TLSConfig,
DisableKeepAlives: true,
}
modifiers := registry.Headers(userAgent, http.Header{})
authTransport := transport.NewTransport(base, modifiers...)
challengeManager, err := registry.PingV2Registry(endpoint.URL, authTransport)
if err != nil {
return nil, errors.Wrap(err, "error pinging v2 registry")
}
if authConfig.RegistryToken != "" {
passThruTokenHandler := &existingTokenHandler{token: authConfig.RegistryToken}
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, passThruTokenHandler))
} else {
if len(actions) == 0 {
actions = []string{"pull"}
}
creds := registry.NewStaticCredentialStore(&authConfig)
tokenHandler := auth.NewTokenHandler(authTransport, creds, repoName, actions...)
basicHandler := auth.NewBasicHandler(creds)
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
}
return transport.NewTransport(base, modifiers...), nil
}
// RepoNameForReference returns the repository name from a reference.
//
// Deprecated: this function is no longer used and will be removed in the next release.
func RepoNameForReference(ref reference.Named) (string, error) {
return reference.Path(reference.TrimNamed(ref)), nil
}
type existingTokenHandler struct {
token string
}
func (th *existingTokenHandler) AuthorizeRequest(req *http.Request, _ map[string]string) error {
req.Header.Set("Authorization", "Bearer "+th.token)
return nil
}
func (*existingTokenHandler) Scheme() string {
return "bearer"
}


@@ -1,301 +0,0 @@
package client
import (
"context"
"encoding/json"
"github.com/distribution/reference"
"github.com/docker/cli/cli/manifest/types"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest/manifestlist"
"github.com/docker/distribution/manifest/ocischema"
"github.com/docker/distribution/manifest/schema2"
"github.com/docker/distribution/registry/api/errcode"
v2 "github.com/docker/distribution/registry/api/v2"
distclient "github.com/docker/distribution/registry/client"
"github.com/docker/docker/registry"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
)
// fetchManifest pulls a manifest from a registry and returns it. An error
// is returned if no manifest is found matching namedRef.
func fetchManifest(ctx context.Context, repo distribution.Repository, ref reference.Named) (types.ImageManifest, error) {
manifest, err := getManifest(ctx, repo, ref)
if err != nil {
return types.ImageManifest{}, err
}
switch v := manifest.(type) {
// Removed Schema 1 support
case *schema2.DeserializedManifest:
return pullManifestSchemaV2(ctx, ref, repo, *v)
case *ocischema.DeserializedManifest:
return pullManifestOCISchema(ctx, ref, repo, *v)
case *manifestlist.DeserializedManifestList:
return types.ImageManifest{}, errors.Errorf("%s is a manifest list", ref)
}
return types.ImageManifest{}, errors.Errorf("%s is not a manifest", ref)
}
func fetchList(ctx context.Context, repo distribution.Repository, ref reference.Named) ([]types.ImageManifest, error) {
manifest, err := getManifest(ctx, repo, ref)
if err != nil {
return nil, err
}
switch v := manifest.(type) {
case *manifestlist.DeserializedManifestList:
return pullManifestList(ctx, ref, repo, *v)
default:
return nil, errors.Errorf("unsupported manifest format: %v", v)
}
}
func getManifest(ctx context.Context, repo distribution.Repository, ref reference.Named) (distribution.Manifest, error) {
manSvc, err := repo.Manifests(ctx)
if err != nil {
return nil, err
}
dgst, opts, err := getManifestOptionsFromReference(ref)
if err != nil {
return nil, errors.Errorf("image manifest for %q does not exist", ref)
}
return manSvc.Get(ctx, dgst, opts...)
}
func pullManifestSchemaV2(ctx context.Context, ref reference.Named, repo distribution.Repository, mfst schema2.DeserializedManifest) (types.ImageManifest, error) {
manifestDesc, err := validateManifestDigest(ref, mfst)
if err != nil {
return types.ImageManifest{}, err
}
configJSON, err := pullManifestSchemaV2ImageConfig(ctx, mfst.Target().Digest, repo)
if err != nil {
return types.ImageManifest{}, err
}
if manifestDesc.Platform == nil {
manifestDesc.Platform = &ocispec.Platform{}
}
// Fill in os and architecture fields from config JSON
if err := json.Unmarshal(configJSON, manifestDesc.Platform); err != nil {
return types.ImageManifest{}, err
}
return types.NewImageManifest(ref, manifestDesc, &mfst), nil
}
func pullManifestOCISchema(ctx context.Context, ref reference.Named, repo distribution.Repository, mfst ocischema.DeserializedManifest) (types.ImageManifest, error) {
manifestDesc, err := validateManifestDigest(ref, mfst)
if err != nil {
return types.ImageManifest{}, err
}
configJSON, err := pullManifestSchemaV2ImageConfig(ctx, mfst.Target().Digest, repo)
if err != nil {
return types.ImageManifest{}, err
}
if manifestDesc.Platform == nil {
manifestDesc.Platform = &ocispec.Platform{}
}
// Fill in os and architecture fields from config JSON
if err := json.Unmarshal(configJSON, manifestDesc.Platform); err != nil {
return types.ImageManifest{}, err
}
return types.NewOCIImageManifest(ref, manifestDesc, &mfst), nil
}
func pullManifestSchemaV2ImageConfig(ctx context.Context, dgst digest.Digest, repo distribution.Repository) ([]byte, error) {
blobs := repo.Blobs(ctx)
configJSON, err := blobs.Get(ctx, dgst)
if err != nil {
return nil, err
}
verifier := dgst.Verifier()
if _, err := verifier.Write(configJSON); err != nil {
return nil, err
}
if !verifier.Verified() {
return nil, errors.Errorf("image config verification failed for digest %s", dgst)
}
return configJSON, nil
}
// validateManifestDigest computes the manifest digest, and, if pulling by
// digest, ensures that it matches the requested digest.
func validateManifestDigest(ref reference.Named, mfst distribution.Manifest) (ocispec.Descriptor, error) {
mediaType, canonical, err := mfst.Payload()
if err != nil {
return ocispec.Descriptor{}, err
}
desc := ocispec.Descriptor{
Digest: digest.FromBytes(canonical),
Size: int64(len(canonical)),
MediaType: mediaType,
}
// If pull by digest, then verify the manifest digest.
if digested, isDigested := ref.(reference.Canonical); isDigested && digested.Digest() != desc.Digest {
return ocispec.Descriptor{}, errors.Errorf("manifest verification failed for digest %s", digested.Digest())
}
return desc, nil
}
// pullManifestList handles "manifest lists" which point to various
// platform-specific manifests.
func pullManifestList(ctx context.Context, ref reference.Named, repo distribution.Repository, mfstList manifestlist.DeserializedManifestList) ([]types.ImageManifest, error) {
if _, err := validateManifestDigest(ref, mfstList); err != nil {
return nil, err
}
infos := make([]types.ImageManifest, 0, len(mfstList.Manifests))
for _, manifestDescriptor := range mfstList.Manifests {
manSvc, err := repo.Manifests(ctx)
if err != nil {
return nil, err
}
manifest, err := manSvc.Get(ctx, manifestDescriptor.Digest)
if err != nil {
return nil, err
}
manifestRef, err := reference.WithDigest(ref, manifestDescriptor.Digest)
if err != nil {
return nil, err
}
var imageManifest types.ImageManifest
switch v := manifest.(type) {
case *schema2.DeserializedManifest:
imageManifest, err = pullManifestSchemaV2(ctx, manifestRef, repo, *v)
case *ocischema.DeserializedManifest:
imageManifest, err = pullManifestOCISchema(ctx, manifestRef, repo, *v)
default:
err = errors.Errorf("unsupported manifest type: %T", manifest)
}
if err != nil {
return nil, err
}
// Replace the platform filled in from the image config with the
// platform declared in the manifest list descriptor.
p := manifestDescriptor.Platform
imageManifest.Descriptor.Platform = types.OCIPlatform(&p)
infos = append(infos, imageManifest)
}
return infos, nil
}
func continueOnError(err error) bool {
switch v := err.(type) {
case errcode.Errors:
if len(v) == 0 {
return true
}
return continueOnError(v[0])
case errcode.Error:
switch v.Code {
case errcode.ErrorCodeUnauthorized, v2.ErrorCodeManifestUnknown, v2.ErrorCodeNameUnknown:
return true
default:
return false
}
case *distclient.UnexpectedHTTPResponseError:
return true
}
return false
}
func (c *client) iterateEndpoints(ctx context.Context, namedRef reference.Named, each func(context.Context, distribution.Repository, reference.Named) (bool, error)) error {
endpoints, err := allEndpoints(namedRef, c.insecureRegistry)
if err != nil {
return err
}
repoInfo, _ := registry.ParseRepositoryInfo(namedRef)
confirmedTLSRegistries := make(map[string]bool)
for _, endpoint := range endpoints {
if endpoint.URL.Scheme != "https" {
if _, confirmedTLS := confirmedTLSRegistries[endpoint.URL.Host]; confirmedTLS {
logrus.Debugf("skipping non-TLS endpoint %s for host/port that appears to use TLS", endpoint.URL)
continue
}
}
if c.insecureRegistry {
endpoint.TLSConfig.InsecureSkipVerify = true
}
repoEndpoint := repositoryEndpoint{endpoint: endpoint, info: repoInfo}
repo, err := c.getRepositoryForReference(ctx, namedRef, repoEndpoint)
if err != nil {
logrus.Debugf("error %s with repo endpoint %+v", err, repoEndpoint)
var protoErr httpProtoError
if errors.As(err, &protoErr) {
continue
}
return err
}
if endpoint.URL.Scheme == "http" && !c.insecureRegistry {
logrus.Debugf("skipping non-tls registry endpoint: %s", endpoint.URL)
continue
}
done, err := each(ctx, repo, namedRef)
if err != nil {
if continueOnError(err) {
if endpoint.URL.Scheme == "https" {
confirmedTLSRegistries[endpoint.URL.Host] = true
}
logrus.Debugf("continuing on error (%T) %s", err, err)
continue
}
logrus.Debugf("not continuing on error (%T) %s", err, err)
return err
}
if done {
return nil
}
}
return newNotFoundError(namedRef.String())
}
// allEndpoints returns a list of endpoints ordered by priority (v2, http).
func allEndpoints(namedRef reference.Named, insecure bool) ([]registry.APIEndpoint, error) {
var serviceOpts registry.ServiceOptions
if insecure {
logrus.Debugf("allowing insecure registry for: %s", reference.Domain(namedRef))
serviceOpts.InsecureRegistries = []string{reference.Domain(namedRef)}
}
registryService, err := registry.NewService(serviceOpts)
if err != nil {
return []registry.APIEndpoint{}, err
}
repoInfo, _ := registry.ParseRepositoryInfo(namedRef)
endpoints, err := registryService.LookupPullEndpoints(reference.Domain(repoInfo.Name))
logrus.Debugf("endpoints for %s: %v", namedRef, endpoints)
return endpoints, err
}
func newNotFoundError(ref string) *notFoundError {
return &notFoundError{err: errors.New("no such manifest: " + ref)}
}
type notFoundError struct {
err error
}
func (n *notFoundError) Error() string {
return n.err.Error()
}
// NotFound satisfies interface github.com/docker/docker/errdefs.ErrNotFound
func (notFoundError) NotFound() {}


@ -1,387 +0,0 @@
package trust
import (
"context"
"encoding/json"
"io"
"net"
"net/http"
"net/url"
"os"
"path"
"path/filepath"
"time"
"github.com/distribution/reference"
"github.com/docker/cli/cli/config"
"github.com/docker/distribution/registry/client/auth"
"github.com/docker/distribution/registry/client/auth/challenge"
"github.com/docker/distribution/registry/client/transport"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/registry"
"github.com/docker/go-connections/tlsconfig"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/passphrase"
"github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/trustmanager"
"github.com/theupdateframework/notary/trustpinning"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
)
var (
// ReleasesRole is the role named "releases"
ReleasesRole = data.RoleName(path.Join(data.CanonicalTargetsRole.String(), "releases"))
// ActionsPullOnly defines the actions for read-only interactions with a Notary Repository
ActionsPullOnly = []string{"pull"}
// ActionsPushAndPull defines the actions for read-write interactions with a Notary Repository
ActionsPushAndPull = []string{"pull", "push"}
)
// NotaryServer is the endpoint serving the Notary trust server
const NotaryServer = "https://notary.docker.io"
// GetTrustDirectory returns the base trust directory name
func GetTrustDirectory() string {
return filepath.Join(config.Dir(), "trust")
}
// certificateDirectory returns the directory containing
// TLS certificates for the given server. An error is
// returned if there was an error parsing the server string.
func certificateDirectory(server string) (string, error) {
u, err := url.Parse(server)
if err != nil {
return "", err
}
return filepath.Join(config.Dir(), "tls", u.Host), nil
}
// Server returns the base URL for the trust server.
func Server(index *registrytypes.IndexInfo) (string, error) {
if s := os.Getenv("DOCKER_CONTENT_TRUST_SERVER"); s != "" {
urlObj, err := url.Parse(s)
if err != nil || urlObj.Scheme != "https" {
return "", errors.Errorf("valid https URL required for trust server, got %s", s)
}
return s, nil
}
if index.Official {
return NotaryServer, nil
}
return "https://" + index.Name, nil
}
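
Resolution order in Server: DOCKER_CONTENT_TRUST_SERVER (which must be an https URL), then the hosted Notary server for official images, then https://<index name>. A usage sketch, assuming the pre-removal import path github.com/docker/cli/cli/trust and DOCKER_CONTENT_TRUST_SERVER unset:

    package main

    import (
        "fmt"

        "github.com/docker/cli/cli/trust"
        registrytypes "github.com/docker/docker/api/types/registry"
    )

    func main() {
        s, _ := trust.Server(&registrytypes.IndexInfo{Official: true})
        fmt.Println(s) // https://notary.docker.io

        s, _ = trust.Server(&registrytypes.IndexInfo{Name: "registry.example.com"})
        fmt.Println(s) // https://registry.example.com
    }
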
type simpleCredentialStore struct {
auth registrytypes.AuthConfig
}
func (scs simpleCredentialStore) Basic(*url.URL) (string, string) {
return scs.auth.Username, scs.auth.Password
}
func (scs simpleCredentialStore) RefreshToken(*url.URL, string) string {
return scs.auth.IdentityToken
}
func (simpleCredentialStore) SetRefreshToken(*url.URL, string, string) {}
// GetNotaryRepository returns a NotaryRepository which stores all the
// information needed to operate on a notary repository.
// It creates an HTTP transport providing authentication support.
func GetNotaryRepository(in io.Reader, out io.Writer, userAgent string, repoInfo *registry.RepositoryInfo, authConfig *registrytypes.AuthConfig, actions ...string) (client.Repository, error) {
server, err := Server(repoInfo.Index)
if err != nil {
return nil, err
}
cfg := tlsconfig.ClientDefault()
cfg.InsecureSkipVerify = !repoInfo.Index.Secure
// Get certificate base directory
certDir, err := certificateDirectory(server)
if err != nil {
return nil, err
}
logrus.Debugf("reading certificate directory: %s", certDir)
if err := registry.ReadCertsDirectory(cfg, certDir); err != nil {
return nil, err
}
base := &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: cfg,
DisableKeepAlives: true,
}
// Skip configuration headers since request is not going to Docker daemon
modifiers := registry.Headers(userAgent, http.Header{})
authTransport := transport.NewTransport(base, modifiers...)
pingClient := &http.Client{
Transport: authTransport,
Timeout: 5 * time.Second,
}
endpointStr := server + "/v2/"
req, err := http.NewRequest(http.MethodGet, endpointStr, nil)
if err != nil {
return nil, err
}
challengeManager := challenge.NewSimpleManager()
resp, err := pingClient.Do(req)
if err != nil {
// Ignore error on ping to operate in offline mode
logrus.Debugf("Error pinging notary server %q: %s", endpointStr, err)
} else {
defer resp.Body.Close()
// Add response to the challenge manager to parse out
// authentication header and register authentication method
if err := challengeManager.AddResponse(resp); err != nil {
return nil, err
}
}
scope := auth.RepositoryScope{
Repository: repoInfo.Name.Name(),
Actions: actions,
}
creds := simpleCredentialStore{auth: *authConfig}
tokenHandlerOptions := auth.TokenHandlerOptions{
Transport: authTransport,
Credentials: creds,
Scopes: []auth.Scope{scope},
ClientID: registry.AuthClientID,
}
tokenHandler := auth.NewTokenHandlerWithOptions(tokenHandlerOptions)
basicHandler := auth.NewBasicHandler(creds)
modifiers = append(modifiers, auth.NewAuthorizer(challengeManager, tokenHandler, basicHandler))
tr := transport.NewTransport(base, modifiers...)
return client.NewFileCachedRepository(
GetTrustDirectory(),
data.GUN(repoInfo.Name.Name()),
server,
tr,
GetPassphraseRetriever(in, out),
trustpinning.TrustPinConfig{})
}
// GetPassphraseRetriever returns a passphrase retriever that utilizes Content Trust env vars
func GetPassphraseRetriever(in io.Reader, out io.Writer) notary.PassRetriever {
aliasMap := map[string]string{
"root": "root",
"snapshot": "repository",
"targets": "repository",
"default": "repository",
}
baseRetriever := passphrase.PromptRetrieverWithInOut(in, out, aliasMap)
env := map[string]string{
"root": os.Getenv("DOCKER_CONTENT_TRUST_ROOT_PASSPHRASE"),
"snapshot": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
"targets": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
"default": os.Getenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE"),
}
return func(keyName string, alias string, createNew bool, numAttempts int) (string, bool, error) {
if v := env[alias]; v != "" {
return v, numAttempts > 1, nil
}
// For non-root roles, we can also try the "default" alias if it is specified
if v := env["default"]; v != "" && alias != data.CanonicalRootRole.String() {
return v, numAttempts > 1, nil
}
return baseRetriever(keyName, alias, createNew, numAttempts)
}
}
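
The retriever's precedence is: role-specific environment variable, then the "default" alias for non-root roles, then the interactive prompt. A sketch, again assuming the pre-removal import path (the key ID and passphrase are made up):

    package main

    import (
        "fmt"
        "os"

        "github.com/docker/cli/cli/trust"
    )

    func main() {
        // Set before constructing the retriever, since the env map is
        // captured inside GetPassphraseRetriever.
        os.Setenv("DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE", "s3cret")

        retriever := trust.GetPassphraseRetriever(os.Stdin, os.Stdout)
        pass, giveUp, err := retriever("abcd1234", "targets", false, 1)
        fmt.Println(pass, giveUp, err) // s3cret false <nil>
    }
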
// NotaryError formats an error message received from the notary service
func NotaryError(repoName string, err error) error {
switch err.(type) {
case *json.SyntaxError:
logrus.Debugf("Notary syntax error: %s", err)
return errors.Errorf("Error: no trust data available for remote repository %s. Try running notary server and setting DOCKER_CONTENT_TRUST_SERVER to its HTTPS address?", repoName)
case signed.ErrExpired:
return errors.Errorf("Error: remote repository %s out-of-date: %v", repoName, err)
case trustmanager.ErrKeyNotFound:
return errors.Errorf("Error: signing keys for remote repository %s not found: %v", repoName, err)
case storage.NetworkError:
return errors.Errorf("Error: error contacting notary server: %v", err)
case storage.ErrMetaNotFound:
return errors.Errorf("Error: trust data missing for remote repository %s or remote repository not found: %v", repoName, err)
case trustpinning.ErrRootRotationFail, trustpinning.ErrValidationFail, signed.ErrInvalidKeyType:
return errors.Errorf("Warning: potential malicious behavior - trust data mismatch for remote repository %s: %v", repoName, err)
case signed.ErrNoKeys:
return errors.Errorf("Error: could not find signing keys for remote repository %s, or could not decrypt signing key: %v", repoName, err)
case signed.ErrLowVersion:
return errors.Errorf("Warning: potential malicious behavior - trust data version is lower than expected for remote repository %s: %v", repoName, err)
case signed.ErrRoleThreshold:
return errors.Errorf("Warning: potential malicious behavior - trust data has insufficient signatures for remote repository %s: %v", repoName, err)
case client.ErrRepositoryNotExist:
return errors.Errorf("Error: remote trust data does not exist for %s: %v", repoName, err)
case signed.ErrInsufficientSignatures:
return errors.Errorf("Error: could not produce valid signature for %s. If Yubikey was used, was touch input provided?: %v", repoName, err)
}
return err
}
// AddToAllSignableRoles attempts to add the image target to all the top level
// delegation roles we can (based on whether we have the signing key and whether
// the role's path allows us to).
//
// If there are no delegation roles, we add to the targets role.
func AddToAllSignableRoles(repo client.Repository, target *client.Target) error {
signableRoles, err := GetSignableRoles(repo, target)
if err != nil {
return err
}
return repo.AddTarget(target, signableRoles...)
}
// GetSignableRoles returns a list of roles for which we have valid signing
// keys, given a notary repository and a target
func GetSignableRoles(repo client.Repository, target *client.Target) ([]data.RoleName, error) {
var signableRoles []data.RoleName
// translate the full key names, which include the GUN, into just the key IDs
allCanonicalKeyIDs := make(map[string]struct{})
for fullKeyID := range repo.GetCryptoService().ListAllKeys() {
allCanonicalKeyIDs[path.Base(fullKeyID)] = struct{}{}
}
allDelegationRoles, err := repo.GetDelegationRoles()
if err != nil {
return signableRoles, err
}
// if there are no delegation roles, then just try to sign it into the targets role
if len(allDelegationRoles) == 0 {
signableRoles = append(signableRoles, data.CanonicalTargetsRole)
return signableRoles, nil
}
// there are delegation roles, find every delegation role we have a key for,
// and attempt to sign in to all those roles.
for _, delegationRole := range allDelegationRoles {
// We do not support signing any delegation role that isn't a direct child of the targets role.
// Also don't bother checking the keys if we can't add the target
// to this role due to path restrictions
if path.Dir(delegationRole.Name.String()) != data.CanonicalTargetsRole.String() || !delegationRole.CheckPaths(target.Name) {
continue
}
for _, canonicalKeyID := range delegationRole.KeyIDs {
if _, ok := allCanonicalKeyIDs[canonicalKeyID]; ok {
signableRoles = append(signableRoles, delegationRole.Name)
break
}
}
}
if len(signableRoles) == 0 {
return signableRoles, errors.Errorf("no valid signing keys for delegation roles")
}
return signableRoles, nil
}
// ImageRefAndAuth contains all reference information and the auth config for an image request
type ImageRefAndAuth struct {
original string
authConfig *registrytypes.AuthConfig
reference reference.Named
repoInfo *registry.RepositoryInfo
tag string
digest digest.Digest
}
// GetImageReferencesAndAuth retrieves the necessary reference and auth information for an image name
// as an ImageRefAndAuth struct
func GetImageReferencesAndAuth(ctx context.Context,
authResolver func(ctx context.Context, index *registrytypes.IndexInfo) registrytypes.AuthConfig,
imgName string,
) (ImageRefAndAuth, error) {
ref, err := reference.ParseNormalizedNamed(imgName)
if err != nil {
return ImageRefAndAuth{}, err
}
// Resolve the Repository name from fqn to RepositoryInfo
repoInfo, _ := registry.ParseRepositoryInfo(ref)
authConfig := authResolver(ctx, repoInfo.Index)
return ImageRefAndAuth{
original: imgName,
authConfig: &authConfig,
reference: ref,
repoInfo: repoInfo,
tag: getTag(ref),
digest: getDigest(ref),
}, nil
}
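
GetImageReferencesAndAuth mostly normalizes the image name and splits out the tag or digest; a sketch with an anonymous auth resolver (pre-removal import paths, illustrative image name):

    package main

    import (
        "context"
        "fmt"

        "github.com/docker/cli/cli/trust"
        registrytypes "github.com/docker/docker/api/types/registry"
    )

    func main() {
        anonymous := func(ctx context.Context, index *registrytypes.IndexInfo) registrytypes.AuthConfig {
            return registrytypes.AuthConfig{} // no credentials
        }

        refAuth, err := trust.GetImageReferencesAndAuth(context.Background(), anonymous, "alpine:3.19")
        if err != nil {
            panic(err)
        }
        fmt.Println(refAuth.Reference()) // docker.io/library/alpine:3.19
        fmt.Println(refAuth.Tag())       // 3.19
    }
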
func getTag(ref reference.Named) string {
switch x := ref.(type) {
case reference.Canonical, reference.Digested:
return ""
case reference.NamedTagged:
return x.Tag()
default:
return ""
}
}
func getDigest(ref reference.Named) digest.Digest {
switch x := ref.(type) {
case reference.Canonical:
return x.Digest()
case reference.Digested:
return x.Digest()
default:
return digest.Digest("")
}
}
// AuthConfig returns the auth information (username, etc) for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) AuthConfig() *registrytypes.AuthConfig {
return imgRefAuth.authConfig
}
// Reference returns the Image reference for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) Reference() reference.Named {
return imgRefAuth.reference
}
// RepoInfo returns the repository information for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) RepoInfo() *registry.RepositoryInfo {
return imgRefAuth.repoInfo
}
// Tag returns the Image tag for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) Tag() string {
return imgRefAuth.tag
}
// Digest returns the Image digest for a given ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) Digest() digest.Digest {
return imgRefAuth.digest
}
// Name returns the image name used to initialize the ImageRefAndAuth
func (imgRefAuth *ImageRefAndAuth) Name() string {
return imgRefAuth.original
}


@ -1,143 +0,0 @@
package trust
import (
"context"
"encoding/hex"
"encoding/json"
"fmt"
"io"
"sort"
"github.com/distribution/reference"
"github.com/docker/cli/cli/internal/jsonstream"
"github.com/docker/cli/cli/streams"
"github.com/docker/docker/api/types"
registrytypes "github.com/docker/docker/api/types/registry"
"github.com/docker/docker/registry"
"github.com/opencontainers/go-digest"
"github.com/pkg/errors"
"github.com/theupdateframework/notary/client"
"github.com/theupdateframework/notary/tuf/data"
)
// Streams is an interface which exposes the standard input and output streams.
//
// Same interface as [github.com/docker/cli/cli/command.Streams] but defined here to prevent a circular import.
type Streams interface {
In() *streams.In
Out() *streams.Out
Err() *streams.Out
}
// PushTrustedReference pushes a canonical reference to the trust server.
//
//nolint:gocyclo
func PushTrustedReference(ctx context.Context, ioStreams Streams, repoInfo *registry.RepositoryInfo, ref reference.Named, authConfig registrytypes.AuthConfig, in io.Reader, userAgent string) error {
// For a trusted push we want to find the target entry that matches the
// tag provided to this function, and then do an AddTarget later.
notaryTarget := &client.Target{}
// Count the calls to handleTarget;
// if it is called more than once, that is treated as an error in a trusted push.
cnt := 0
handleTarget := func(msg jsonstream.JSONMessage) {
cnt++
if cnt > 1 {
// handleTarget should only be called once. This will be treated as an error.
return
}
var pushResult types.PushResult
err := json.Unmarshal(*msg.Aux, &pushResult)
if err == nil && pushResult.Tag != "" {
if dgst, err := digest.Parse(pushResult.Digest); err == nil {
h, err := hex.DecodeString(dgst.Hex())
if err != nil {
notaryTarget = nil
return
}
notaryTarget.Name = pushResult.Tag
notaryTarget.Hashes = data.Hashes{string(dgst.Algorithm()): h}
notaryTarget.Length = int64(pushResult.Size)
}
}
}
var tag string
switch x := ref.(type) {
case reference.Canonical:
return errors.New("cannot push a digest reference")
case reference.NamedTagged:
tag = x.Tag()
default:
// We want trust signatures to always take an explicit tag;
// otherwise it will act as an untrusted push.
if err := jsonstream.Display(ctx, in, ioStreams.Out()); err != nil {
return err
}
_, _ = fmt.Fprintln(ioStreams.Err(), "No tag specified, skipping trust metadata push")
return nil
}
if err := jsonstream.Display(ctx, in, ioStreams.Out(), jsonstream.WithAuxCallback(handleTarget)); err != nil {
return err
}
if cnt > 1 {
return errors.Errorf("internal error: only one call to handleTarget expected")
}
if notaryTarget == nil {
return errors.Errorf("no targets found, provide a specific tag in order to sign it")
}
_, _ = fmt.Fprintln(ioStreams.Out(), "Signing and pushing trust metadata")
repo, err := GetNotaryRepository(ioStreams.In(), ioStreams.Out(), userAgent, repoInfo, &authConfig, "push", "pull")
if err != nil {
return errors.Wrap(err, "error establishing connection to trust repository")
}
// get the latest repository metadata so we can figure out which roles to sign
_, err = repo.ListTargets()
switch err.(type) {
case client.ErrRepoNotInitialized, client.ErrRepositoryNotExist:
keys := repo.GetCryptoService().ListKeys(data.CanonicalRootRole)
var rootKeyID string
// always select the first root key
if len(keys) > 0 {
sort.Strings(keys)
rootKeyID = keys[0]
} else {
rootPublicKey, err := repo.GetCryptoService().Create(data.CanonicalRootRole, "", data.ECDSAKey)
if err != nil {
return err
}
rootKeyID = rootPublicKey.ID()
}
// Initialize the notary repository with a remotely managed snapshot key
if err := repo.Initialize([]string{rootKeyID}, data.CanonicalSnapshotRole); err != nil {
return NotaryError(repoInfo.Name.Name(), err)
}
_, _ = fmt.Fprintf(ioStreams.Out(), "Finished initializing %q\n", repoInfo.Name.Name())
err = repo.AddTarget(notaryTarget, data.CanonicalTargetsRole)
case nil:
// already initialized and we have successfully downloaded the latest metadata
err = AddToAllSignableRoles(repo, notaryTarget)
default:
return NotaryError(repoInfo.Name.Name(), err)
}
if err == nil {
err = repo.Publish()
}
if err != nil {
err = errors.Wrapf(err, "failed to sign %s:%s", repoInfo.Name.Name(), tag)
return NotaryError(repoInfo.Name.Name(), err)
}
_, _ = fmt.Fprintf(ioStreams.Out(), "Successfully signed %s:%s\n", repoInfo.Name.Name(), tag)
return nil
}


@ -1,22 +0,0 @@
package trust
import (
"context"
"fmt"
"io"
"github.com/distribution/reference"
"github.com/docker/docker/client"
)
// TagTrusted tags a trusted ref. It is a shallow wrapper around [client.Client.ImageTag]
// that updates the given image references to their familiar format for tagging
// and printing.
func TagTrusted(ctx context.Context, apiClient client.ImageAPIClient, out io.Writer, trustedRef reference.Canonical, ref reference.NamedTagged) error {
// Use familiar references when interacting with client and output
familiarRef := reference.FamiliarString(ref)
trustedFamiliarRef := reference.FamiliarString(trustedRef)
_, _ = fmt.Fprintf(out, "Tagging %s as %s\n", trustedFamiliarRef, familiarRef)
return apiClient.ImageTag(ctx, trustedFamiliarRef, familiarRef)
}


@ -1 +0,0 @@
package manifest


@ -1,239 +0,0 @@
package manifestlist
import (
"encoding/json"
"errors"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
const (
// MediaTypeManifestList specifies the mediaType for manifest lists.
MediaTypeManifestList = "application/vnd.docker.distribution.manifest.list.v2+json"
)
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
var SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifestList,
}
// OCISchemaVersion provides a pre-initialized version structure for this
// package's OCI schema version of the manifest.
var OCISchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: v1.MediaTypeImageIndex,
}
func init() {
manifestListFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
m := new(DeserializedManifestList)
err := m.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
if m.MediaType != MediaTypeManifestList {
err = fmt.Errorf("mediaType in manifest list should be '%s' not '%s'",
MediaTypeManifestList, m.MediaType)
return nil, distribution.Descriptor{}, err
}
dgst := digest.FromBytes(b)
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifestList}, err
}
err := distribution.RegisterManifestSchema(MediaTypeManifestList, manifestListFunc)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
imageIndexFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
if err := validateIndex(b); err != nil {
return nil, distribution.Descriptor{}, err
}
m := new(DeserializedManifestList)
err := m.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
if m.MediaType != "" && m.MediaType != v1.MediaTypeImageIndex {
err = fmt.Errorf("if present, mediaType in image index should be '%s' not '%s'",
v1.MediaTypeImageIndex, m.MediaType)
return nil, distribution.Descriptor{}, err
}
dgst := digest.FromBytes(b)
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageIndex}, err
}
err = distribution.RegisterManifestSchema(v1.MediaTypeImageIndex, imageIndexFunc)
if err != nil {
panic(fmt.Sprintf("Unable to register OCI Image Index: %s", err))
}
}
// PlatformSpec specifies a platform where a particular image manifest is
// applicable.
type PlatformSpec struct {
// Architecture field specifies the CPU architecture, for example
// `amd64` or `ppc64`.
Architecture string `json:"architecture"`
// OS specifies the operating system, for example `linux` or `windows`.
OS string `json:"os"`
// OSVersion is an optional field specifying the operating system
// version, for example `10.0.10586`.
OSVersion string `json:"os.version,omitempty"`
// OSFeatures is an optional field specifying an array of strings,
// each listing a required OS feature (for example on Windows `win32k`).
OSFeatures []string `json:"os.features,omitempty"`
// Variant is an optional field specifying a variant of the CPU, for
// example `ppc64le` to specify a little-endian version of a PowerPC CPU.
Variant string `json:"variant,omitempty"`
// Features is an optional field specifying an array of strings, each
// listing a required CPU feature (for example `sse4` or `aes`).
Features []string `json:"features,omitempty"`
}
// A ManifestDescriptor references a platform-specific manifest.
type ManifestDescriptor struct {
distribution.Descriptor
// Platform specifies which platform the manifest pointed to by the
// descriptor runs on.
Platform PlatformSpec `json:"platform"`
}
// ManifestList references manifests for various platforms.
type ManifestList struct {
manifest.Versioned
// Config references the image configuration as a blob.
Manifests []ManifestDescriptor `json:"manifests"`
}
// References returns the distribution descriptors for the referenced image
// manifests.
func (m ManifestList) References() []distribution.Descriptor {
dependencies := make([]distribution.Descriptor, len(m.Manifests))
for i := range m.Manifests {
dependencies[i] = m.Manifests[i].Descriptor
}
return dependencies
}
// DeserializedManifestList wraps ManifestList with a copy of the original
// JSON.
type DeserializedManifestList struct {
ManifestList
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromDescriptors takes a slice of descriptors, and returns a
// DeserializedManifestList which contains the resulting manifest list
// and its JSON representation.
func FromDescriptors(descriptors []ManifestDescriptor) (*DeserializedManifestList, error) {
var mediaType string
if len(descriptors) > 0 && descriptors[0].Descriptor.MediaType == v1.MediaTypeImageManifest {
mediaType = v1.MediaTypeImageIndex
} else {
mediaType = MediaTypeManifestList
}
return FromDescriptorsWithMediaType(descriptors, mediaType)
}
// FromDescriptorsWithMediaType is for testing purposes; it's useful to be able to specify the media type explicitly.
func FromDescriptorsWithMediaType(descriptors []ManifestDescriptor, mediaType string) (*DeserializedManifestList, error) {
m := ManifestList{
Versioned: manifest.Versioned{
SchemaVersion: 2,
MediaType: mediaType,
},
}
m.Manifests = make([]ManifestDescriptor, len(descriptors))
copy(m.Manifests, descriptors)
deserialized := DeserializedManifestList{
ManifestList: m,
}
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new ManifestList struct from JSON data.
func (m *DeserializedManifestList) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest list in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into ManifestList object
var manifestList ManifestList
if err := json.Unmarshal(m.canonical, &manifestList); err != nil {
return err
}
m.ManifestList = manifestList
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifestList) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifestList")
}
// Payload returns the raw content of the manifest list. The contents can be
// used to calculate the content identifier.
func (m DeserializedManifestList) Payload() (string, []byte, error) {
var mediaType string
if m.MediaType == "" {
mediaType = v1.MediaTypeImageIndex
} else {
mediaType = m.MediaType
}
return mediaType, m.canonical, nil
}
// unknownDocument represents a manifest, manifest list, or index that has not
// yet been validated
type unknownDocument struct {
Config interface{} `json:"config,omitempty"`
Layers interface{} `json:"layers,omitempty"`
}
// validateIndex returns an error if the byte slice is invalid JSON or if it
// contains fields that belong to a manifest
func validateIndex(b []byte) error {
var doc unknownDocument
if err := json.Unmarshal(b, &doc); err != nil {
return err
}
if doc.Config != nil || doc.Layers != nil {
return errors.New("index: expected index but found manifest")
}
return nil
}
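
The media-type switch in FromDescriptors means a list whose first entry is an OCI image manifest serializes as an OCI image index, and anything else as a Docker manifest list. A sketch against the upstream import path (the descriptor is illustrative):

    package main

    import (
        "fmt"

        "github.com/docker/distribution/manifest/manifestlist"
        v1 "github.com/opencontainers/image-spec/specs-go/v1"
    )

    func main() {
        var d manifestlist.ManifestDescriptor
        d.MediaType = v1.MediaTypeImageManifest // promoted from the embedded Descriptor

        ml, err := manifestlist.FromDescriptors([]manifestlist.ManifestDescriptor{d})
        if err != nil {
            panic(err)
        }
        mediaType, _, _ := ml.Payload()
        fmt.Println(mediaType) // application/vnd.oci.image.index.v1+json
    }
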


@ -1,107 +0,0 @@
package ocischema
import (
"context"
"errors"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
// Builder is a type for constructing manifests.
type Builder struct {
// bs is a BlobService used to publish the configuration blob.
bs distribution.BlobService
// configJSON is the image configuration blob, serialized as JSON.
configJSON []byte
// layers is a list of layer descriptors that gets built by successive
// calls to AppendReference.
layers []distribution.Descriptor
// Annotations contains arbitrary metadata relating to the targeted content.
annotations map[string]string
// For testing purposes
mediaType string
}
// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes a BlobService so it can publish the configuration blob
// as part of the Build process, and annotations.
func NewManifestBuilder(bs distribution.BlobService, configJSON []byte, annotations map[string]string) distribution.ManifestBuilder {
mb := &Builder{
bs: bs,
configJSON: make([]byte, len(configJSON)),
annotations: annotations,
mediaType: v1.MediaTypeImageManifest,
}
copy(mb.configJSON, configJSON)
return mb
}
// SetMediaType assigns the passed media type, or returns an error if it is
// not a valid media type for OCI image manifests (currently "" or "application/vnd.oci.image.manifest.v1+json").
func (mb *Builder) SetMediaType(mediaType string) error {
if mediaType != "" && mediaType != v1.MediaTypeImageManifest {
return errors.New("invalid media type for OCI image manifest")
}
mb.mediaType = mediaType
return nil
}
// Build produces a final manifest from the given references.
func (mb *Builder) Build(ctx context.Context) (distribution.Manifest, error) {
m := Manifest{
Versioned: manifest.Versioned{
SchemaVersion: 2,
MediaType: mb.mediaType,
},
Layers: make([]distribution.Descriptor, len(mb.layers)),
Annotations: mb.annotations,
}
copy(m.Layers, mb.layers)
configDigest := digest.FromBytes(mb.configJSON)
var err error
m.Config, err = mb.bs.Stat(ctx, configDigest)
switch err {
case nil:
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = v1.MediaTypeImageConfig
return FromStruct(m)
case distribution.ErrBlobUnknown:
// nop
default:
return nil, err
}
// Add config to the blob store
m.Config, err = mb.bs.Put(ctx, v1.MediaTypeImageConfig, mb.configJSON)
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = v1.MediaTypeImageConfig
if err != nil {
return nil, err
}
return FromStruct(m)
}
// AppendReference adds a reference to the current ManifestBuilder.
func (mb *Builder) AppendReference(d distribution.Describable) error {
mb.layers = append(mb.layers, d.Descriptor())
return nil
}
// References returns the current references added to this builder.
func (mb *Builder) References() []distribution.Descriptor {
return mb.layers
}


@ -1,146 +0,0 @@
package ocischema
import (
"encoding/json"
"errors"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/opencontainers/go-digest"
v1 "github.com/opencontainers/image-spec/specs-go/v1"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 2, // historical value; does not pertain to the OCI or Docker version
MediaType: v1.MediaTypeImageManifest,
}
)
func init() {
ocischemaFunc := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
if err := validateManifest(b); err != nil {
return nil, distribution.Descriptor{}, err
}
m := new(DeserializedManifest)
err := m.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
dgst := digest.FromBytes(b)
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: v1.MediaTypeImageManifest}, err
}
err := distribution.RegisterManifestSchema(v1.MediaTypeImageManifest, ocischemaFunc)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// Manifest defines a ocischema manifest.
type Manifest struct {
manifest.Versioned
// Config references the image configuration as a blob.
Config distribution.Descriptor `json:"config"`
// Layers lists descriptors for the layers referenced by the
// configuration.
Layers []distribution.Descriptor `json:"layers"`
// Annotations contains arbitrary metadata for the image manifest.
Annotations map[string]string `json:"annotations,omitempty"`
}
// References returns the descriptors of this manifest's references.
func (m Manifest) References() []distribution.Descriptor {
references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
references = append(references, m.Config)
references = append(references, m.Layers...)
return references
}
// Target returns the target of this manifest.
func (m Manifest) Target() distribution.Descriptor {
return m.Config
}
// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
Manifest
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
var deserialized DeserializedManifest
deserialized.Manifest = m
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(m.canonical, &manifest); err != nil {
return err
}
if manifest.MediaType != "" && manifest.MediaType != v1.MediaTypeImageManifest {
return fmt.Errorf("if present, mediaType in manifest should be '%s' not '%s'",
v1.MediaTypeImageManifest, manifest.MediaType)
}
m.Manifest = manifest
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}
// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
return v1.MediaTypeImageManifest, m.canonical, nil
}
// unknownDocument represents a manifest, manifest list, or index that has not
// yet been validated
type unknownDocument struct {
Manifests interface{} `json:"manifests,omitempty"`
}
// validateManifest returns an error if the byte slice is invalid JSON or if it
// contains fields that belong to an index
func validateManifest(b []byte) error {
var doc unknownDocument
if err := json.Unmarshal(b, &doc); err != nil {
return err
}
if doc.Manifests != nil {
return errors.New("ocimanifest: expected manifest but found index")
}
return nil
}


@ -1,85 +0,0 @@
package schema2
import (
"context"
"github.com/docker/distribution"
"github.com/opencontainers/go-digest"
)
// builder is a type for constructing manifests.
type builder struct {
// bs is a BlobService used to publish the configuration blob.
bs distribution.BlobService
// configMediaType is media type used to describe configuration
configMediaType string
// configJSON is the image configuration blob, serialized as JSON.
configJSON []byte
// dependencies is a list of descriptors that gets built by successive
// calls to AppendReference. In case of image configuration these are layers.
dependencies []distribution.Descriptor
}
// NewManifestBuilder is used to build new manifests for the current schema
// version. It takes a BlobService so it can publish the configuration blob
// as part of the Build process.
func NewManifestBuilder(bs distribution.BlobService, configMediaType string, configJSON []byte) distribution.ManifestBuilder {
mb := &builder{
bs: bs,
configMediaType: configMediaType,
configJSON: make([]byte, len(configJSON)),
}
copy(mb.configJSON, configJSON)
return mb
}
// Build produces a final manifest from the given references.
func (mb *builder) Build(ctx context.Context) (distribution.Manifest, error) {
m := Manifest{
Versioned: SchemaVersion,
Layers: make([]distribution.Descriptor, len(mb.dependencies)),
}
copy(m.Layers, mb.dependencies)
configDigest := digest.FromBytes(mb.configJSON)
var err error
m.Config, err = mb.bs.Stat(ctx, configDigest)
switch err {
case nil:
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = mb.configMediaType
return FromStruct(m)
case distribution.ErrBlobUnknown:
// nop
default:
return nil, err
}
// Add config to the blob store
m.Config, err = mb.bs.Put(ctx, mb.configMediaType, mb.configJSON)
// Override MediaType, since Put always replaces the specified media
// type with application/octet-stream in the descriptor it returns.
m.Config.MediaType = mb.configMediaType
if err != nil {
return nil, err
}
return FromStruct(m)
}
// AppendReference adds a reference to the current ManifestBuilder.
func (mb *builder) AppendReference(d distribution.Describable) error {
mb.dependencies = append(mb.dependencies, d.Descriptor())
return nil
}
// References returns the current references added to this builder.
func (mb *builder) References() []distribution.Descriptor {
return mb.dependencies
}


@ -1,144 +0,0 @@
package schema2
import (
"encoding/json"
"errors"
"fmt"
"github.com/docker/distribution"
"github.com/docker/distribution/manifest"
"github.com/opencontainers/go-digest"
)
const (
// MediaTypeManifest specifies the mediaType for the current version.
MediaTypeManifest = "application/vnd.docker.distribution.manifest.v2+json"
// MediaTypeImageConfig specifies the mediaType for the image configuration.
MediaTypeImageConfig = "application/vnd.docker.container.image.v1+json"
// MediaTypePluginConfig specifies the mediaType for plugin configuration.
MediaTypePluginConfig = "application/vnd.docker.plugin.v1+json"
// MediaTypeLayer is the mediaType used for layers referenced by the
// manifest.
MediaTypeLayer = "application/vnd.docker.image.rootfs.diff.tar.gzip"
// MediaTypeForeignLayer is the mediaType used for layers that must be
// downloaded from foreign URLs.
MediaTypeForeignLayer = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
// MediaTypeUncompressedLayer is the mediaType used for layers which
// are not compressed.
MediaTypeUncompressedLayer = "application/vnd.docker.image.rootfs.diff.tar"
)
var (
// SchemaVersion provides a pre-initialized version structure for this
// package's version of the manifest.
SchemaVersion = manifest.Versioned{
SchemaVersion: 2,
MediaType: MediaTypeManifest,
}
)
func init() {
schema2Func := func(b []byte) (distribution.Manifest, distribution.Descriptor, error) {
m := new(DeserializedManifest)
err := m.UnmarshalJSON(b)
if err != nil {
return nil, distribution.Descriptor{}, err
}
dgst := digest.FromBytes(b)
return m, distribution.Descriptor{Digest: dgst, Size: int64(len(b)), MediaType: MediaTypeManifest}, err
}
err := distribution.RegisterManifestSchema(MediaTypeManifest, schema2Func)
if err != nil {
panic(fmt.Sprintf("Unable to register manifest: %s", err))
}
}
// Manifest defines a schema2 manifest.
type Manifest struct {
manifest.Versioned
// Config references the image configuration as a blob.
Config distribution.Descriptor `json:"config"`
// Layers lists descriptors for the layers referenced by the
// configuration.
Layers []distribution.Descriptor `json:"layers"`
}
// References returns the descriptors of this manifest's references.
func (m Manifest) References() []distribution.Descriptor {
references := make([]distribution.Descriptor, 0, 1+len(m.Layers))
references = append(references, m.Config)
references = append(references, m.Layers...)
return references
}
// Target returns the target of this manifest.
func (m Manifest) Target() distribution.Descriptor {
return m.Config
}
// DeserializedManifest wraps Manifest with a copy of the original JSON.
// It satisfies the distribution.Manifest interface.
type DeserializedManifest struct {
Manifest
// canonical is the canonical byte representation of the Manifest.
canonical []byte
}
// FromStruct takes a Manifest structure, marshals it to JSON, and returns a
// DeserializedManifest which contains the manifest and its JSON representation.
func FromStruct(m Manifest) (*DeserializedManifest, error) {
var deserialized DeserializedManifest
deserialized.Manifest = m
var err error
deserialized.canonical, err = json.MarshalIndent(&m, "", " ")
return &deserialized, err
}
// UnmarshalJSON populates a new Manifest struct from JSON data.
func (m *DeserializedManifest) UnmarshalJSON(b []byte) error {
m.canonical = make([]byte, len(b))
// store manifest in canonical
copy(m.canonical, b)
// Unmarshal canonical JSON into Manifest object
var manifest Manifest
if err := json.Unmarshal(m.canonical, &manifest); err != nil {
return err
}
if manifest.MediaType != MediaTypeManifest {
return fmt.Errorf("mediaType in manifest should be '%s' not '%s'",
MediaTypeManifest, manifest.MediaType)
}
m.Manifest = manifest
return nil
}
// MarshalJSON returns the contents of canonical. If canonical is empty,
// marshals the inner contents.
func (m *DeserializedManifest) MarshalJSON() ([]byte, error) {
if len(m.canonical) > 0 {
return m.canonical, nil
}
return nil, errors.New("JSON representation not initialized in DeserializedManifest")
}
// Payload returns the raw content of the manifest. The contents can be used to
// calculate the content identifier.
func (m DeserializedManifest) Payload() (string, []byte, error) {
return m.MediaType, m.canonical, nil
}


@ -1,12 +0,0 @@
package manifest
// Versioned provides a struct with the manifest schemaVersion and mediaType.
// Incoming content with unknown schema version can be decoded against this
// struct to check the version.
type Versioned struct {
// SchemaVersion is the image manifest schema that this image follows
SchemaVersion int `json:"schemaVersion"`
// MediaType is the media type of this schema.
MediaType string `json:"mediaType,omitempty"`
}
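
The sniffing pattern the comment describes: decode unknown bytes against Versioned first, then dispatch to a concrete parser. A minimal sketch (upstream import path):

    package main

    import (
        "encoding/json"
        "fmt"

        "github.com/docker/distribution/manifest"
    )

    func main() {
        raw := []byte(`{"schemaVersion":2,"mediaType":"application/vnd.docker.distribution.manifest.v2+json"}`)

        var v manifest.Versioned
        if err := json.Unmarshal(raw, &v); err != nil {
            panic(err)
        }
        // Dispatch on v.SchemaVersion / v.MediaType to the right parser here.
        fmt.Println(v.SchemaVersion, v.MediaType)
    }
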


@ -1,126 +0,0 @@
// Package uuid provides simple UUID generation. Only version 4 style UUIDs
// can be generated.
//
// Please see http://tools.ietf.org/html/rfc4122 for details on UUIDs.
package uuid
import (
"crypto/rand"
"fmt"
"io"
"os"
"syscall"
"time"
)
const (
// Bits is the number of bits in a UUID
Bits = 128
// Size is the number of bytes in a UUID
Size = Bits / 8
format = "%08x-%04x-%04x-%04x-%012x"
)
var (
// ErrUUIDInvalid indicates a parsed string is not a valid uuid.
ErrUUIDInvalid = fmt.Errorf("invalid uuid")
// Loggerf can be used to override the default logging destination. Such
// log messages in this library should be logged at warning or higher.
Loggerf = func(format string, args ...interface{}) {}
)
// UUID represents a UUID value. UUIDs can be compared and set to other values
// and accessed by byte.
type UUID [Size]byte
// Generate creates a new, version 4 uuid.
func Generate() (u UUID) {
const (
// ensures we back off for less than 450ms total. Use the following to
// select a new value, in units of 10ms:
// n*(n+1)/2 = d -> n^2 + n - 2d = 0 -> n = (sqrt(8d + 1) - 1)/2
maxretries = 9
backoff = time.Millisecond * 10
)
var (
totalBackoff time.Duration
count int
retries int
)
for {
// This should never block but the read may fail. Because of this,
// we just try to read the random number generator until we get
// something. This is a very rare condition but may happen.
b := time.Duration(retries) * backoff
time.Sleep(b)
totalBackoff += b
n, err := io.ReadFull(rand.Reader, u[count:])
if err != nil {
if retryOnError(err) && retries < maxretries {
count += n
retries++
Loggerf("error generating version 4 uuid, retrying: %v", err)
continue
}
// Any other errors represent a system problem. What did someone
// do to /dev/urandom?
panic(fmt.Errorf("error reading random number generator, retried for %v: %v", totalBackoff.String(), err))
}
break
}
u[6] = (u[6] & 0x0f) | 0x40 // set version byte
u[8] = (u[8] & 0x3f) | 0x80 // set high order byte 0b10{8,9,a,b}
return u
}
// Parse attempts to extract a uuid from the string or returns an error.
func Parse(s string) (u UUID, err error) {
if len(s) != 36 {
return UUID{}, ErrUUIDInvalid
}
// create stack addresses for each section of the uuid.
p := make([][]byte, 5)
if _, err := fmt.Sscanf(s, format, &p[0], &p[1], &p[2], &p[3], &p[4]); err != nil {
return u, err
}
copy(u[0:4], p[0])
copy(u[4:6], p[1])
copy(u[6:8], p[2])
copy(u[8:10], p[3])
copy(u[10:16], p[4])
return
}
func (u UUID) String() string {
return fmt.Sprintf(format, u[:4], u[4:6], u[6:8], u[8:10], u[10:])
}
// retryOnError tries to detect whether or not retrying would be fruitful.
func retryOnError(err error) bool {
switch err := err.(type) {
case *os.PathError:
return retryOnError(err.Err) // unpack the target error
case syscall.Errno:
if err == syscall.EPERM {
// EPERM represents an entropy pool exhaustion, a condition under
// which we backoff and retry.
return true
}
}
return false
}
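
Typical use of the package above (vendored from github.com/docker/distribution/uuid): generate, print, and round-trip through Parse.

    package main

    import (
        "fmt"

        "github.com/docker/distribution/uuid"
    )

    func main() {
        u := uuid.Generate() // random version 4 UUID
        fmt.Println(u)       // e.g. 8c7b04f1-9e0f-4aeb-a3c8-7f2e6c1d5b9a

        parsed, err := uuid.Parse(u.String())
        if err != nil {
            panic(err)
        }
        fmt.Println(parsed == u) // true; UUID is a comparable [16]byte
    }
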

vendor/github.com/docker/go/LICENSE generated vendored

@ -1,27 +0,0 @@
Copyright (c) 2012 The Go Authors. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

File diff suppressed because it is too large

File diff suppressed because it is too large


@ -1,143 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"unicode/utf8"
)
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See https://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
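
Since the fold helpers are unexported, exercising foldFunc has to happen inside the same package; a test-style sketch (file name and cases are illustrative):

    // fold_example_test.go (same package, illustrative only)
    package json

    import "testing"

    func TestFoldFuncSelection(t *testing.T) {
        // All ASCII, has a non-letter ('-'), and no k/K/s/S: asciiEqualFold.
        eq := foldFunc([]byte("Content-Type"))
        if !eq([]byte("Content-Type"), []byte("content-type")) {
            t.Fatal("expected case-insensitive match")
        }

        // A 'k' in the key forces the Unicode-aware equalFoldRight, because
        // 'k' also folds to the Kelvin sign U+212A.
        eq = foldFunc([]byte("kind"))
        if !eq([]byte("kind"), []byte("KIND")) {
            t.Fatal("expected fold match")
        }
    }
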


@ -1,141 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import "bytes"
// Compact appends to dst the JSON-encoded src with
// insignificant space characters elided.
func Compact(dst *bytes.Buffer, src []byte) error {
return compact(dst, src, false)
}
func compact(dst *bytes.Buffer, src []byte, escape bool) error {
origLen := dst.Len()
var scan scanner
scan.reset()
start := 0
for i, c := range src {
if escape && (c == '<' || c == '>' || c == '&') {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u00`)
dst.WriteByte(hex[c>>4])
dst.WriteByte(hex[c&0xF])
start = i + 1
}
// Convert U+2028 and U+2029 (E2 80 A8 and E2 80 A9).
if c == 0xE2 && i+2 < len(src) && src[i+1] == 0x80 && src[i+2]&^1 == 0xA8 {
if start < i {
dst.Write(src[start:i])
}
dst.WriteString(`\u202`)
dst.WriteByte(hex[src[i+2]&0xF])
start = i + 3
}
v := scan.step(&scan, c)
if v >= scanSkipSpace {
if v == scanError {
break
}
if start < i {
dst.Write(src[start:i])
}
start = i + 1
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
if start < len(src) {
dst.Write(src[start:])
}
return nil
}
func newline(dst *bytes.Buffer, prefix, indent string, depth int) {
dst.WriteByte('\n')
dst.WriteString(prefix)
for i := 0; i < depth; i++ {
dst.WriteString(indent)
}
}
// Indent appends to dst an indented form of the JSON-encoded src.
// Each element in a JSON object or array begins on a new,
// indented line beginning with prefix followed by one or more
// copies of indent according to the indentation nesting.
// The data appended to dst does not begin with the prefix nor
// any indentation, to make it easier to embed inside other formatted JSON data.
// Although leading space characters (space, tab, carriage return, newline)
// at the beginning of src are dropped, trailing space characters
// at the end of src are preserved and copied to dst.
// For example, if src has no trailing spaces, neither will dst;
// if src ends in a trailing newline, so will dst.
func Indent(dst *bytes.Buffer, src []byte, prefix, indent string) error {
origLen := dst.Len()
var scan scanner
scan.reset()
needIndent := false
depth := 0
for _, c := range src {
scan.bytes++
v := scan.step(&scan, c)
if v == scanSkipSpace {
continue
}
if v == scanError {
break
}
if needIndent && v != scanEndObject && v != scanEndArray {
needIndent = false
depth++
newline(dst, prefix, indent, depth)
}
// Emit semantically uninteresting bytes
// (in particular, punctuation in strings) unmodified.
if v == scanContinue {
dst.WriteByte(c)
continue
}
// Add spacing around real punctuation.
switch c {
case '{', '[':
// delay indent so that empty object and array are formatted as {} and [].
needIndent = true
dst.WriteByte(c)
case ',':
dst.WriteByte(c)
newline(dst, prefix, indent, depth)
case ':':
dst.WriteByte(c)
dst.WriteByte(' ')
case '}', ']':
if needIndent {
// suppress indent in empty object/array
needIndent = false
} else {
depth--
newline(dst, prefix, indent, depth)
}
dst.WriteByte(c)
default:
dst.WriteByte(c)
}
}
if scan.eof() == scanError {
dst.Truncate(origLen)
return scan.err
}
return nil
}
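
Compact and Indent mirror the standard library's signatures; a usage sketch against the vendored path github.com/docker/go/canonical/json (as it was before this removal):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/docker/go/canonical/json"
    )

    func main() {
        src := []byte("{ \"a\": 1,\n  \"b\": [2, 3] }")

        var compacted bytes.Buffer
        if err := json.Compact(&compacted, src); err != nil {
            panic(err)
        }
        fmt.Println(compacted.String()) // {"a":1,"b":[2,3]}

        var indented bytes.Buffer
        if err := json.Indent(&indented, compacted.Bytes(), "", "  "); err != nil {
            panic(err)
        }
        fmt.Println(indented.String())
    }
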


@ -1,623 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
// JSON value parser state machine.
// Just about at the limit of what is reasonable to write by hand.
// Some parts are a bit tedious, but overall it nicely factors out the
// otherwise common code from the multiple scanning functions
// in this package (Compact, Indent, checkValid, nextValue, etc).
//
// This file starts with two simple examples using the scanner
// before diving into the scanner itself.
import "strconv"
// checkValid verifies that data is valid JSON-encoded data.
// scan is passed in for use by checkValid to avoid an allocation.
func checkValid(data []byte, scan *scanner) error {
scan.reset()
for _, c := range data {
scan.bytes++
if scan.step(scan, c) == scanError {
return scan.err
}
}
if scan.eof() == scanError {
return scan.err
}
return nil
}
// nextValue splits data after the next whole JSON value,
// returning that value and the bytes that follow it as separate slices.
// scan is passed in for use by nextValue to avoid an allocation.
func nextValue(data []byte, scan *scanner) (value, rest []byte, err error) {
scan.reset()
for i, c := range data {
v := scan.step(scan, c)
if v >= scanEndObject {
switch v {
// probe the scanner with a space to determine whether we will
// get scanEnd on the next character. Otherwise, if the next character
// is not a space, scanEndTop allocates a needless error.
case scanEndObject, scanEndArray:
if scan.step(scan, ' ') == scanEnd {
return data[:i+1], data[i+1:], nil
}
case scanError:
return nil, nil, scan.err
case scanEnd:
return data[:i], data[i:], nil
}
}
}
if scan.eof() == scanError {
return nil, nil, scan.err
}
return data, nil, nil
}
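
A hypothetical in-package sketch of the split nextValue performs (it is unexported, so this only compiles inside the package; assumes a "fmt" import):

~~~ go
// nextValue returns the first complete value and everything after it.
var scan scanner
value, rest, err := nextValue([]byte(`{"a":1} [2,3]`), &scan)
if err != nil {
	panic(err)
}
fmt.Printf("%q %q\n", value, rest) // "{\"a\":1}" " [2,3]"
~~~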
// A SyntaxError is a description of a JSON syntax error.
type SyntaxError struct {
msg string // description of error
Offset int64 // error occurred after reading Offset bytes
}
func (e *SyntaxError) Error() string { return e.msg }
// A scanner is a JSON scanning state machine.
// Callers call scan.reset() and then pass bytes in one at a time
// by calling scan.step(&scan, c) for each byte.
// The return value, referred to as an opcode, tells the
// caller about significant parsing events like beginning
// and ending literals, objects, and arrays, so that the
// caller can follow along if it wishes.
// The return value scanEnd indicates that a single top-level
// JSON value has been completed, *before* the byte that
// just got passed in. (The indication must be delayed in order
// to recognize the end of numbers: is 123 a whole value or
// the beginning of 12345e+6?).
type scanner struct {
// The step is a func to be called to execute the next transition.
// Also tried using an integer constant and a single func
// with a switch, but using the func directly was 10% faster
// on a 64-bit Mac Mini, and it's nicer to read.
step func(*scanner, byte) int
// Reached end of top-level value.
endTop bool
// Stack of what we're in the middle of - array values, object keys, object values.
parseState []int
// Error that happened, if any.
err error
// 1-byte redo (see undo method)
redo bool
redoCode int
redoState func(*scanner, byte) int
// total bytes consumed, updated by decoder.Decode
bytes int64
}
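
A hypothetical in-package sketch of the step protocol described above; it also shows the delayed scanEnd for numbers, which only surfaces once a trailing byte (or eof) arrives:

~~~ go
// Drive the scanner one byte at a time, as callers are expected to.
var s scanner
s.reset()
for _, c := range []byte("123") {
	s.bytes++
	_ = s.step(&s, c) // scanBeginLiteral for '1', then scanContinue
}
// "123" could still be the start of "12345e+6", so scanEnd is only
// reported once eof (which probes the state machine with a space) runs.
if s.eof() == scanEnd {
	// complete top-level value
}
~~~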
// These values are returned by the state transition functions
// assigned to scanner.state and the method scanner.eof.
// They give details about the current state of the scan that
// callers might be interested to know about.
// It is okay to ignore the return value of any particular
// call to scanner.state: if one call returns scanError,
// every subsequent call will return scanError too.
const (
// Continue.
scanContinue = iota // uninteresting byte
scanBeginLiteral // end implied by next result != scanContinue
scanBeginObject // begin object
scanObjectKey // just finished object key (string)
scanObjectValue // just finished non-last object value
scanEndObject // end object (implies scanObjectValue if possible)
scanBeginArray // begin array
scanArrayValue // just finished array value
scanEndArray // end array (implies scanArrayValue if possible)
scanSkipSpace // space byte; can skip; known to be last "continue" result
// Stop.
scanEnd // top-level value ended *before* this byte; known to be first "stop" result
scanError // hit an error, scanner.err.
)
// These values are stored in the parseState stack.
// They give the current state of a composite value
// being scanned. If the parser is inside a nested value
// the parseState describes the nested state, outermost at entry 0.
const (
parseObjectKey = iota // parsing object key (before colon)
parseObjectValue // parsing object value (after colon)
parseArrayValue // parsing array value
)
// reset prepares the scanner for use.
// It must be called before calling s.step.
func (s *scanner) reset() {
s.step = stateBeginValue
s.parseState = s.parseState[0:0]
s.err = nil
s.redo = false
s.endTop = false
}
// eof tells the scanner that the end of input has been reached.
// It returns a scan status just as s.step does.
func (s *scanner) eof() int {
if s.err != nil {
return scanError
}
if s.endTop {
return scanEnd
}
s.step(s, ' ')
if s.endTop {
return scanEnd
}
if s.err == nil {
s.err = &SyntaxError{"unexpected end of JSON input", s.bytes}
}
return scanError
}
// pushParseState pushes a new parse state p onto the parse stack.
func (s *scanner) pushParseState(p int) {
s.parseState = append(s.parseState, p)
}
// popParseState pops a parse state (already obtained) off the stack
// and updates s.step accordingly.
func (s *scanner) popParseState() {
n := len(s.parseState) - 1
s.parseState = s.parseState[0:n]
s.redo = false
if n == 0 {
s.step = stateEndTop
s.endTop = true
} else {
s.step = stateEndValue
}
}
func isSpace(c byte) bool {
return c == ' ' || c == '\t' || c == '\r' || c == '\n'
}
// stateBeginValueOrEmpty is the state after reading `[`.
func stateBeginValueOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == ']' {
return stateEndValue(s, c)
}
return stateBeginValue(s, c)
}
// stateBeginValue is the state at the beginning of the input.
func stateBeginValue(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
switch c {
case '{':
s.step = stateBeginStringOrEmpty
s.pushParseState(parseObjectKey)
return scanBeginObject
case '[':
s.step = stateBeginValueOrEmpty
s.pushParseState(parseArrayValue)
return scanBeginArray
case '"':
s.step = stateInString
return scanBeginLiteral
case '-':
s.step = stateNeg
return scanBeginLiteral
case '0': // beginning of 0.123
s.step = state0
return scanBeginLiteral
case 't': // beginning of true
s.step = stateT
return scanBeginLiteral
case 'f': // beginning of false
s.step = stateF
return scanBeginLiteral
case 'n': // beginning of null
s.step = stateN
return scanBeginLiteral
}
if '1' <= c && c <= '9' { // beginning of 1234.5
s.step = state1
return scanBeginLiteral
}
return s.error(c, "looking for beginning of value")
}
// stateBeginStringOrEmpty is the state after reading `{`.
func stateBeginStringOrEmpty(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '}' {
n := len(s.parseState)
s.parseState[n-1] = parseObjectValue
return stateEndValue(s, c)
}
return stateBeginString(s, c)
}
// stateBeginString is the state after reading `{"key": value,`.
func stateBeginString(s *scanner, c byte) int {
if c <= ' ' && isSpace(c) {
return scanSkipSpace
}
if c == '"' {
s.step = stateInString
return scanBeginLiteral
}
return s.error(c, "looking for beginning of object key string")
}
// stateEndValue is the state after completing a value,
// such as after reading `{}` or `true` or `["x"`.
func stateEndValue(s *scanner, c byte) int {
n := len(s.parseState)
if n == 0 {
// Completed top-level before the current byte.
s.step = stateEndTop
s.endTop = true
return stateEndTop(s, c)
}
if c <= ' ' && isSpace(c) {
s.step = stateEndValue
return scanSkipSpace
}
ps := s.parseState[n-1]
switch ps {
case parseObjectKey:
if c == ':' {
s.parseState[n-1] = parseObjectValue
s.step = stateBeginValue
return scanObjectKey
}
return s.error(c, "after object key")
case parseObjectValue:
if c == ',' {
s.parseState[n-1] = parseObjectKey
s.step = stateBeginString
return scanObjectValue
}
if c == '}' {
s.popParseState()
return scanEndObject
}
return s.error(c, "after object key:value pair")
case parseArrayValue:
if c == ',' {
s.step = stateBeginValue
return scanArrayValue
}
if c == ']' {
s.popParseState()
return scanEndArray
}
return s.error(c, "after array element")
}
return s.error(c, "")
}
// stateEndTop is the state after finishing the top-level value,
// such as after reading `{}` or `[1,2,3]`.
// Only space characters should be seen now.
func stateEndTop(s *scanner, c byte) int {
if c != ' ' && c != '\t' && c != '\r' && c != '\n' {
// Complain about non-space byte on next call.
s.error(c, "after top-level value")
}
return scanEnd
}
// stateInString is the state after reading `"`.
func stateInString(s *scanner, c byte) int {
if c == '"' {
s.step = stateEndValue
return scanContinue
}
if c == '\\' {
s.step = stateInStringEsc
return scanContinue
}
if c < 0x20 {
return s.error(c, "in string literal")
}
return scanContinue
}
// stateInStringEsc is the state after reading `"\` during a quoted string.
func stateInStringEsc(s *scanner, c byte) int {
switch c {
case 'b', 'f', 'n', 'r', 't', '\\', '/', '"':
s.step = stateInString
return scanContinue
case 'u':
s.step = stateInStringEscU
return scanContinue
}
return s.error(c, "in string escape code")
}
// stateInStringEscU is the state after reading `"\u` during a quoted string.
func stateInStringEscU(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU1
return scanContinue
}
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU1 is the state after reading `"\u1` during a quoted string.
func stateInStringEscU1(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU12
return scanContinue
}
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU12 is the state after reading `"\u12` during a quoted string.
func stateInStringEscU12(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInStringEscU123
return scanContinue
}
return s.error(c, "in \\u hexadecimal character escape")
}
// stateInStringEscU123 is the state after reading `"\u123` during a quoted string.
func stateInStringEscU123(s *scanner, c byte) int {
if '0' <= c && c <= '9' || 'a' <= c && c <= 'f' || 'A' <= c && c <= 'F' {
s.step = stateInString
return scanContinue
}
return s.error(c, "in \\u hexadecimal character escape")
}
// stateNeg is the state after reading `-` during a number.
func stateNeg(s *scanner, c byte) int {
if c == '0' {
s.step = state0
return scanContinue
}
if '1' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return s.error(c, "in numeric literal")
}
// state1 is the state after reading a non-zero integer during a number,
// such as after reading `1` or `100` but not `0`.
func state1(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = state1
return scanContinue
}
return state0(s, c)
}
// state0 is the state after reading `0` during a number.
func state0(s *scanner, c byte) int {
if c == '.' {
s.step = stateDot
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateDot is the state after reading the integer and decimal point in a number,
// such as after reading `1.`.
func stateDot(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateDot0
return scanContinue
}
return s.error(c, "after decimal point in numeric literal")
}
// stateDot0 is the state after reading the integer, decimal point, and subsequent
// digits of a number, such as after reading `3.14`.
func stateDot0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
if c == 'e' || c == 'E' {
s.step = stateE
return scanContinue
}
return stateEndValue(s, c)
}
// stateE is the state after reading the mantissa and e in a number,
// such as after reading `314e` or `0.314e`.
func stateE(s *scanner, c byte) int {
if c == '+' || c == '-' {
s.step = stateESign
return scanContinue
}
return stateESign(s, c)
}
// stateESign is the state after reading the mantissa, e, and sign in a number,
// such as after reading `314e-` or `0.314e+`.
func stateESign(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
s.step = stateE0
return scanContinue
}
return s.error(c, "in exponent of numeric literal")
}
// stateE0 is the state after reading the mantissa, e, optional sign,
// and at least one digit of the exponent in a number,
// such as after reading `314e-2` or `0.314e+1` or `3.14e0`.
func stateE0(s *scanner, c byte) int {
if '0' <= c && c <= '9' {
return scanContinue
}
return stateEndValue(s, c)
}
// stateT is the state after reading `t`.
func stateT(s *scanner, c byte) int {
if c == 'r' {
s.step = stateTr
return scanContinue
}
return s.error(c, "in literal true (expecting 'r')")
}
// stateTr is the state after reading `tr`.
func stateTr(s *scanner, c byte) int {
if c == 'u' {
s.step = stateTru
return scanContinue
}
return s.error(c, "in literal true (expecting 'u')")
}
// stateTru is the state after reading `tru`.
func stateTru(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal true (expecting 'e')")
}
// stateF is the state after reading `f`.
func stateF(s *scanner, c byte) int {
if c == 'a' {
s.step = stateFa
return scanContinue
}
return s.error(c, "in literal false (expecting 'a')")
}
// stateFa is the state after reading `fa`.
func stateFa(s *scanner, c byte) int {
if c == 'l' {
s.step = stateFal
return scanContinue
}
return s.error(c, "in literal false (expecting 'l')")
}
// stateFal is the state after reading `fal`.
func stateFal(s *scanner, c byte) int {
if c == 's' {
s.step = stateFals
return scanContinue
}
return s.error(c, "in literal false (expecting 's')")
}
// stateFals is the state after reading `fals`.
func stateFals(s *scanner, c byte) int {
if c == 'e' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal false (expecting 'e')")
}
// stateN is the state after reading `n`.
func stateN(s *scanner, c byte) int {
if c == 'u' {
s.step = stateNu
return scanContinue
}
return s.error(c, "in literal null (expecting 'u')")
}
// stateNu is the state after reading `nu`.
func stateNu(s *scanner, c byte) int {
if c == 'l' {
s.step = stateNul
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateNul is the state after reading `nul`.
func stateNul(s *scanner, c byte) int {
if c == 'l' {
s.step = stateEndValue
return scanContinue
}
return s.error(c, "in literal null (expecting 'l')")
}
// stateError is the state after reaching a syntax error,
// such as after reading `[1}` or `5.1.2`.
func stateError(s *scanner, c byte) int {
return scanError
}
// error records an error and switches to the error state.
func (s *scanner) error(c byte, context string) int {
s.step = stateError
s.err = &SyntaxError{"invalid character " + quoteChar(c) + " " + context, s.bytes}
return scanError
}
// quoteChar formats c as a quoted character literal
func quoteChar(c byte) string {
// special cases - different from quoted strings
if c == '\'' {
return `'\''`
}
if c == '"' {
return `'"'`
}
// use quoted string with different quotation marks
s := strconv.Quote(string(c))
return "'" + s[1:len(s)-1] + "'"
}
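
Hypothetical in-package examples of the quoting rules above (comments show the return values):

~~~ go
// quoteChar('a')  == "'a'"
// quoteChar('\'') == `'\''`   // special case
// quoteChar('"')  == `'"'`    // special case
// quoteChar('\n') == `'\n'`   // via strconv.Quote, requoted with '
~~~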
// undo causes the scanner to return scanCode from the next state transition.
// This gives callers a simple 1-byte undo mechanism.
func (s *scanner) undo(scanCode int) {
if s.redo {
panic("json: invalid use of scanner")
}
s.redoCode = scanCode
s.redoState = s.step
s.step = stateRedo
s.redo = true
}
// stateRedo helps implement the scanner's 1-byte undo.
func stateRedo(s *scanner, c byte) int {
s.redo = false
s.step = s.redoState
return s.redoCode
}
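
A hypothetical in-package sketch of the 1-byte undo. The caller pairs undo with rewinding its own input offset by one byte; re-feeding that byte then yields the stashed opcode while the saved state is restored:

~~~ go
var s scanner
s.reset()
op := s.step(&s, '[') // scanBeginArray
s.undo(op)            // caller also rewinds its input by one byte
// Re-feeding the same byte returns the stashed opcode and restores the
// saved state, so scanning continues as if uninterrupted.
_ = s.step(&s, '[') == op // true
~~~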

View File

@ -1,487 +0,0 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"bytes"
"errors"
"io"
)
// A Decoder reads and decodes JSON objects from an input stream.
type Decoder struct {
r io.Reader
buf []byte
d decodeState
scanp int // start of unread data in buf
scan scanner
err error
tokenState int
tokenStack []int
}
// NewDecoder returns a new decoder that reads from r.
//
// The decoder introduces its own buffering and may
// read data from r beyond the JSON values requested.
func NewDecoder(r io.Reader) *Decoder {
return &Decoder{r: r}
}
// UseNumber causes the Decoder to unmarshal a number into an interface{} as a
// Number instead of as a float64.
func (dec *Decoder) UseNumber() { dec.d.useNumber = true }
// Decode reads the next JSON-encoded value from its
// input and stores it in the value pointed to by v.
//
// See the documentation for Unmarshal for details about
// the conversion of JSON into a Go value.
func (dec *Decoder) Decode(v interface{}) error {
if dec.err != nil {
return dec.err
}
if err := dec.tokenPrepareForDecode(); err != nil {
return err
}
if !dec.tokenValueAllowed() {
return &SyntaxError{msg: "not at beginning of value"}
}
// Read whole value into buffer.
n, err := dec.readValue()
if err != nil {
return err
}
dec.d.init(dec.buf[dec.scanp : dec.scanp+n])
dec.scanp += n
// Don't save err from unmarshal into dec.err:
// the connection is still usable since we read a complete JSON
// object from it before the error happened.
err = dec.d.unmarshal(v)
// fixup token streaming state
dec.tokenValueEnd()
return err
}
// Buffered returns a reader of the data remaining in the Decoder's
// buffer. The reader is valid until the next call to Decode.
func (dec *Decoder) Buffered() io.Reader {
return bytes.NewReader(dec.buf[dec.scanp:])
}
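
A standalone usage sketch, assuming this vendored Decoder keeps the standard library's semantics for a stream of concatenated values:

~~~ go
package main

import (
	"encoding/json" // stand-in for this vendored package
	"fmt"
	"io"
	"strings"
)

func main() {
	dec := json.NewDecoder(strings.NewReader(`{"n":1} {"n":2}`))
	for {
		var v struct{ N int }
		if err := dec.Decode(&v); err == io.EOF {
			break
		} else if err != nil {
			panic(err)
		}
		fmt.Println(v.N) // 1, then 2
	}
}
~~~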
// readValue reads a JSON value into dec.buf.
// It returns the length of the encoding.
func (dec *Decoder) readValue() (int, error) {
dec.scan.reset()
scanp := dec.scanp
var err error
Input:
for {
// Look in the buffer for a new value.
for i, c := range dec.buf[scanp:] {
dec.scan.bytes++
v := dec.scan.step(&dec.scan, c)
if v == scanEnd {
scanp += i
break Input
}
// scanEnd is delayed one byte.
// We might block trying to get that byte from src,
// so instead invent a space byte.
if (v == scanEndObject || v == scanEndArray) && dec.scan.step(&dec.scan, ' ') == scanEnd {
scanp += i + 1
break Input
}
if v == scanError {
dec.err = dec.scan.err
return 0, dec.scan.err
}
}
scanp = len(dec.buf)
// Did the last read have an error?
// Delayed until now to allow buffer scan.
if err != nil {
if err == io.EOF {
if dec.scan.step(&dec.scan, ' ') == scanEnd {
break Input
}
if nonSpace(dec.buf) {
err = io.ErrUnexpectedEOF
}
}
dec.err = err
return 0, err
}
n := scanp - dec.scanp
err = dec.refill()
scanp = dec.scanp + n
}
return scanp - dec.scanp, nil
}
func (dec *Decoder) refill() error {
// Make room to read more into the buffer.
// First slide down data already consumed.
if dec.scanp > 0 {
n := copy(dec.buf, dec.buf[dec.scanp:])
dec.buf = dec.buf[:n]
dec.scanp = 0
}
// Grow buffer if not large enough.
const minRead = 512
if cap(dec.buf)-len(dec.buf) < minRead {
newBuf := make([]byte, len(dec.buf), 2*cap(dec.buf)+minRead)
copy(newBuf, dec.buf)
dec.buf = newBuf
}
// Read. Delay error for next iteration (after scan).
n, err := dec.r.Read(dec.buf[len(dec.buf):cap(dec.buf)])
dec.buf = dec.buf[0 : len(dec.buf)+n]
return err
}
func nonSpace(b []byte) bool {
for _, c := range b {
if !isSpace(c) {
return true
}
}
return false
}
// An Encoder writes JSON objects to an output stream.
type Encoder struct {
w io.Writer
err error
canonical bool
}
// NewEncoder returns a new encoder that writes to w.
func NewEncoder(w io.Writer) *Encoder {
return &Encoder{w: w}
}
// Canonical causes the encoder to switch to Canonical JSON mode.
// Read more at: http://wiki.laptop.org/go/Canonical_JSON
func (enc *Encoder) Canonical() { enc.canonical = true }
// Encode writes the JSON encoding of v to the stream,
// followed by a newline character.
//
// See the documentation for Marshal for details about the
// conversion of Go values to JSON.
func (enc *Encoder) Encode(v interface{}) error {
if enc.err != nil {
return enc.err
}
e := newEncodeState(enc.canonical)
err := e.marshal(v)
if err != nil {
return err
}
if !enc.canonical {
// Terminate each value with a newline.
// This makes the output look a little nicer
// when debugging, and some kind of space
// is required if the encoded value was a number,
// so that the reader knows there aren't more
// digits coming.
e.WriteByte('\n')
}
if _, err = enc.w.Write(e.Bytes()); err != nil {
enc.err = err
}
encodeStatePool.Put(e)
return err
}
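
A hypothetical in-package sketch of this fork's Canonical mode (not part of the standard library). Per the code above, canonical output omits the trailing newline, and keys are ordered per the linked spec:

~~~ go
var buf bytes.Buffer
enc := NewEncoder(&buf)
enc.Canonical()
if err := enc.Encode(map[string]int{"b": 2, "a": 1}); err != nil {
	panic(err)
}
// expected: buf.String() == `{"a":1,"b":2}`
~~~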
// RawMessage is a raw encoded JSON object.
// It implements Marshaler and Unmarshaler and can
// be used to delay JSON decoding or precompute a JSON encoding.
type RawMessage []byte
// MarshalJSON returns *m as the JSON encoding of m.
func (m *RawMessage) MarshalJSON() ([]byte, error) {
return *m, nil
}
// UnmarshalJSON sets *m to a copy of data.
func (m *RawMessage) UnmarshalJSON(data []byte) error {
if m == nil {
return errors.New("json.RawMessage: UnmarshalJSON on nil pointer")
}
*m = append((*m)[0:0], data...)
return nil
}
var _ Marshaler = (*RawMessage)(nil)
var _ Unmarshaler = (*RawMessage)(nil)
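
A sketch of delayed decoding with RawMessage, assuming this fork retains encoding/json's Unmarshal (defined in this package's decode.go):

~~~ go
type envelope struct {
	Type string
	Data RawMessage // kept raw until Type is inspected
}

// var env envelope
// _ = Unmarshal([]byte(`{"Type":"point","Data":{"X":1,"Y":2}}`), &env)
// env.Data still holds the raw bytes `{"X":1,"Y":2}`; decode them once
// the concrete type is known:
// var pt struct{ X, Y int }
// _ = Unmarshal(env.Data, &pt)
~~~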
// A Token holds a value of one of these types:
//
// Delim, for the four JSON delimiters [ ] { }
// bool, for JSON booleans
// float64, for JSON numbers
// Number, for JSON numbers
// string, for JSON string literals
// nil, for JSON null
//
type Token interface{}
const (
tokenTopValue = iota
tokenArrayStart
tokenArrayValue
tokenArrayComma
tokenObjectStart
tokenObjectKey
tokenObjectColon
tokenObjectValue
tokenObjectComma
)
// advance tokenstate from a separator state to a value state
func (dec *Decoder) tokenPrepareForDecode() error {
// Note: Not calling peek before switch, to avoid
// putting peek into the standard Decode path.
// peek is only called when using the Token API.
switch dec.tokenState {
case tokenArrayComma:
c, err := dec.peek()
if err != nil {
return err
}
if c != ',' {
return &SyntaxError{"expected comma after array element", 0}
}
dec.scanp++
dec.tokenState = tokenArrayValue
case tokenObjectColon:
c, err := dec.peek()
if err != nil {
return err
}
if c != ':' {
return &SyntaxError{"expected colon after object key", 0}
}
dec.scanp++
dec.tokenState = tokenObjectValue
}
return nil
}
func (dec *Decoder) tokenValueAllowed() bool {
switch dec.tokenState {
case tokenTopValue, tokenArrayStart, tokenArrayValue, tokenObjectValue:
return true
}
return false
}
func (dec *Decoder) tokenValueEnd() {
switch dec.tokenState {
case tokenArrayStart, tokenArrayValue:
dec.tokenState = tokenArrayComma
case tokenObjectValue:
dec.tokenState = tokenObjectComma
}
}
// A Delim is a JSON array or object delimiter, one of [ ] { or }.
type Delim rune
func (d Delim) String() string {
return string(d)
}
// Token returns the next JSON token in the input stream.
// At the end of the input stream, Token returns nil, io.EOF.
//
// Token guarantees that the delimiters [ ] { } it returns are
// properly nested and matched: if Token encounters an unexpected
// delimiter in the input, it will return an error.
//
// The input stream consists of basic JSON values—bool, string,
// number, and null—along with delimiters [ ] { } of type Delim
// to mark the start and end of arrays and objects.
// Commas and colons are elided.
func (dec *Decoder) Token() (Token, error) {
for {
c, err := dec.peek()
if err != nil {
return nil, err
}
switch c {
case '[':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenArrayStart
return Delim('['), nil
case ']':
if dec.tokenState != tokenArrayStart && dec.tokenState != tokenArrayComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim(']'), nil
case '{':
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenStack = append(dec.tokenStack, dec.tokenState)
dec.tokenState = tokenObjectStart
return Delim('{'), nil
case '}':
if dec.tokenState != tokenObjectStart && dec.tokenState != tokenObjectComma {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = dec.tokenStack[len(dec.tokenStack)-1]
dec.tokenStack = dec.tokenStack[:len(dec.tokenStack)-1]
dec.tokenValueEnd()
return Delim('}'), nil
case ':':
if dec.tokenState != tokenObjectColon {
return dec.tokenError(c)
}
dec.scanp++
dec.tokenState = tokenObjectValue
continue
case ',':
if dec.tokenState == tokenArrayComma {
dec.scanp++
dec.tokenState = tokenArrayValue
continue
}
if dec.tokenState == tokenObjectComma {
dec.scanp++
dec.tokenState = tokenObjectKey
continue
}
return dec.tokenError(c)
case '"':
if dec.tokenState == tokenObjectStart || dec.tokenState == tokenObjectKey {
var x string
old := dec.tokenState
dec.tokenState = tokenTopValue
err := dec.Decode(&x)
dec.tokenState = old
if err != nil {
clearOffset(err)
return nil, err
}
dec.tokenState = tokenObjectColon
return x, nil
}
fallthrough
default:
if !dec.tokenValueAllowed() {
return dec.tokenError(c)
}
var x interface{}
if err := dec.Decode(&x); err != nil {
clearOffset(err)
return nil, err
}
return x, nil
}
}
}
func clearOffset(err error) {
if s, ok := err.(*SyntaxError); ok {
s.Offset = 0
}
}
func (dec *Decoder) tokenError(c byte) (Token, error) {
var context string
switch dec.tokenState {
case tokenTopValue:
context = " looking for beginning of value"
case tokenArrayStart, tokenArrayValue, tokenObjectValue:
context = " looking for beginning of value"
case tokenArrayComma:
context = " after array element"
case tokenObjectKey:
context = " looking for beginning of object key string"
case tokenObjectColon:
context = " after object key"
case tokenObjectComma:
context = " after object key:value pair"
}
return nil, &SyntaxError{"invalid character " + quoteChar(c) + " " + context, 0}
}
// More reports whether there is another element in the
// current array or object being parsed.
func (dec *Decoder) More() bool {
c, err := dec.peek()
return err == nil && c != ']' && c != '}'
}
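
A hypothetical in-package sketch of the Token/More API described above; Token and Decode may be interleaved inside arrays and objects (assumes "strings" and "fmt" imports):

~~~ go
dec := NewDecoder(strings.NewReader(`[{"K":1},{"K":2}]`))
if _, err := dec.Token(); err != nil { // Delim('[')
	panic(err)
}
for dec.More() {
	var e struct{ K int }
	if err := dec.Decode(&e); err != nil {
		panic(err)
	}
	fmt.Println(e.K) // 1, then 2
}
if _, err := dec.Token(); err != nil { // Delim(']')
	panic(err)
}
~~~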
func (dec *Decoder) peek() (byte, error) {
var err error
for {
for i := dec.scanp; i < len(dec.buf); i++ {
c := dec.buf[i]
if isSpace(c) {
continue
}
dec.scanp = i
return c, nil
}
// buffer has been scanned, now report any error
if err != nil {
return 0, err
}
err = dec.refill()
}
}
/*
TODO
// EncodeToken writes the given JSON token to the stream.
// It returns an error if the delimiters [ ] { } are not properly used.
//
// EncodeToken does not call Flush, because usually it is part of
// a larger operation such as Encode, and those will call Flush when finished.
// Callers that create an Encoder and then invoke EncodeToken directly,
// without using Encode, need to call Flush when finished to ensure that
// the JSON is written to the underlying writer.
func (e *Encoder) EncodeToken(t Token) error {
...
}
*/

View File

@ -1,44 +0,0 @@
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package json
import (
"strings"
)
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}
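
A hypothetical in-package sketch (both helpers are unexported); note that Contains matches whole comma-separated options, never substrings:

~~~ go
name, opts := parseTag("price,omitempty,string")
// name == "price"
// opts.Contains("omitempty") == true
// opts.Contains("string")    == true
// opts.Contains("omit")      == false
~~~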

View File

@ -1,3 +0,0 @@
tags
test_db/*/generation
test_db/*/*.lock

View File

@ -1,27 +0,0 @@
Copyright (c) 2013 Miek Gieben. All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
* Neither the name of Miek Gieben nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

View File

@ -1,57 +0,0 @@
# Makefile for releasing.
#
# The release is controlled from version.go. The version found there is
# used to tag the git repo; we're not building any artifacts, so there is nothing
# to upload to github.
#
# * Up the version in version.go
# * Run: make -f Makefile.release release
# * will *commit* your change with 'Release $VERSION'
# * push to github
#
define GO
//+build ignore
package main
import (
"fmt"
"github.com/miekg/pkcs11"
)
func main() {
fmt.Println(pkcs11.Release.String())
}
endef
$(file > version_release.go,$(GO))
VERSION:=$(shell go run -tags release version_release.go)
TAG="v$(VERSION)"
all:
rm -f version_release.go
@echo Use the \'release\' target to start a release $(VERSION)
.PHONY: run
run:
rm -f version_release.go
@echo $(VERSION)
.PHONY: release
release: commit push
@echo Released $(VERSION)
.PHONY: commit
commit:
rm -f version_release.go
@echo Committing release $(VERSION)
git commit -am"Release $(VERSION)"
git tag $(TAG)
.PHONY: push
push:
@echo Pushing release $(VERSION) to master
git push --tags
git push

View File

@ -1,68 +0,0 @@
# PKCS#11
This is a Go implementation of the PKCS#11 API. It wraps the library closely, but uses Go idiom where
it makes sense. It has been tested with SoftHSM.
## SoftHSM
* Make it use a custom configuration file `export SOFTHSM_CONF=$PWD/softhsm.conf`
* Then use `softhsm` to init it
~~~
softhsm --init-token --slot 0 --label test --pin 1234
~~~
* Then use `libsofthsm2.so` as the pkcs11 module:
~~~ go
p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
~~~
## Examples
A skeleton program would look somewhat like this (yes, pkcs#11 is verbose):
~~~ go
p := pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
err := p.Initialize()
if err != nil {
panic(err)
}
defer p.Destroy()
defer p.Finalize()
slots, err := p.GetSlotList(true)
if err != nil {
panic(err)
}
session, err := p.OpenSession(slots[0], pkcs11.CKF_SERIAL_SESSION|pkcs11.CKF_RW_SESSION)
if err != nil {
panic(err)
}
defer p.CloseSession(session)
err = p.Login(session, pkcs11.CKU_USER, "1234")
if err != nil {
panic(err)
}
defer p.Logout(session)
p.DigestInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA_1, nil)})
hash, err := p.Digest(session, []byte("this is a string"))
if err != nil {
panic(err)
}
for _, d := range hash {
fmt.Printf("%x", d)
}
fmt.Println()
~~~
Further examples are included in the tests.
To expose PKCS#11 keys using the [crypto.Signer interface](https://golang.org/pkg/crypto/#Signer),
please see [github.com/thalesignite/crypto11](https://github.com/thalesignite/crypto11).
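Beyond digesting, here is a hedged sketch of generating a session-local AES key, reusing `p` and `session` from the skeleton above (the attribute template is illustrative and may need adjusting per HSM):
~~~ go
aesKey, err := p.GenerateKey(session,
	[]*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_KEY_GEN, nil)},
	[]*pkcs11.Attribute{
		pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY),
		pkcs11.NewAttribute(pkcs11.CKA_KEY_TYPE, pkcs11.CKK_AES),
		pkcs11.NewAttribute(pkcs11.CKA_VALUE_LEN, 32), // 256-bit key
		pkcs11.NewAttribute(pkcs11.CKA_ENCRYPT, true),
		pkcs11.NewAttribute(pkcs11.CKA_TOKEN, false), // session object only
	})
if err != nil {
	panic(err)
}
_ = aesKey
~~~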

View File

@ -1,98 +0,0 @@
// Copyright 2013 Miek Gieben. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs11
// awk '/#define CKR_/{ print $3":\""$2"\"," }' pkcs11t.h
var strerror = map[uint]string{
0x00000000: "CKR_OK",
0x00000001: "CKR_CANCEL",
0x00000002: "CKR_HOST_MEMORY",
0x00000003: "CKR_SLOT_ID_INVALID",
0x00000005: "CKR_GENERAL_ERROR",
0x00000006: "CKR_FUNCTION_FAILED",
0x00000007: "CKR_ARGUMENTS_BAD",
0x00000008: "CKR_NO_EVENT",
0x00000009: "CKR_NEED_TO_CREATE_THREADS",
0x0000000A: "CKR_CANT_LOCK",
0x00000010: "CKR_ATTRIBUTE_READ_ONLY",
0x00000011: "CKR_ATTRIBUTE_SENSITIVE",
0x00000012: "CKR_ATTRIBUTE_TYPE_INVALID",
0x00000013: "CKR_ATTRIBUTE_VALUE_INVALID",
0x00000020: "CKR_DATA_INVALID",
0x00000021: "CKR_DATA_LEN_RANGE",
0x00000030: "CKR_DEVICE_ERROR",
0x00000031: "CKR_DEVICE_MEMORY",
0x00000032: "CKR_DEVICE_REMOVED",
0x00000040: "CKR_ENCRYPTED_DATA_INVALID",
0x00000041: "CKR_ENCRYPTED_DATA_LEN_RANGE",
0x00000050: "CKR_FUNCTION_CANCELED",
0x00000051: "CKR_FUNCTION_NOT_PARALLEL",
0x00000054: "CKR_FUNCTION_NOT_SUPPORTED",
0x00000060: "CKR_KEY_HANDLE_INVALID",
0x00000062: "CKR_KEY_SIZE_RANGE",
0x00000063: "CKR_KEY_TYPE_INCONSISTENT",
0x00000064: "CKR_KEY_NOT_NEEDED",
0x00000065: "CKR_KEY_CHANGED",
0x00000066: "CKR_KEY_NEEDED",
0x00000067: "CKR_KEY_INDIGESTIBLE",
0x00000068: "CKR_KEY_FUNCTION_NOT_PERMITTED",
0x00000069: "CKR_KEY_NOT_WRAPPABLE",
0x0000006A: "CKR_KEY_UNEXTRACTABLE",
0x00000070: "CKR_MECHANISM_INVALID",
0x00000071: "CKR_MECHANISM_PARAM_INVALID",
0x00000082: "CKR_OBJECT_HANDLE_INVALID",
0x00000090: "CKR_OPERATION_ACTIVE",
0x00000091: "CKR_OPERATION_NOT_INITIALIZED",
0x000000A0: "CKR_PIN_INCORRECT",
0x000000A1: "CKR_PIN_INVALID",
0x000000A2: "CKR_PIN_LEN_RANGE",
0x000000A3: "CKR_PIN_EXPIRED",
0x000000A4: "CKR_PIN_LOCKED",
0x000000B0: "CKR_SESSION_CLOSED",
0x000000B1: "CKR_SESSION_COUNT",
0x000000B3: "CKR_SESSION_HANDLE_INVALID",
0x000000B4: "CKR_SESSION_PARALLEL_NOT_SUPPORTED",
0x000000B5: "CKR_SESSION_READ_ONLY",
0x000000B6: "CKR_SESSION_EXISTS",
0x000000B7: "CKR_SESSION_READ_ONLY_EXISTS",
0x000000B8: "CKR_SESSION_READ_WRITE_SO_EXISTS",
0x000000C0: "CKR_SIGNATURE_INVALID",
0x000000C1: "CKR_SIGNATURE_LEN_RANGE",
0x000000D0: "CKR_TEMPLATE_INCOMPLETE",
0x000000D1: "CKR_TEMPLATE_INCONSISTENT",
0x000000E0: "CKR_TOKEN_NOT_PRESENT",
0x000000E1: "CKR_TOKEN_NOT_RECOGNIZED",
0x000000E2: "CKR_TOKEN_WRITE_PROTECTED",
0x000000F0: "CKR_UNWRAPPING_KEY_HANDLE_INVALID",
0x000000F1: "CKR_UNWRAPPING_KEY_SIZE_RANGE",
0x000000F2: "CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT",
0x00000100: "CKR_USER_ALREADY_LOGGED_IN",
0x00000101: "CKR_USER_NOT_LOGGED_IN",
0x00000102: "CKR_USER_PIN_NOT_INITIALIZED",
0x00000103: "CKR_USER_TYPE_INVALID",
0x00000104: "CKR_USER_ANOTHER_ALREADY_LOGGED_IN",
0x00000105: "CKR_USER_TOO_MANY_TYPES",
0x00000110: "CKR_WRAPPED_KEY_INVALID",
0x00000112: "CKR_WRAPPED_KEY_LEN_RANGE",
0x00000113: "CKR_WRAPPING_KEY_HANDLE_INVALID",
0x00000114: "CKR_WRAPPING_KEY_SIZE_RANGE",
0x00000115: "CKR_WRAPPING_KEY_TYPE_INCONSISTENT",
0x00000120: "CKR_RANDOM_SEED_NOT_SUPPORTED",
0x00000121: "CKR_RANDOM_NO_RNG",
0x00000130: "CKR_DOMAIN_PARAMS_INVALID",
0x00000150: "CKR_BUFFER_TOO_SMALL",
0x00000160: "CKR_SAVED_STATE_INVALID",
0x00000170: "CKR_INFORMATION_SENSITIVE",
0x00000180: "CKR_STATE_UNSAVEABLE",
0x00000190: "CKR_CRYPTOKI_NOT_INITIALIZED",
0x00000191: "CKR_CRYPTOKI_ALREADY_INITIALIZED",
0x000001A0: "CKR_MUTEX_BAD",
0x000001A1: "CKR_MUTEX_NOT_LOCKED",
0x000001B0: "CKR_NEW_PIN_MODE",
0x000001B1: "CKR_NEXT_OTP",
0x00000200: "CKR_FUNCTION_REJECTED",
0x80000000: "CKR_VENDOR_DEFINED",
}
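
The map above presumably backs the package's exported Error type (defined in error.go); a minimal sketch of that pattern, assuming a "fmt" import:

~~~ go
type Error uint

func (e Error) Error() string {
	return fmt.Sprintf("pkcs11: 0x%X: %s", uint(e), strerror[uint(e)])
}
~~~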

BIN
vendor/github.com/miekg/pkcs11/hsm.db generated vendored

Binary file not shown.

View File

@ -1,190 +0,0 @@
// Copyright 2013 Miek Gieben. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs11
/*
#include <stdlib.h>
#include <string.h>
#include "pkcs11go.h"
static inline void putOAEPParams(CK_RSA_PKCS_OAEP_PARAMS_PTR params, CK_VOID_PTR pSourceData, CK_ULONG ulSourceDataLen)
{
params->pSourceData = pSourceData;
params->ulSourceDataLen = ulSourceDataLen;
}
static inline void putECDH1SharedParams(CK_ECDH1_DERIVE_PARAMS_PTR params, CK_VOID_PTR pSharedData, CK_ULONG ulSharedDataLen)
{
params->pSharedData = pSharedData;
params->ulSharedDataLen = ulSharedDataLen;
}
static inline void putECDH1PublicParams(CK_ECDH1_DERIVE_PARAMS_PTR params, CK_VOID_PTR pPublicData, CK_ULONG ulPublicDataLen)
{
params->pPublicData = pPublicData;
params->ulPublicDataLen = ulPublicDataLen;
}
*/
import "C"
import "unsafe"
// GCMParams represents the parameters for the AES-GCM mechanism.
type GCMParams struct {
arena
params *C.CK_GCM_PARAMS
iv []byte
aad []byte
tagSize int
}
// NewGCMParams returns a pointer to AES-GCM parameters that can be used with the CKM_AES_GCM mechanism.
// The Free() method must be called after the operation is complete.
//
// Note that some HSMs, like CloudHSM, will ignore the IV you pass in and write their
// own. As a result, to support all libraries, memory is not freed
// automatically, so that after the EncryptInit/Encrypt operation the HSM's IV
// can be read back out. It is up to the caller to ensure that Free() is called
// on the GCMParams object at an appropriate time, which is after
// Encrypt/Decrypt. As an example:
//
//
// gcmParams := pkcs11.NewGCMParams(make([]byte, 12), nil, 128)
// p.ctx.EncryptInit(session, []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_GCM, gcmParams)},
// aesObjHandle)
// ct, _ := p.ctx.Encrypt(session, pt)
// iv := gcmParams.IV()
// gcmParams.Free()
//
func NewGCMParams(iv, aad []byte, tagSize int) *GCMParams {
return &GCMParams{
iv: iv,
aad: aad,
tagSize: tagSize,
}
}
func cGCMParams(p *GCMParams) []byte {
params := C.CK_GCM_PARAMS{
ulTagBits: C.CK_ULONG(p.tagSize),
}
var arena arena
if len(p.iv) > 0 {
iv, ivLen := arena.Allocate(p.iv)
params.pIv = C.CK_BYTE_PTR(iv)
params.ulIvLen = ivLen
params.ulIvBits = ivLen * 8
}
if len(p.aad) > 0 {
aad, aadLen := arena.Allocate(p.aad)
params.pAAD = C.CK_BYTE_PTR(aad)
params.ulAADLen = aadLen
}
p.Free()
p.arena = arena
p.params = &params
return C.GoBytes(unsafe.Pointer(&params), C.int(unsafe.Sizeof(params)))
}
// IV returns a copy of the actual IV used for the operation.
//
// Some HSMs may ignore the user-specified IV and write their own at the end of
// the encryption operation; this method allows you to retrieve it.
func (p *GCMParams) IV() []byte {
if p == nil || p.params == nil {
return nil
}
newIv := C.GoBytes(unsafe.Pointer(p.params.pIv), C.int(p.params.ulIvLen))
iv := make([]byte, len(newIv))
copy(iv, newIv)
return iv
}
// Free deallocates the memory reserved for the HSM to write back the actual IV.
//
// This must be called after the entire operation is complete, i.e. after
// Encrypt or EncryptFinal. It is safe to call Free multiple times.
func (p *GCMParams) Free() {
if p == nil || p.arena == nil {
return
}
p.arena.Free()
p.params = nil
p.arena = nil
}
// NewPSSParams creates a CK_RSA_PKCS_PSS_PARAMS structure and returns it as a byte array for use with the CKM_RSA_PKCS_PSS mechanism.
func NewPSSParams(hashAlg, mgf, saltLength uint) []byte {
p := C.CK_RSA_PKCS_PSS_PARAMS{
hashAlg: C.CK_MECHANISM_TYPE(hashAlg),
mgf: C.CK_RSA_PKCS_MGF_TYPE(mgf),
sLen: C.CK_ULONG(saltLength),
}
return C.GoBytes(unsafe.Pointer(&p), C.int(unsafe.Sizeof(p)))
}
// OAEPParams can be passed to NewMechanism to implement CKM_RSA_PKCS_OAEP.
type OAEPParams struct {
HashAlg uint
MGF uint
SourceType uint
SourceData []byte
}
// NewOAEPParams creates a CK_RSA_PKCS_OAEP_PARAMS structure suitable for use with the CKM_RSA_PKCS_OAEP mechanism.
func NewOAEPParams(hashAlg, mgf, sourceType uint, sourceData []byte) *OAEPParams {
return &OAEPParams{
HashAlg: hashAlg,
MGF: mgf,
SourceType: sourceType,
SourceData: sourceData,
}
}
func cOAEPParams(p *OAEPParams, arena arena) ([]byte, arena) {
params := C.CK_RSA_PKCS_OAEP_PARAMS{
hashAlg: C.CK_MECHANISM_TYPE(p.HashAlg),
mgf: C.CK_RSA_PKCS_MGF_TYPE(p.MGF),
source: C.CK_RSA_PKCS_OAEP_SOURCE_TYPE(p.SourceType),
}
if len(p.SourceData) != 0 {
buf, len := arena.Allocate(p.SourceData)
// field is unaligned on windows so this has to call into C
C.putOAEPParams(&params, buf, len)
}
return C.GoBytes(unsafe.Pointer(&params), C.int(unsafe.Sizeof(params))), arena
}
// ECDH1DeriveParams can be passed to NewMechanism to implement CK_ECDH1_DERIVE_PARAMS.
type ECDH1DeriveParams struct {
KDF uint
SharedData []byte
PublicKeyData []byte
}
// NewECDH1DeriveParams creates a CK_ECDH1_DERIVE_PARAMS structure suitable for use with the CKM_ECDH1_DERIVE mechanism.
func NewECDH1DeriveParams(kdf uint, sharedData []byte, publicKeyData []byte) *ECDH1DeriveParams {
return &ECDH1DeriveParams{
KDF: kdf,
SharedData: sharedData,
PublicKeyData: publicKeyData,
}
}
func cECDH1DeriveParams(p *ECDH1DeriveParams, arena arena) ([]byte, arena) {
params := C.CK_ECDH1_DERIVE_PARAMS{
kdf: C.CK_EC_KDF_TYPE(p.KDF),
}
// SharedData MUST be null if key derivation function (KDF) is CKD_NULL
if len(p.SharedData) != 0 {
sharedData, sharedDataLen := arena.Allocate(p.SharedData)
C.putECDH1SharedParams(&params, sharedData, sharedDataLen)
}
publicKeyData, publicKeyDataLen := arena.Allocate(p.PublicKeyData)
C.putECDH1PublicParams(&params, publicKeyData, publicKeyDataLen)
return C.GoBytes(unsafe.Pointer(&params), C.int(unsafe.Sizeof(params))), arena
}
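
A hedged usage sketch tying these parameter builders to NewMechanism; the hash/MGF pairing is illustrative, and p, session, and pubKey are assumed handles (e.g. from the README skeleton):

~~~ go
oaep := pkcs11.NewOAEPParams(pkcs11.CKM_SHA_1, pkcs11.CKG_MGF1_SHA1,
	pkcs11.CKZ_DATA_SPECIFIED, nil)
mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_OAEP, oaep)}
if err := p.EncryptInit(session, mech, pubKey); err != nil {
	panic(err)
}
ct, err := p.Encrypt(session, []byte("secret"))
if err != nil {
	panic(err)
}
_ = ct // ciphertext
~~~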

File diff suppressed because it is too large

View File

@ -1,265 +0,0 @@
/* Copyright (c) OASIS Open 2016. All Rights Reserved./
* /Distributed under the terms of the OASIS IPR Policy,
* [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY
* IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others.
*/
/* Latest version of the specification:
* http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html
*/
#ifndef _PKCS11_H_
#define _PKCS11_H_ 1
#ifdef __cplusplus
extern "C" {
#endif
/* Before including this file (pkcs11.h) (or pkcs11t.h by
* itself), 5 platform-specific macros must be defined. These
* macros are described below, and typical definitions for them
* are also given. Be advised that these definitions can depend
* on both the platform and the compiler used (and possibly also
* on whether a Cryptoki library is linked statically or
* dynamically).
*
* In addition to defining these 5 macros, the packing convention
* for Cryptoki structures should be set. The Cryptoki
* convention on packing is that structures should be 1-byte
* aligned.
*
* If you're using Microsoft Developer Studio 5.0 to produce
* Win32 stuff, this might be done by using the following
* preprocessor directive before including pkcs11.h or pkcs11t.h:
*
* #pragma pack(push, cryptoki, 1)
*
* and using the following preprocessor directive after including
* pkcs11.h or pkcs11t.h:
*
* #pragma pack(pop, cryptoki)
*
* If you're using an earlier version of Microsoft Developer
* Studio to produce Win16 stuff, this might be done by using
* the following preprocessor directive before including
* pkcs11.h or pkcs11t.h:
*
* #pragma pack(1)
*
* In a UNIX environment, you're on your own for this. You might
* not need to do (or be able to do!) anything.
*
*
* Now for the macros:
*
*
* 1. CK_PTR: The indirection string for making a pointer to an
* object. It can be used like this:
*
* typedef CK_BYTE CK_PTR CK_BYTE_PTR;
*
* If you're using Microsoft Developer Studio 5.0 to produce
* Win32 stuff, it might be defined by:
*
* #define CK_PTR *
*
* If you're using an earlier version of Microsoft Developer
* Studio to produce Win16 stuff, it might be defined by:
*
* #define CK_PTR far *
*
* In a typical UNIX environment, it might be defined by:
*
* #define CK_PTR *
*
*
* 2. CK_DECLARE_FUNCTION(returnType, name): A macro which makes
* an importable Cryptoki library function declaration out of a
* return type and a function name. It should be used in the
* following fashion:
*
* extern CK_DECLARE_FUNCTION(CK_RV, C_Initialize)(
* CK_VOID_PTR pReserved
* );
*
* If you're using Microsoft Developer Studio 5.0 to declare a
* function in a Win32 Cryptoki .dll, it might be defined by:
*
* #define CK_DECLARE_FUNCTION(returnType, name) \
* returnType __declspec(dllimport) name
*
* If you're using an earlier version of Microsoft Developer
* Studio to declare a function in a Win16 Cryptoki .dll, it
* might be defined by:
*
* #define CK_DECLARE_FUNCTION(returnType, name) \
* returnType __export _far _pascal name
*
* In a UNIX environment, it might be defined by:
*
* #define CK_DECLARE_FUNCTION(returnType, name) \
* returnType name
*
*
* 3. CK_DECLARE_FUNCTION_POINTER(returnType, name): A macro
* which makes a Cryptoki API function pointer declaration or
* function pointer type declaration out of a return type and a
* function name. It should be used in the following fashion:
*
* // Define funcPtr to be a pointer to a Cryptoki API function
* // taking arguments args and returning CK_RV.
* CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtr)(args);
*
* or
*
* // Define funcPtrType to be the type of a pointer to a
* // Cryptoki API function taking arguments args and returning
* // CK_RV, and then define funcPtr to be a variable of type
* // funcPtrType.
* typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, funcPtrType)(args);
* funcPtrType funcPtr;
*
* If you're using Microsoft Developer Studio 5.0 to access
* functions in a Win32 Cryptoki .dll, it might be defined by:
*
* #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \
* returnType __declspec(dllimport) (* name)
*
* If you're using an earlier version of Microsoft Developer
* Studio to access functions in a Win16 Cryptoki .dll, it might
* be defined by:
*
* #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \
* returnType __export _far _pascal (* name)
*
* In a UNIX environment, it might be defined by:
*
* #define CK_DECLARE_FUNCTION_POINTER(returnType, name) \
* returnType (* name)
*
*
* 4. CK_CALLBACK_FUNCTION(returnType, name): A macro which makes
* a function pointer type for an application callback out of
* a return type for the callback and a name for the callback.
* It should be used in the following fashion:
*
* CK_CALLBACK_FUNCTION(CK_RV, myCallback)(args);
*
* to declare a function pointer, myCallback, to a callback
* which takes arguments args and returns a CK_RV. It can also
* be used like this:
*
* typedef CK_CALLBACK_FUNCTION(CK_RV, myCallbackType)(args);
* myCallbackType myCallback;
*
* If you're using Microsoft Developer Studio 5.0 to do Win32
* Cryptoki development, it might be defined by:
*
* #define CK_CALLBACK_FUNCTION(returnType, name) \
* returnType (* name)
*
* If you're using an earlier version of Microsoft Developer
* Studio to do Win16 development, it might be defined by:
*
* #define CK_CALLBACK_FUNCTION(returnType, name) \
* returnType _far _pascal (* name)
*
* In a UNIX environment, it might be defined by:
*
* #define CK_CALLBACK_FUNCTION(returnType, name) \
* returnType (* name)
*
*
* 5. NULL_PTR: This macro is the value of a NULL pointer.
*
* In any ANSI/ISO C environment (and in many others as well),
* this should best be defined by
*
* #ifndef NULL_PTR
* #define NULL_PTR 0
* #endif
*/
/* All the various Cryptoki types and #define'd values are in the
* file pkcs11t.h.
*/
#include "pkcs11t.h"
#define __PASTE(x,y) x##y
/* ==============================================================
* Define the "extern" form of all the entry points.
* ==============================================================
*/
#define CK_NEED_ARG_LIST 1
#define CK_PKCS11_FUNCTION_INFO(name) \
extern CK_DECLARE_FUNCTION(CK_RV, name)
/* pkcs11f.h has all the information about the Cryptoki
* function prototypes.
*/
#include "pkcs11f.h"
#undef CK_NEED_ARG_LIST
#undef CK_PKCS11_FUNCTION_INFO
/* ==============================================================
* Define the typedef form of all the entry points. That is, for
* each Cryptoki function C_XXX, define a type CK_C_XXX which is
* a pointer to that kind of function.
* ==============================================================
*/
#define CK_NEED_ARG_LIST 1
#define CK_PKCS11_FUNCTION_INFO(name) \
typedef CK_DECLARE_FUNCTION_POINTER(CK_RV, __PASTE(CK_,name))
/* pkcs11f.h has all the information about the Cryptoki
* function prototypes.
*/
#include "pkcs11f.h"
#undef CK_NEED_ARG_LIST
#undef CK_PKCS11_FUNCTION_INFO
/* ==============================================================
* Define structured vector of entry points. A CK_FUNCTION_LIST
* contains a CK_VERSION indicating a library's Cryptoki version
* and then a whole slew of function pointers to the routines in
* the library. This type was declared, but not defined, in
* pkcs11t.h.
* ==============================================================
*/
#define CK_PKCS11_FUNCTION_INFO(name) \
__PASTE(CK_,name) name;
struct CK_FUNCTION_LIST {
CK_VERSION version; /* Cryptoki version */
/* Pile all the function pointers into the CK_FUNCTION_LIST. */
/* pkcs11f.h has all the information about the Cryptoki
* function prototypes.
*/
#include "pkcs11f.h"
};
#undef CK_PKCS11_FUNCTION_INFO
#undef __PASTE
#ifdef __cplusplus
}
#endif
#endif /* _PKCS11_H_ */

View File

@ -1,939 +0,0 @@
/* Copyright (c) OASIS Open 2016. All Rights Reserved./
* /Distributed under the terms of the OASIS IPR Policy,
* [http://www.oasis-open.org/policies-guidelines/ipr], AS-IS, WITHOUT ANY
* IMPLIED OR EXPRESS WARRANTY; there is no warranty of MERCHANTABILITY, FITNESS FOR A
* PARTICULAR PURPOSE or NONINFRINGEMENT of the rights of others.
*/
/* Latest version of the specification:
* http://docs.oasis-open.org/pkcs11/pkcs11-base/v2.40/pkcs11-base-v2.40.html
*/
/* This header file contains pretty much everything about all the
* Cryptoki function prototypes. Because this information is
* used for more than just declaring function prototypes, the
* order of the functions appearing herein is important, and
* should not be altered.
*/
/* General-purpose */
/* C_Initialize initializes the Cryptoki library. */
CK_PKCS11_FUNCTION_INFO(C_Initialize)
#ifdef CK_NEED_ARG_LIST
(
CK_VOID_PTR pInitArgs /* if this is not NULL_PTR, it gets
* cast to CK_C_INITIALIZE_ARGS_PTR
* and dereferenced
*/
);
#endif
/* C_Finalize indicates that an application is done with the
* Cryptoki library.
*/
CK_PKCS11_FUNCTION_INFO(C_Finalize)
#ifdef CK_NEED_ARG_LIST
(
CK_VOID_PTR pReserved /* reserved. Should be NULL_PTR */
);
#endif
/* C_GetInfo returns general information about Cryptoki. */
CK_PKCS11_FUNCTION_INFO(C_GetInfo)
#ifdef CK_NEED_ARG_LIST
(
CK_INFO_PTR pInfo /* location that receives information */
);
#endif
/* C_GetFunctionList returns the function list. */
CK_PKCS11_FUNCTION_INFO(C_GetFunctionList)
#ifdef CK_NEED_ARG_LIST
(
CK_FUNCTION_LIST_PTR_PTR ppFunctionList /* receives pointer to
* function list
*/
);
#endif
/* Slot and token management */
/* C_GetSlotList obtains a list of slots in the system. */
CK_PKCS11_FUNCTION_INFO(C_GetSlotList)
#ifdef CK_NEED_ARG_LIST
(
CK_BBOOL tokenPresent, /* only slots with tokens */
CK_SLOT_ID_PTR pSlotList, /* receives array of slot IDs */
CK_ULONG_PTR pulCount /* receives number of slots */
);
#endif
/* C_GetSlotInfo obtains information about a particular slot in
* the system.
*/
CK_PKCS11_FUNCTION_INFO(C_GetSlotInfo)
#ifdef CK_NEED_ARG_LIST
(
CK_SLOT_ID slotID, /* the ID of the slot */
CK_SLOT_INFO_PTR pInfo /* receives the slot information */
);
#endif
/* C_GetTokenInfo obtains information about a particular token
* in the system.
*/
CK_PKCS11_FUNCTION_INFO(C_GetTokenInfo)
#ifdef CK_NEED_ARG_LIST
(
CK_SLOT_ID slotID, /* ID of the token's slot */
CK_TOKEN_INFO_PTR pInfo /* receives the token information */
);
#endif
/* C_GetMechanismList obtains a list of mechanism types
* supported by a token.
*/
CK_PKCS11_FUNCTION_INFO(C_GetMechanismList)
#ifdef CK_NEED_ARG_LIST
(
CK_SLOT_ID slotID, /* ID of token's slot */
CK_MECHANISM_TYPE_PTR pMechanismList, /* gets mech. array */
CK_ULONG_PTR pulCount /* gets # of mechs. */
);
#endif
/* C_GetMechanismInfo obtains information about a particular
* mechanism possibly supported by a token.
*/
CK_PKCS11_FUNCTION_INFO(C_GetMechanismInfo)
#ifdef CK_NEED_ARG_LIST
(
CK_SLOT_ID slotID, /* ID of the token's slot */
CK_MECHANISM_TYPE type, /* type of mechanism */
CK_MECHANISM_INFO_PTR pInfo /* receives mechanism info */
);
#endif
/* C_InitToken initializes a token. */
CK_PKCS11_FUNCTION_INFO(C_InitToken)
#ifdef CK_NEED_ARG_LIST
(
CK_SLOT_ID slotID, /* ID of the token's slot */
CK_UTF8CHAR_PTR pPin, /* the SO's initial PIN */
CK_ULONG ulPinLen, /* length in bytes of the PIN */
CK_UTF8CHAR_PTR pLabel /* 32-byte token label (blank padded) */
);
#endif
/* C_InitPIN initializes the normal user's PIN. */
CK_PKCS11_FUNCTION_INFO(C_InitPIN)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_UTF8CHAR_PTR pPin, /* the normal user's PIN */
CK_ULONG ulPinLen /* length in bytes of the PIN */
);
#endif
/* C_SetPIN modifies the PIN of the user who is logged in. */
CK_PKCS11_FUNCTION_INFO(C_SetPIN)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_UTF8CHAR_PTR pOldPin, /* the old PIN */
CK_ULONG ulOldLen, /* length of the old PIN */
CK_UTF8CHAR_PTR pNewPin, /* the new PIN */
CK_ULONG ulNewLen /* length of the new PIN */
);
#endif
/* Session management */
/* C_OpenSession opens a session between an application and a
* token.
*/
CK_PKCS11_FUNCTION_INFO(C_OpenSession)
#ifdef CK_NEED_ARG_LIST
(
CK_SLOT_ID slotID, /* the slot's ID */
CK_FLAGS flags, /* from CK_SESSION_INFO */
CK_VOID_PTR pApplication, /* passed to callback */
CK_NOTIFY Notify, /* callback function */
CK_SESSION_HANDLE_PTR phSession /* gets session handle */
);
#endif
/* C_CloseSession closes a session between an application and a
* token.
*/
CK_PKCS11_FUNCTION_INFO(C_CloseSession)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession /* the session's handle */
);
#endif
/* C_CloseAllSessions closes all sessions with a token. */
CK_PKCS11_FUNCTION_INFO(C_CloseAllSessions)
#ifdef CK_NEED_ARG_LIST
(
CK_SLOT_ID slotID /* the token's slot */
);
#endif
/* C_GetSessionInfo obtains information about the session. */
CK_PKCS11_FUNCTION_INFO(C_GetSessionInfo)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_SESSION_INFO_PTR pInfo /* receives session info */
);
#endif
/* C_GetOperationState obtains the state of the cryptographic operation
* in a session.
*/
CK_PKCS11_FUNCTION_INFO(C_GetOperationState)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pOperationState, /* gets state */
CK_ULONG_PTR pulOperationStateLen /* gets state length */
);
#endif
/* C_SetOperationState restores the state of the cryptographic
* operation in a session.
*/
CK_PKCS11_FUNCTION_INFO(C_SetOperationState)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pOperationState, /* holds state */
CK_ULONG ulOperationStateLen, /* holds state length */
CK_OBJECT_HANDLE hEncryptionKey, /* en/decryption key */
CK_OBJECT_HANDLE hAuthenticationKey /* sign/verify key */
);
#endif
/* C_Login logs a user into a token. */
CK_PKCS11_FUNCTION_INFO(C_Login)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_USER_TYPE userType, /* the user type */
CK_UTF8CHAR_PTR pPin, /* the user's PIN */
CK_ULONG ulPinLen /* the length of the PIN */
);
#endif
/* C_Logout logs a user out from a token. */
CK_PKCS11_FUNCTION_INFO(C_Logout)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession /* the session's handle */
);
#endif
/* Object management */
/* C_CreateObject creates a new object. */
CK_PKCS11_FUNCTION_INFO(C_CreateObject)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_ATTRIBUTE_PTR pTemplate, /* the object's template */
CK_ULONG ulCount, /* attributes in template */
CK_OBJECT_HANDLE_PTR phObject /* gets new object's handle. */
);
#endif
/* C_CopyObject copies an object, creating a new object for the
* copy.
*/
CK_PKCS11_FUNCTION_INFO(C_CopyObject)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_OBJECT_HANDLE hObject, /* the object's handle */
CK_ATTRIBUTE_PTR pTemplate, /* template for new object */
CK_ULONG ulCount, /* attributes in template */
CK_OBJECT_HANDLE_PTR phNewObject /* receives handle of copy */
);
#endif
/* C_DestroyObject destroys an object. */
CK_PKCS11_FUNCTION_INFO(C_DestroyObject)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_OBJECT_HANDLE hObject /* the object's handle */
);
#endif
/* C_GetObjectSize gets the size of an object in bytes. */
CK_PKCS11_FUNCTION_INFO(C_GetObjectSize)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_OBJECT_HANDLE hObject, /* the object's handle */
CK_ULONG_PTR pulSize /* receives size of object */
);
#endif
/* C_GetAttributeValue obtains the value of one or more object
* attributes.
*/
CK_PKCS11_FUNCTION_INFO(C_GetAttributeValue)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_OBJECT_HANDLE hObject, /* the object's handle */
CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs; gets vals */
CK_ULONG ulCount /* attributes in template */
);
#endif
/* C_SetAttributeValue modifies the value of one or more object
* attributes.
*/
CK_PKCS11_FUNCTION_INFO(C_SetAttributeValue)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_OBJECT_HANDLE hObject, /* the object's handle */
CK_ATTRIBUTE_PTR pTemplate, /* specifies attrs and values */
CK_ULONG ulCount /* attributes in template */
);
#endif
/* C_FindObjectsInit initializes a search for token and session
* objects that match a template.
*/
CK_PKCS11_FUNCTION_INFO(C_FindObjectsInit)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_ATTRIBUTE_PTR pTemplate, /* attribute values to match */
CK_ULONG ulCount /* attrs in search template */
);
#endif
/* C_FindObjects continues a search for token and session
* objects that match a template, obtaining additional object
* handles.
*/
CK_PKCS11_FUNCTION_INFO(C_FindObjects)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_OBJECT_HANDLE_PTR phObject, /* gets obj. handles */
CK_ULONG ulMaxObjectCount, /* max handles to get */
CK_ULONG_PTR pulObjectCount /* actual # returned */
);
#endif
/* C_FindObjectsFinal finishes a search for token and session
* objects.
*/
CK_PKCS11_FUNCTION_INFO(C_FindObjectsFinal)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession /* the session's handle */
);
#endif
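C_FindObjectsInit, C_FindObjects, and C_FindObjectsFinal form a three-call search protocol: one init with a template, repeated fetches, one final call. A sketch of that loop through the Go wrapper, reusing p and session from the login sketch above; the 16-handle batch size is arbitrary:

    // findByLabel drives C_FindObjectsInit / C_FindObjects /
    // C_FindObjectsFinal via the Go wrapper.
    func findByLabel(p *pkcs11.Ctx, session pkcs11.SessionHandle, label string) ([]pkcs11.ObjectHandle, error) {
        template := []*pkcs11.Attribute{pkcs11.NewAttribute(pkcs11.CKA_LABEL, label)}
        if err := p.FindObjectsInit(session, template); err != nil {
            return nil, err
        }
        defer p.FindObjectsFinal(session) // always terminate the search
        var all []pkcs11.ObjectHandle
        for {
            objs, _, err := p.FindObjects(session, 16) // up to 16 handles per call
            if err != nil {
                return nil, err
            }
            if len(objs) == 0 {
                return all, nil
            }
            all = append(all, objs...)
        }
    }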
/* Encryption and decryption */
/* C_EncryptInit initializes an encryption operation. */
CK_PKCS11_FUNCTION_INFO(C_EncryptInit)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_MECHANISM_PTR pMechanism, /* the encryption mechanism */
CK_OBJECT_HANDLE hKey /* handle of encryption key */
);
#endif
/* C_Encrypt encrypts single-part data. */
CK_PKCS11_FUNCTION_INFO(C_Encrypt)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pData, /* the plaintext data */
CK_ULONG ulDataLen, /* bytes of plaintext */
CK_BYTE_PTR pEncryptedData, /* gets ciphertext */
CK_ULONG_PTR pulEncryptedDataLen /* gets c-text size */
);
#endif
/* C_EncryptUpdate continues a multiple-part encryption
* operation.
*/
CK_PKCS11_FUNCTION_INFO(C_EncryptUpdate)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pPart, /* the plaintext data */
CK_ULONG ulPartLen, /* plaintext data len */
CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */
CK_ULONG_PTR pulEncryptedPartLen /* gets c-text size */
);
#endif
/* C_EncryptFinal finishes a multiple-part encryption
* operation.
*/
CK_PKCS11_FUNCTION_INFO(C_EncryptFinal)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session handle */
CK_BYTE_PTR pLastEncryptedPart, /* last c-text */
CK_ULONG_PTR pulLastEncryptedPartLen /* gets last size */
);
#endif
/* C_DecryptInit initializes a decryption operation. */
CK_PKCS11_FUNCTION_INFO(C_DecryptInit)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_MECHANISM_PTR pMechanism, /* the decryption mechanism */
CK_OBJECT_HANDLE hKey /* handle of decryption key */
);
#endif
/* C_Decrypt decrypts encrypted data in a single part. */
CK_PKCS11_FUNCTION_INFO(C_Decrypt)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pEncryptedData, /* ciphertext */
CK_ULONG ulEncryptedDataLen, /* ciphertext length */
CK_BYTE_PTR pData, /* gets plaintext */
CK_ULONG_PTR pulDataLen /* gets p-text size */
);
#endif
/* C_DecryptUpdate continues a multiple-part decryption
* operation.
*/
CK_PKCS11_FUNCTION_INFO(C_DecryptUpdate)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pEncryptedPart, /* encrypted data */
CK_ULONG ulEncryptedPartLen, /* input length */
CK_BYTE_PTR pPart, /* gets plaintext */
CK_ULONG_PTR pulPartLen /* p-text size */
);
#endif
/* C_DecryptFinal finishes a multiple-part decryption
* operation.
*/
CK_PKCS11_FUNCTION_INFO(C_DecryptFinal)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pLastPart, /* gets plaintext */
CK_ULONG_PTR pulLastPartLen /* p-text size */
);
#endif
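Encryption and decryption each come in a single-part form (C_Encrypt, C_Decrypt) and a streaming form (Init, repeated Update, Final), with at most one such operation active per session. A sketch of the streaming encrypt shape via the Go wrapper, assuming an AES key on the token and the CKM_AES_CBC_PAD mechanism:

    // encryptStream shows the init/update/final shape of a
    // multiple-part encryption operation.
    func encryptStream(p *pkcs11.Ctx, session pkcs11.SessionHandle, key pkcs11.ObjectHandle, iv []byte, parts [][]byte) ([]byte, error) {
        mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_CBC_PAD, iv)}
        if err := p.EncryptInit(session, mech, key); err != nil {
            return nil, err
        }
        var out []byte
        for _, part := range parts {
            ct, err := p.EncryptUpdate(session, part) // ciphertext may lag the input
            if err != nil {
                return nil, err
            }
            out = append(out, ct...)
        }
        final, err := p.EncryptFinal(session) // flushes any buffered block
        if err != nil {
            return nil, err
        }
        return append(out, final...), nil
    }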
/* Message digesting */
/* C_DigestInit initializes a message-digesting operation. */
CK_PKCS11_FUNCTION_INFO(C_DigestInit)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_MECHANISM_PTR pMechanism /* the digesting mechanism */
);
#endif
/* C_Digest digests data in a single part. */
CK_PKCS11_FUNCTION_INFO(C_Digest)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pData, /* data to be digested */
CK_ULONG ulDataLen, /* bytes of data to digest */
CK_BYTE_PTR pDigest, /* gets the message digest */
CK_ULONG_PTR pulDigestLen /* gets digest length */
);
#endif
/* C_DigestUpdate continues a multiple-part message-digesting
* operation.
*/
CK_PKCS11_FUNCTION_INFO(C_DigestUpdate)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pPart, /* data to be digested */
CK_ULONG ulPartLen /* bytes of data to be digested */
);
#endif
/* C_DigestKey continues a multi-part message-digesting
* operation, by digesting the value of a secret key as part of
* the data already digested.
*/
CK_PKCS11_FUNCTION_INFO(C_DigestKey)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_OBJECT_HANDLE hKey /* secret key to digest */
);
#endif
/* C_DigestFinal finishes a multiple-part message-digesting
* operation.
*/
CK_PKCS11_FUNCTION_INFO(C_DigestFinal)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pDigest, /* gets the message digest */
CK_ULONG_PTR pulDigestLen /* gets byte count of digest */
);
#endif
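Digesting follows the same single-part versus multiple-part split. A one-shot SHA-256 sketch via the Go wrapper:

    // sha256Digest drives C_DigestInit then C_Digest in one shot.
    func sha256Digest(p *pkcs11.Ctx, session pkcs11.SessionHandle, data []byte) ([]byte, error) {
        mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA256, nil)}
        if err := p.DigestInit(session, mech); err != nil {
            return nil, err
        }
        return p.Digest(session, data)
    }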
/* Signing and MACing */
/* C_SignInit initializes a signature (private key encryption)
* operation, where the signature is (will be) an appendix to
* the data, and plaintext cannot be recovered from the
* signature.
*/
CK_PKCS11_FUNCTION_INFO(C_SignInit)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_MECHANISM_PTR pMechanism, /* the signature mechanism */
CK_OBJECT_HANDLE hKey /* handle of signature key */
);
#endif
/* C_Sign signs (encrypts with private key) data in a single
* part, where the signature is (will be) an appendix to the
* data, and plaintext cannot be recovered from the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_Sign)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pData, /* the data to sign */
CK_ULONG ulDataLen, /* count of bytes to sign */
CK_BYTE_PTR pSignature, /* gets the signature */
CK_ULONG_PTR pulSignatureLen /* gets signature length */
);
#endif
/* C_SignUpdate continues a multiple-part signature operation,
* where the signature is (will be) an appendix to the data,
* and plaintext cannot be recovered from the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_SignUpdate)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pPart, /* the data to sign */
CK_ULONG ulPartLen /* count of bytes to sign */
);
#endif
/* C_SignFinal finishes a multiple-part signature operation,
* returning the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_SignFinal)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pSignature, /* gets the signature */
CK_ULONG_PTR pulSignatureLen /* gets signature length */
);
#endif
/* C_SignRecoverInit initializes a signature operation, where
* the data can be recovered from the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_SignRecoverInit)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_MECHANISM_PTR pMechanism, /* the signature mechanism */
CK_OBJECT_HANDLE hKey /* handle of the signature key */
);
#endif
/* C_SignRecover signs data in a single operation, where the
* data can be recovered from the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_SignRecover)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pData, /* the data to sign */
CK_ULONG ulDataLen, /* count of bytes to sign */
CK_BYTE_PTR pSignature, /* gets the signature */
CK_ULONG_PTR pulSignatureLen /* gets signature length */
);
#endif
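A sketch of the appended-signature path (C_SignInit then C_Sign) via the Go wrapper; CKM_SHA256_RSA_PKCS is an assumed mechanism choice:

    // signData produces an appended signature over data with priv.
    func signData(p *pkcs11.Ctx, session pkcs11.SessionHandle, priv pkcs11.ObjectHandle, data []byte) ([]byte, error) {
        mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA256_RSA_PKCS, nil)}
        if err := p.SignInit(session, mech, priv); err != nil {
            return nil, err
        }
        return p.Sign(session, data)
    }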
/* Verifying signatures and MACs */
/* C_VerifyInit initializes a verification operation, where the
* signature is an appendix to the data, and plaintext cannot
* be recovered from the signature (e.g. DSA).
*/
CK_PKCS11_FUNCTION_INFO(C_VerifyInit)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_MECHANISM_PTR pMechanism, /* the verification mechanism */
CK_OBJECT_HANDLE hKey /* verification key */
);
#endif
/* C_Verify verifies a signature in a single-part operation,
* where the signature is an appendix to the data, and plaintext
* cannot be recovered from the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_Verify)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pData, /* signed data */
CK_ULONG ulDataLen, /* length of signed data */
CK_BYTE_PTR pSignature, /* signature */
CK_ULONG ulSignatureLen /* signature length */
);
#endif
/* C_VerifyUpdate continues a multiple-part verification
* operation, where the signature is an appendix to the data,
* and plaintext cannot be recovered from the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_VerifyUpdate)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pPart, /* signed data */
CK_ULONG ulPartLen /* length of signed data */
);
#endif
/* C_VerifyFinal finishes a multiple-part verification
* operation, checking the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_VerifyFinal)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pSignature, /* signature to verify */
CK_ULONG ulSignatureLen /* signature length */
);
#endif
/* C_VerifyRecoverInit initializes a signature verification
* operation, where the data is recovered from the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_VerifyRecoverInit)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_MECHANISM_PTR pMechanism, /* the verification mechanism */
CK_OBJECT_HANDLE hKey /* verification key */
);
#endif
/* C_VerifyRecover verifies a signature in a single-part
* operation, where the data is recovered from the signature.
*/
CK_PKCS11_FUNCTION_INFO(C_VerifyRecover)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pSignature, /* signature to verify */
CK_ULONG ulSignatureLen, /* signature length */
CK_BYTE_PTR pData, /* gets signed data */
CK_ULONG_PTR pulDataLen /* gets signed data len */
);
#endif
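The matching verify side via the Go wrapper; note that C_Verify reports a bad signature as an error (CKR_SIGNATURE_INVALID) rather than as a boolean result:

    // verifyData checks an appended signature; a nil error means valid.
    func verifyData(p *pkcs11.Ctx, session pkcs11.SessionHandle, pub pkcs11.ObjectHandle, data, sig []byte) error {
        mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_SHA256_RSA_PKCS, nil)}
        if err := p.VerifyInit(session, mech, pub); err != nil {
            return err
        }
        return p.Verify(session, data, sig)
    }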
/* Dual-function cryptographic operations */
/* C_DigestEncryptUpdate continues a multiple-part digesting
* and encryption operation.
*/
CK_PKCS11_FUNCTION_INFO(C_DigestEncryptUpdate)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pPart, /* the plaintext data */
CK_ULONG ulPartLen, /* plaintext length */
CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */
CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */
);
#endif
/* C_DecryptDigestUpdate continues a multiple-part decryption and
* digesting operation.
*/
CK_PKCS11_FUNCTION_INFO(C_DecryptDigestUpdate)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pEncryptedPart, /* ciphertext */
CK_ULONG ulEncryptedPartLen, /* ciphertext length */
CK_BYTE_PTR pPart, /* gets plaintext */
CK_ULONG_PTR pulPartLen /* gets plaintext len */
);
#endif
/* C_SignEncryptUpdate continues a multiple-part signing and
* encryption operation.
*/
CK_PKCS11_FUNCTION_INFO(C_SignEncryptUpdate)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pPart, /* the plaintext data */
CK_ULONG ulPartLen, /* plaintext length */
CK_BYTE_PTR pEncryptedPart, /* gets ciphertext */
CK_ULONG_PTR pulEncryptedPartLen /* gets c-text length */
);
#endif
/* C_DecryptVerifyUpdate continues a multiple-part decryption and
* verify operation.
*/
CK_PKCS11_FUNCTION_INFO(C_DecryptVerifyUpdate)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_BYTE_PTR pEncryptedPart, /* ciphertext */
CK_ULONG ulEncryptedPartLen, /* ciphertext length */
CK_BYTE_PTR pPart, /* gets plaintext */
CK_ULONG_PTR pulPartLen /* gets p-text length */
);
#endif
/* Key management */
/* C_GenerateKey generates a secret key, creating a new key
* object.
*/
CK_PKCS11_FUNCTION_INFO(C_GenerateKey)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_MECHANISM_PTR pMechanism, /* key generation mech. */
CK_ATTRIBUTE_PTR pTemplate, /* template for new key */
CK_ULONG ulCount, /* # of attrs in template */
CK_OBJECT_HANDLE_PTR phKey /* gets handle of new key */
);
#endif
/* C_GenerateKeyPair generates a public-key/private-key pair,
* creating new key objects.
*/
CK_PKCS11_FUNCTION_INFO(C_GenerateKeyPair)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session handle */
CK_MECHANISM_PTR pMechanism, /* key-gen mech. */
CK_ATTRIBUTE_PTR pPublicKeyTemplate, /* template for pub. key */
CK_ULONG ulPublicKeyAttributeCount, /* # pub. attrs. */
CK_ATTRIBUTE_PTR pPrivateKeyTemplate, /* template for priv. key */
CK_ULONG ulPrivateKeyAttributeCount, /* # priv. attrs. */
CK_OBJECT_HANDLE_PTR phPublicKey, /* gets pub. key handle */
CK_OBJECT_HANDLE_PTR phPrivateKey /* gets priv. key handle */
);
#endif
/* C_WrapKey wraps (i.e., encrypts) a key. */
CK_PKCS11_FUNCTION_INFO(C_WrapKey)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_MECHANISM_PTR pMechanism, /* the wrapping mechanism */
CK_OBJECT_HANDLE hWrappingKey, /* wrapping key */
CK_OBJECT_HANDLE hKey, /* key to be wrapped */
CK_BYTE_PTR pWrappedKey, /* gets wrapped key */
CK_ULONG_PTR pulWrappedKeyLen /* gets wrapped key size */
);
#endif
/* C_UnwrapKey unwraps (decrypts) a wrapped key, creating a new
* key object.
*/
CK_PKCS11_FUNCTION_INFO(C_UnwrapKey)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_MECHANISM_PTR pMechanism, /* unwrapping mech. */
CK_OBJECT_HANDLE hUnwrappingKey, /* unwrapping key */
CK_BYTE_PTR pWrappedKey, /* the wrapped key */
CK_ULONG ulWrappedKeyLen, /* wrapped key len */
CK_ATTRIBUTE_PTR pTemplate, /* new key template */
CK_ULONG ulAttributeCount, /* template length */
CK_OBJECT_HANDLE_PTR phKey /* gets new handle */
);
#endif
/* C_DeriveKey derives a key from a base key, creating a new key
* object.
*/
CK_PKCS11_FUNCTION_INFO(C_DeriveKey)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* session's handle */
CK_MECHANISM_PTR pMechanism, /* key deriv. mech. */
CK_OBJECT_HANDLE hBaseKey, /* base key */
CK_ATTRIBUTE_PTR pTemplate, /* new key template */
CK_ULONG ulAttributeCount, /* template length */
CK_OBJECT_HANDLE_PTR phKey /* gets new handle */
);
#endif
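C_GenerateKeyPair takes one mechanism and two attribute templates and hands back two handles. A sketch via the Go wrapper; the 2048-bit size, exponent, and label are illustrative values:

    // generateRSA mirrors the C_GenerateKeyPair prototype above:
    // one mechanism, two templates, two handles out.
    func generateRSA(p *pkcs11.Ctx, session pkcs11.SessionHandle) (pub, priv pkcs11.ObjectHandle, err error) {
        mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_RSA_PKCS_KEY_PAIR_GEN, nil)}
        pubTemplate := []*pkcs11.Attribute{
            pkcs11.NewAttribute(pkcs11.CKA_VERIFY, true),
            pkcs11.NewAttribute(pkcs11.CKA_MODULUS_BITS, 2048),
            pkcs11.NewAttribute(pkcs11.CKA_PUBLIC_EXPONENT, []byte{0x01, 0x00, 0x01}),
        }
        privTemplate := []*pkcs11.Attribute{
            pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),
            pkcs11.NewAttribute(pkcs11.CKA_SIGN, true),
            pkcs11.NewAttribute(pkcs11.CKA_LABEL, "example-key"),
        }
        return p.GenerateKeyPair(session, mech, pubTemplate, privTemplate)
    }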
/* Random number generation */
/* C_SeedRandom mixes additional seed material into the token's
* random number generator.
*/
CK_PKCS11_FUNCTION_INFO(C_SeedRandom)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR pSeed, /* the seed material */
CK_ULONG ulSeedLen /* length of seed material */
);
#endif
/* C_GenerateRandom generates random data. */
CK_PKCS11_FUNCTION_INFO(C_GenerateRandom)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession, /* the session's handle */
CK_BYTE_PTR RandomData, /* receives the random data */
CK_ULONG ulRandomLen /* # of bytes to generate */
);
#endif
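Both RNG calls are thin wrappers on the Go side; a sketch (the matching seed call is p.SeedRandom(session, seed)):

    // randomBytes pulls n bytes from the token's RNG via C_GenerateRandom.
    func randomBytes(p *pkcs11.Ctx, session pkcs11.SessionHandle, n int) ([]byte, error) {
        return p.GenerateRandom(session, n)
    }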
/* Parallel function management */
/* C_GetFunctionStatus is a legacy function; it obtains an
* updated status of a function running in parallel with an
* application.
*/
CK_PKCS11_FUNCTION_INFO(C_GetFunctionStatus)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession /* the session's handle */
);
#endif
/* C_CancelFunction is a legacy function; it cancels a function
* running in parallel.
*/
CK_PKCS11_FUNCTION_INFO(C_CancelFunction)
#ifdef CK_NEED_ARG_LIST
(
CK_SESSION_HANDLE hSession /* the session's handle */
);
#endif
/* C_WaitForSlotEvent waits for a slot event (token insertion,
* removal, etc.) to occur.
*/
CK_PKCS11_FUNCTION_INFO(C_WaitForSlotEvent)
#ifdef CK_NEED_ARG_LIST
(
CK_FLAGS flags, /* blocking/nonblocking flag */
CK_SLOT_ID_PTR pSlot, /* location that receives the slot ID */
CK_VOID_PTR pRserved /* reserved. Should be NULL_PTR */
);
#endif
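The Go wrapper turns this one blocking call into a channel of SlotEvent values (the SlotEvent type is defined in the Go listing further down); a sketch:

    // nextSlotEvent blocks until the module reports a slot event;
    // flags 0 (no CKF_DONT_BLOCK) makes C_WaitForSlotEvent block.
    func nextSlotEvent(p *pkcs11.Ctx) pkcs11.SlotEvent {
        return <-p.WaitForSlotEvent(0)
    }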


@@ -1,33 +0,0 @@
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
#define CK_PTR *
#ifndef NULL_PTR
#define NULL_PTR 0
#endif
#define CK_DEFINE_FUNCTION(returnType, name) returnType name
#define CK_DECLARE_FUNCTION(returnType, name) returnType name
#define CK_DECLARE_FUNCTION_POINTER(returnType, name) returnType (* name)
#define CK_CALLBACK_FUNCTION(returnType, name) returnType (* name)
#include <unistd.h>
#ifdef PACKED_STRUCTURES
# pragma pack(push, 1)
# include "pkcs11.h"
# pragma pack(pop)
#else
# include "pkcs11.h"
#endif
// Copy of CK_INFO but with default alignment (not packed). Go hides unaligned
// struct fields so copying to an aligned struct is necessary to read CK_INFO
// from Go on Windows where packing is required.
typedef struct ckInfo {
CK_VERSION cryptokiVersion;
CK_UTF8CHAR manufacturerID[32];
CK_FLAGS flags;
CK_UTF8CHAR libraryDescription[32];
CK_VERSION libraryVersion;
} ckInfo, *ckInfoPtr;
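On the Go side this aligned copy is what Ctx.GetInfo reads before filling the exported Info struct defined in the Go listing below; a sketch of the call:

    // libraryVersion reports the module's version via C_GetInfo, which
    // goes through the aligned ckInfo copy above where packing applies.
    func libraryVersion(p *pkcs11.Ctx) (pkcs11.Version, error) {
        info, err := p.GetInfo()
        if err != nil {
            return pkcs11.Version{}, err
        }
        return info.LibraryVersion, nil
    }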

File diff suppressed because it is too large


@@ -1,18 +0,0 @@
//go:build release
// +build release
package pkcs11
import "fmt"
// Release is current version of the pkcs11 library.
var Release = R{1, 1, 1}
// R holds the version of this library.
type R struct {
Major, Minor, Patch int
}
func (r R) String() string {
return fmt.Sprintf("%d.%d.%d", r.Major, r.Minor, r.Patch)
}


@@ -1 +0,0 @@
0:hsm.db


@@ -1,4 +0,0 @@
log.level = INFO
objectstore.backend = file
directories.tokendir = test_data
slots.removable = false
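This is the SoftHSM2 configuration the package's tests run against (directories.tokendir points at test_data). A sketch of wiring it up from Go; both paths are assumptions that vary per installation, and the snippet assumes the os package is imported alongside pkcs11:

    // newTestCtx points SoftHSM2 at the test configuration above,
    // then loads the module; paths are illustrative assumptions.
    func newTestCtx() *pkcs11.Ctx {
        os.Setenv("SOFTHSM2_CONF", "softhsm2.conf")
        return pkcs11.New("/usr/lib/softhsm/libsofthsm2.so")
    }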


@@ -1,315 +0,0 @@
// Copyright 2013 Miek Gieben. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package pkcs11
/*
#include <stdlib.h>
#include <string.h>
#include "pkcs11go.h"
CK_ULONG Index(CK_ULONG_PTR array, CK_ULONG i)
{
return array[i];
}
static inline void putAttributePval(CK_ATTRIBUTE_PTR a, CK_VOID_PTR pValue)
{
a->pValue = pValue;
}
static inline void putMechanismParam(CK_MECHANISM_PTR m, CK_VOID_PTR pParameter)
{
m->pParameter = pParameter;
}
*/
import "C"
import (
"fmt"
"time"
"unsafe"
)
type arena []unsafe.Pointer
func (a *arena) Allocate(obj []byte) (C.CK_VOID_PTR, C.CK_ULONG) {
cobj := C.calloc(C.size_t(len(obj)), 1)
*a = append(*a, cobj)
C.memmove(cobj, unsafe.Pointer(&obj[0]), C.size_t(len(obj)))
return C.CK_VOID_PTR(cobj), C.CK_ULONG(len(obj))
}
func (a arena) Free() {
for _, p := range a {
C.free(p)
}
}
// toList converts from a C style array to a []uint.
func toList(clist C.CK_ULONG_PTR, size C.CK_ULONG) []uint {
l := make([]uint, int(size))
for i := 0; i < len(l); i++ {
l[i] = uint(C.Index(clist, C.CK_ULONG(i)))
}
defer C.free(unsafe.Pointer(clist))
return l
}
// cBBool converts a bool to a CK_BBOOL.
func cBBool(x bool) C.CK_BBOOL {
if x {
return C.CK_BBOOL(C.CK_TRUE)
}
return C.CK_BBOOL(C.CK_FALSE)
}
func uintToBytes(x uint64) []byte {
ul := C.CK_ULONG(x)
return C.GoBytes(unsafe.Pointer(&ul), C.int(unsafe.Sizeof(ul)))
}
// Error represents a PKCS#11 error.
type Error uint
func (e Error) Error() string {
return fmt.Sprintf("pkcs11: 0x%X: %s", uint(e), strerror[uint(e)])
}
func toError(e C.CK_RV) error {
if e == C.CKR_OK {
return nil
}
return Error(e)
}
// SessionHandle is a Cryptoki-assigned value that identifies a session.
type SessionHandle uint
// ObjectHandle is a token-specific identifier for an object.
type ObjectHandle uint
// Version represents any version information from the library.
type Version struct {
Major byte
Minor byte
}
func toVersion(version C.CK_VERSION) Version {
return Version{byte(version.major), byte(version.minor)}
}
// SlotEvent holds the SlotID for which a slot event (token insertion,
// removal, etc.) occurred.
type SlotEvent struct {
SlotID uint
}
// Info provides information about the library and hardware used.
type Info struct {
CryptokiVersion Version
ManufacturerID string
Flags uint
LibraryDescription string
LibraryVersion Version
}
// SlotInfo provides information about a slot.
type SlotInfo struct {
SlotDescription string // 64 bytes.
ManufacturerID string // 32 bytes.
Flags uint
HardwareVersion Version
FirmwareVersion Version
}
// TokenInfo provides information about a token.
type TokenInfo struct {
Label string
ManufacturerID string
Model string
SerialNumber string
Flags uint
MaxSessionCount uint
SessionCount uint
MaxRwSessionCount uint
RwSessionCount uint
MaxPinLen uint
MinPinLen uint
TotalPublicMemory uint
FreePublicMemory uint
TotalPrivateMemory uint
FreePrivateMemory uint
HardwareVersion Version
FirmwareVersion Version
UTCTime string
}
// SessionInfo provides information about a session.
type SessionInfo struct {
SlotID uint
State uint
Flags uint
DeviceError uint
}
// Attribute holds an attribute type/value combination.
type Attribute struct {
Type uint
Value []byte
}
// NewAttribute allocates an Attribute and returns a pointer to it.
// Note that this is merely a convenience function, as values returned
// from the HSM are not converted back to Go values; those are just raw
// byte slices.
func NewAttribute(typ uint, x interface{}) *Attribute {
// This function nicely transforms *to* an attribute, but there is
// no corresponding function that transforms back *from* an attribute,
// which in PKCS#11 is just a byte array.
a := new(Attribute)
a.Type = typ
if x == nil {
return a
}
switch v := x.(type) {
case bool:
if v {
a.Value = []byte{1}
} else {
a.Value = []byte{0}
}
case int:
a.Value = uintToBytes(uint64(v))
case int16:
a.Value = uintToBytes(uint64(v))
case int32:
a.Value = uintToBytes(uint64(v))
case int64:
a.Value = uintToBytes(uint64(v))
case uint:
a.Value = uintToBytes(uint64(v))
case uint16:
a.Value = uintToBytes(uint64(v))
case uint32:
a.Value = uintToBytes(uint64(v))
case uint64:
a.Value = uintToBytes(uint64(v))
case string:
a.Value = []byte(v)
case []byte:
a.Value = v
case time.Time: // for CKA_DATE
a.Value = cDate(v)
default:
panic("pkcs11: unhandled attribute type")
}
return a
}
// cAttributeList returns the start address and the length of an attribute list.
func cAttributeList(a []*Attribute) (arena, C.CK_ATTRIBUTE_PTR, C.CK_ULONG) {
var arena arena
if len(a) == 0 {
return nil, nil, 0
}
pa := make([]C.CK_ATTRIBUTE, len(a))
for i, attr := range a {
pa[i]._type = C.CK_ATTRIBUTE_TYPE(attr.Type)
if len(attr.Value) != 0 {
buf, len := arena.Allocate(attr.Value)
// field is unaligned on windows so this has to call into C
C.putAttributePval(&pa[i], buf)
pa[i].ulValueLen = len
}
}
return arena, &pa[0], C.CK_ULONG(len(a))
}
func cDate(t time.Time) []byte {
b := make([]byte, 8)
year, month, day := t.Date()
y := fmt.Sprintf("%4d", year)
m := fmt.Sprintf("%02d", month)
d1 := fmt.Sprintf("%02d", day)
b[0], b[1], b[2], b[3] = y[0], y[1], y[2], y[3]
b[4], b[5] = m[0], m[1]
b[6], b[7] = d1[0], d1[1]
return b
}
// Mechanism holds a mechanism type/value combination.
type Mechanism struct {
Mechanism uint
Parameter []byte
generator interface{}
}
// NewMechanism returns a pointer to an initialized Mechanism.
func NewMechanism(mech uint, x interface{}) *Mechanism {
m := new(Mechanism)
m.Mechanism = mech
if x == nil {
return m
}
switch p := x.(type) {
case *GCMParams, *OAEPParams, *ECDH1DeriveParams:
// contains pointers; defer serialization until cMechanism
m.generator = p
case []byte:
m.Parameter = p
default:
panic("parameter must be one of type: []byte, *GCMParams, *OAEPParams, *ECDH1DeriveParams")
}
return m
}
func cMechanism(mechList []*Mechanism) (arena, *C.CK_MECHANISM) {
if len(mechList) != 1 {
panic("expected exactly one mechanism")
}
mech := mechList[0]
cmech := &C.CK_MECHANISM{mechanism: C.CK_MECHANISM_TYPE(mech.Mechanism)}
// params that contain pointers are allocated here
param := mech.Parameter
var arena arena
switch p := mech.generator.(type) {
case *GCMParams:
// uses its own arena because it has to outlive this function call (yuck)
param = cGCMParams(p)
case *OAEPParams:
param, arena = cOAEPParams(p, arena)
case *ECDH1DeriveParams:
param, arena = cECDH1DeriveParams(p, arena)
}
if len(param) != 0 {
buf, len := arena.Allocate(param)
// field is unaligned on windows so this has to call into C
C.putMechanismParam(cmech, buf)
cmech.ulParameterLen = len
}
return arena, cmech
}
// MechanismInfo provides information about a particular mechanism.
type MechanismInfo struct {
MinKeySize uint
MaxKeySize uint
Flags uint
}
// stubData is a persistent nonempty byte array used by cMessage.
var stubData = []byte{0}
// cMessage returns the C byte pointer corresponding to data.
func cMessage(data []byte) (dataPtr C.CK_BYTE_PTR) {
l := len(data)
if l == 0 {
// &data[0] is forbidden in this case, so use a nontrivial array instead.
data = stubData
}
return C.CK_BYTE_PTR(unsafe.Pointer(&data[0]))
}
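NewAttribute and NewMechanism above are the two conversion points from Go values into the C structures consumed by cAttributeList and cMechanism. A sketch of a typical call site; the AES key generation, length, and label are illustrative:

    // newAESKey builds a template and mechanism, then generates a
    // 256-bit AES token key via C_GenerateKey.
    func newAESKey(p *pkcs11.Ctx, session pkcs11.SessionHandle) (pkcs11.ObjectHandle, error) {
        template := []*pkcs11.Attribute{
            pkcs11.NewAttribute(pkcs11.CKA_CLASS, pkcs11.CKO_SECRET_KEY), // uint -> CK_ULONG bytes
            pkcs11.NewAttribute(pkcs11.CKA_TOKEN, true),                  // bool -> CK_BBOOL
            pkcs11.NewAttribute(pkcs11.CKA_LABEL, "aes-key"),             // string -> raw bytes
            pkcs11.NewAttribute(pkcs11.CKA_VALUE_LEN, 32),                // int -> CK_ULONG bytes
        }
        mech := []*pkcs11.Mechanism{pkcs11.NewMechanism(pkcs11.CKM_AES_KEY_GEN, nil)} // nil: no parameter
        return p.GenerateKey(session, mech, template)
    }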


@@ -1,127 +0,0 @@
package pkcs11
// Vendor specific range for Ncipher network HSM.
const (
NFCK_VENDOR_NCIPHER = 0xde436972
CKA_NCIPHER = NFCK_VENDOR_NCIPHER
CKM_NCIPHER = NFCK_VENDOR_NCIPHER
CKK_NCIPHER = NFCK_VENDOR_NCIPHER
)
// Vendor specific mechanisms for HMAC on Ncipher HSMs where Ncipher does not allow use of generic_secret keys.
const (
CKM_NC_SHA_1_HMAC_KEY_GEN = CKM_NCIPHER + 0x3 /* no params */
CKM_NC_MD5_HMAC_KEY_GEN = CKM_NCIPHER + 0x6 /* no params */
CKM_NC_SHA224_HMAC_KEY_GEN = CKM_NCIPHER + 0x24 /* no params */
CKM_NC_SHA256_HMAC_KEY_GEN = CKM_NCIPHER + 0x25 /* no params */
CKM_NC_SHA384_HMAC_KEY_GEN = CKM_NCIPHER + 0x26 /* no params */
CKM_NC_SHA512_HMAC_KEY_GEN = CKM_NCIPHER + 0x27 /* no params */
)
// Vendor specific range for Mozilla NSS.
const (
NSSCK_VENDOR_NSS = 0x4E534350
CKO_NSS = CKO_VENDOR_DEFINED | NSSCK_VENDOR_NSS
CKK_NSS = CKK_VENDOR_DEFINED | NSSCK_VENDOR_NSS
CKC_NSS = CKC_VENDOR_DEFINED | NSSCK_VENDOR_NSS
CKA_NSS = CKA_VENDOR_DEFINED | NSSCK_VENDOR_NSS
CKA_TRUST = CKA_NSS + 0x2000
CKM_NSS = CKM_VENDOR_DEFINED | NSSCK_VENDOR_NSS
CKR_NSS = CKM_VENDOR_DEFINED | NSSCK_VENDOR_NSS
CKT_VENDOR_DEFINED = 0x80000000
CKT_NSS = CKT_VENDOR_DEFINED | NSSCK_VENDOR_NSS
)
// Vendor specific values for Mozilla NSS.
const (
CKO_NSS_CRL = CKO_NSS + 1
CKO_NSS_SMIME = CKO_NSS + 2
CKO_NSS_TRUST = CKO_NSS + 3
CKO_NSS_BUILTIN_ROOT_LIST = CKO_NSS + 4
CKO_NSS_NEWSLOT = CKO_NSS + 5
CKO_NSS_DELSLOT = CKO_NSS + 6
CKK_NSS_PKCS8 = CKK_NSS + 1
CKK_NSS_JPAKE_ROUND1 = CKK_NSS + 2
CKK_NSS_JPAKE_ROUND2 = CKK_NSS + 3
CKK_NSS_CHACHA20 = CKK_NSS + 4
CKA_NSS_URL = CKA_NSS + 1
CKA_NSS_EMAIL = CKA_NSS + 2
CKA_NSS_SMIME_INFO = CKA_NSS + 3
CKA_NSS_SMIME_TIMESTAMP = CKA_NSS + 4
CKA_NSS_PKCS8_SALT = CKA_NSS + 5
CKA_NSS_PASSWORD_CHECK = CKA_NSS + 6
CKA_NSS_EXPIRES = CKA_NSS + 7
CKA_NSS_KRL = CKA_NSS + 8
CKA_NSS_PQG_COUNTER = CKA_NSS + 20
CKA_NSS_PQG_SEED = CKA_NSS + 21
CKA_NSS_PQG_H = CKA_NSS + 22
CKA_NSS_PQG_SEED_BITS = CKA_NSS + 23
CKA_NSS_MODULE_SPEC = CKA_NSS + 24
CKA_NSS_OVERRIDE_EXTENSIONS = CKA_NSS + 25
CKA_NSS_JPAKE_SIGNERID = CKA_NSS + 26
CKA_NSS_JPAKE_PEERID = CKA_NSS + 27
CKA_NSS_JPAKE_GX1 = CKA_NSS + 28
CKA_NSS_JPAKE_GX2 = CKA_NSS + 29
CKA_NSS_JPAKE_GX3 = CKA_NSS + 30
CKA_NSS_JPAKE_GX4 = CKA_NSS + 31
CKA_NSS_JPAKE_X2 = CKA_NSS + 32
CKA_NSS_JPAKE_X2S = CKA_NSS + 33
CKA_NSS_MOZILLA_CA_POLICY = CKA_NSS + 34
CKA_TRUST_DIGITAL_SIGNATURE = CKA_TRUST + 1
CKA_TRUST_NON_REPUDIATION = CKA_TRUST + 2
CKA_TRUST_KEY_ENCIPHERMENT = CKA_TRUST + 3
CKA_TRUST_DATA_ENCIPHERMENT = CKA_TRUST + 4
CKA_TRUST_KEY_AGREEMENT = CKA_TRUST + 5
CKA_TRUST_KEY_CERT_SIGN = CKA_TRUST + 6
CKA_TRUST_CRL_SIGN = CKA_TRUST + 7
CKA_TRUST_SERVER_AUTH = CKA_TRUST + 8
CKA_TRUST_CLIENT_AUTH = CKA_TRUST + 9
CKA_TRUST_CODE_SIGNING = CKA_TRUST + 10
CKA_TRUST_EMAIL_PROTECTION = CKA_TRUST + 11
CKA_TRUST_IPSEC_END_SYSTEM = CKA_TRUST + 12
CKA_TRUST_IPSEC_TUNNEL = CKA_TRUST + 13
CKA_TRUST_IPSEC_USER = CKA_TRUST + 14
CKA_TRUST_TIME_STAMPING = CKA_TRUST + 15
CKA_TRUST_STEP_UP_APPROVED = CKA_TRUST + 16
CKA_CERT_SHA1_HASH = CKA_TRUST + 100
CKA_CERT_MD5_HASH = CKA_TRUST + 101
CKM_NSS_AES_KEY_WRAP = CKM_NSS + 1
CKM_NSS_AES_KEY_WRAP_PAD = CKM_NSS + 2
CKM_NSS_HKDF_SHA1 = CKM_NSS + 3
CKM_NSS_HKDF_SHA256 = CKM_NSS + 4
CKM_NSS_HKDF_SHA384 = CKM_NSS + 5
CKM_NSS_HKDF_SHA512 = CKM_NSS + 6
CKM_NSS_JPAKE_ROUND1_SHA1 = CKM_NSS + 7
CKM_NSS_JPAKE_ROUND1_SHA256 = CKM_NSS + 8
CKM_NSS_JPAKE_ROUND1_SHA384 = CKM_NSS + 9
CKM_NSS_JPAKE_ROUND1_SHA512 = CKM_NSS + 10
CKM_NSS_JPAKE_ROUND2_SHA1 = CKM_NSS + 11
CKM_NSS_JPAKE_ROUND2_SHA256 = CKM_NSS + 12
CKM_NSS_JPAKE_ROUND2_SHA384 = CKM_NSS + 13
CKM_NSS_JPAKE_ROUND2_SHA512 = CKM_NSS + 14
CKM_NSS_JPAKE_FINAL_SHA1 = CKM_NSS + 15
CKM_NSS_JPAKE_FINAL_SHA256 = CKM_NSS + 16
CKM_NSS_JPAKE_FINAL_SHA384 = CKM_NSS + 17
CKM_NSS_JPAKE_FINAL_SHA512 = CKM_NSS + 18
CKM_NSS_HMAC_CONSTANT_TIME = CKM_NSS + 19
CKM_NSS_SSL3_MAC_CONSTANT_TIME = CKM_NSS + 20
CKM_NSS_TLS_PRF_GENERAL_SHA256 = CKM_NSS + 21
CKM_NSS_TLS_MASTER_KEY_DERIVE_SHA256 = CKM_NSS + 22
CKM_NSS_TLS_KEY_AND_MAC_DERIVE_SHA256 = CKM_NSS + 23
CKM_NSS_TLS_MASTER_KEY_DERIVE_DH_SHA256 = CKM_NSS + 24
CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE = CKM_NSS + 25
CKM_NSS_TLS_EXTENDED_MASTER_KEY_DERIVE_DH = CKM_NSS + 26
CKM_NSS_CHACHA20_KEY_GEN = CKM_NSS + 27
CKM_NSS_CHACHA20_POLY1305 = CKM_NSS + 28
CKM_NSS_PKCS12_PBE_SHA224_HMAC_KEY_GEN = CKM_NSS + 29
CKM_NSS_PKCS12_PBE_SHA256_HMAC_KEY_GEN = CKM_NSS + 30
CKM_NSS_PKCS12_PBE_SHA384_HMAC_KEY_GEN = CKM_NSS + 31
CKM_NSS_PKCS12_PBE_SHA512_HMAC_KEY_GEN = CKM_NSS + 32
CKR_NSS_CERTDB_FAILED = CKR_NSS + 1
CKR_NSS_KEYDB_FAILED = CKR_NSS + 2
CKT_NSS_TRUSTED = CKT_NSS + 1
CKT_NSS_TRUSTED_DELEGATOR = CKT_NSS + 2
CKT_NSS_MUST_VERIFY_TRUST = CKT_NSS + 3
CKT_NSS_NOT_TRUSTED = CKT_NSS + 10
CKT_NSS_TRUST_UNKNOWN = CKT_NSS + 5
)


@@ -1,766 +0,0 @@
// Copyright 2013 Miek Gieben. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Code generated by "go run const_generate.go"; DO NOT EDIT.
package pkcs11
const (
CK_TRUE = 1
CK_FALSE = 0
CK_UNAVAILABLE_INFORMATION = ^uint(0)
CK_EFFECTIVELY_INFINITE = 0
CK_INVALID_HANDLE = 0
CKN_SURRENDER = 0
CKN_OTP_CHANGED = 1
CKF_TOKEN_PRESENT = 0x00000001
CKF_REMOVABLE_DEVICE = 0x00000002
CKF_HW_SLOT = 0x00000004
CKF_RNG = 0x00000001
CKF_WRITE_PROTECTED = 0x00000002
CKF_LOGIN_REQUIRED = 0x00000004
CKF_USER_PIN_INITIALIZED = 0x00000008
CKF_RESTORE_KEY_NOT_NEEDED = 0x00000020
CKF_CLOCK_ON_TOKEN = 0x00000040
CKF_PROTECTED_AUTHENTICATION_PATH = 0x00000100
CKF_DUAL_CRYPTO_OPERATIONS = 0x00000200
CKF_TOKEN_INITIALIZED = 0x00000400
CKF_SECONDARY_AUTHENTICATION = 0x00000800
CKF_USER_PIN_COUNT_LOW = 0x00010000
CKF_USER_PIN_FINAL_TRY = 0x00020000
CKF_USER_PIN_LOCKED = 0x00040000
CKF_USER_PIN_TO_BE_CHANGED = 0x00080000
CKF_SO_PIN_COUNT_LOW = 0x00100000
CKF_SO_PIN_FINAL_TRY = 0x00200000
CKF_SO_PIN_LOCKED = 0x00400000
CKF_SO_PIN_TO_BE_CHANGED = 0x00800000
CKF_ERROR_STATE = 0x01000000
CKU_SO = 0
CKU_USER = 1
CKU_CONTEXT_SPECIFIC = 2
CKS_RO_PUBLIC_SESSION = 0
CKS_RO_USER_FUNCTIONS = 1
CKS_RW_PUBLIC_SESSION = 2
CKS_RW_USER_FUNCTIONS = 3
CKS_RW_SO_FUNCTIONS = 4
CKF_RW_SESSION = 0x00000002
CKF_SERIAL_SESSION = 0x00000004
CKO_DATA = 0x00000000
CKO_CERTIFICATE = 0x00000001
CKO_PUBLIC_KEY = 0x00000002
CKO_PRIVATE_KEY = 0x00000003
CKO_SECRET_KEY = 0x00000004
CKO_HW_FEATURE = 0x00000005
CKO_DOMAIN_PARAMETERS = 0x00000006
CKO_MECHANISM = 0x00000007
CKO_OTP_KEY = 0x00000008
CKO_VENDOR_DEFINED = 0x80000000
CKH_MONOTONIC_COUNTER = 0x00000001
CKH_CLOCK = 0x00000002
CKH_USER_INTERFACE = 0x00000003
CKH_VENDOR_DEFINED = 0x80000000
CKK_RSA = 0x00000000
CKK_DSA = 0x00000001
CKK_DH = 0x00000002
CKK_ECDSA = 0x00000003 // Deprecated
CKK_EC = 0x00000003
CKK_X9_42_DH = 0x00000004
CKK_KEA = 0x00000005
CKK_GENERIC_SECRET = 0x00000010
CKK_RC2 = 0x00000011
CKK_RC4 = 0x00000012
CKK_DES = 0x00000013
CKK_DES2 = 0x00000014
CKK_DES3 = 0x00000015
CKK_CAST = 0x00000016
CKK_CAST3 = 0x00000017
CKK_CAST5 = 0x00000018 // Deprecated
CKK_CAST128 = 0x00000018
CKK_RC5 = 0x00000019
CKK_IDEA = 0x0000001A
CKK_SKIPJACK = 0x0000001B
CKK_BATON = 0x0000001C
CKK_JUNIPER = 0x0000001D
CKK_CDMF = 0x0000001E
CKK_AES = 0x0000001F
CKK_BLOWFISH = 0x00000020
CKK_TWOFISH = 0x00000021
CKK_SECURID = 0x00000022
CKK_HOTP = 0x00000023
CKK_ACTI = 0x00000024
CKK_CAMELLIA = 0x00000025
CKK_ARIA = 0x00000026
CKK_MD5_HMAC = 0x00000027
CKK_SHA_1_HMAC = 0x00000028
CKK_RIPEMD128_HMAC = 0x00000029
CKK_RIPEMD160_HMAC = 0x0000002A
CKK_SHA256_HMAC = 0x0000002B
CKK_SHA384_HMAC = 0x0000002C
CKK_SHA512_HMAC = 0x0000002D
CKK_SHA224_HMAC = 0x0000002E
CKK_SEED = 0x0000002F
CKK_GOSTR3410 = 0x00000030
CKK_GOSTR3411 = 0x00000031
CKK_GOST28147 = 0x00000032
CKK_SHA3_224_HMAC = 0x00000033
CKK_SHA3_256_HMAC = 0x00000034
CKK_SHA3_384_HMAC = 0x00000035
CKK_SHA3_512_HMAC = 0x00000036
CKK_VENDOR_DEFINED = 0x80000000
CK_CERTIFICATE_CATEGORY_UNSPECIFIED = 0
CK_CERTIFICATE_CATEGORY_TOKEN_USER = 1
CK_CERTIFICATE_CATEGORY_AUTHORITY = 2
CK_CERTIFICATE_CATEGORY_OTHER_ENTITY = 3
CK_SECURITY_DOMAIN_UNSPECIFIED = 0
CK_SECURITY_DOMAIN_MANUFACTURER = 1
CK_SECURITY_DOMAIN_OPERATOR = 2
CK_SECURITY_DOMAIN_THIRD_PARTY = 3
CKC_X_509 = 0x00000000
CKC_X_509_ATTR_CERT = 0x00000001
CKC_WTLS = 0x00000002
CKC_VENDOR_DEFINED = 0x80000000
CKF_ARRAY_ATTRIBUTE = 0x40000000
CK_OTP_FORMAT_DECIMAL = 0
CK_OTP_FORMAT_HEXADECIMAL = 1
CK_OTP_FORMAT_ALPHANUMERIC = 2
CK_OTP_FORMAT_BINARY = 3
CK_OTP_PARAM_IGNORED = 0
CK_OTP_PARAM_OPTIONAL = 1
CK_OTP_PARAM_MANDATORY = 2
CKA_CLASS = 0x00000000
CKA_TOKEN = 0x00000001
CKA_PRIVATE = 0x00000002
CKA_LABEL = 0x00000003
CKA_APPLICATION = 0x00000010
CKA_VALUE = 0x00000011
CKA_OBJECT_ID = 0x00000012
CKA_CERTIFICATE_TYPE = 0x00000080
CKA_ISSUER = 0x00000081
CKA_SERIAL_NUMBER = 0x00000082
CKA_AC_ISSUER = 0x00000083
CKA_OWNER = 0x00000084
CKA_ATTR_TYPES = 0x00000085
CKA_TRUSTED = 0x00000086
CKA_CERTIFICATE_CATEGORY = 0x00000087
CKA_JAVA_MIDP_SECURITY_DOMAIN = 0x00000088
CKA_URL = 0x00000089
CKA_HASH_OF_SUBJECT_PUBLIC_KEY = 0x0000008A
CKA_HASH_OF_ISSUER_PUBLIC_KEY = 0x0000008B
CKA_NAME_HASH_ALGORITHM = 0x0000008C
CKA_CHECK_VALUE = 0x00000090
CKA_KEY_TYPE = 0x00000100
CKA_SUBJECT = 0x00000101
CKA_ID = 0x00000102
CKA_SENSITIVE = 0x00000103
CKA_ENCRYPT = 0x00000104
CKA_DECRYPT = 0x00000105
CKA_WRAP = 0x00000106
CKA_UNWRAP = 0x00000107
CKA_SIGN = 0x00000108
CKA_SIGN_RECOVER = 0x00000109
CKA_VERIFY = 0x0000010A
CKA_VERIFY_RECOVER = 0x0000010B
CKA_DERIVE = 0x0000010C
CKA_START_DATE = 0x00000110
CKA_END_DATE = 0x00000111
CKA_MODULUS = 0x00000120
CKA_MODULUS_BITS = 0x00000121
CKA_PUBLIC_EXPONENT = 0x00000122
CKA_PRIVATE_EXPONENT = 0x00000123
CKA_PRIME_1 = 0x00000124
CKA_PRIME_2 = 0x00000125
CKA_EXPONENT_1 = 0x00000126
CKA_EXPONENT_2 = 0x00000127
CKA_COEFFICIENT = 0x00000128
CKA_PUBLIC_KEY_INFO = 0x00000129
CKA_PRIME = 0x00000130
CKA_SUBPRIME = 0x00000131
CKA_BASE = 0x00000132
CKA_PRIME_BITS = 0x00000133
CKA_SUBPRIME_BITS = 0x00000134
CKA_SUB_PRIME_BITS = CKA_SUBPRIME_BITS
CKA_VALUE_BITS = 0x00000160
CKA_VALUE_LEN = 0x00000161
CKA_EXTRACTABLE = 0x00000162
CKA_LOCAL = 0x00000163
CKA_NEVER_EXTRACTABLE = 0x00000164
CKA_ALWAYS_SENSITIVE = 0x00000165
CKA_KEY_GEN_MECHANISM = 0x00000166
CKA_MODIFIABLE = 0x00000170
CKA_COPYABLE = 0x00000171
CKA_DESTROYABLE = 0x00000172
CKA_ECDSA_PARAMS = 0x00000180 // Deprecated
CKA_EC_PARAMS = 0x00000180
CKA_EC_POINT = 0x00000181
CKA_SECONDARY_AUTH = 0x00000200 // Deprecated
CKA_AUTH_PIN_FLAGS = 0x00000201 // Deprecated
CKA_ALWAYS_AUTHENTICATE = 0x00000202
CKA_WRAP_WITH_TRUSTED = 0x00000210
CKA_WRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000211)
CKA_UNWRAP_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000212)
CKA_DERIVE_TEMPLATE = (CKF_ARRAY_ATTRIBUTE | 0x00000213)
CKA_OTP_FORMAT = 0x00000220
CKA_OTP_LENGTH = 0x00000221
CKA_OTP_TIME_INTERVAL = 0x00000222
CKA_OTP_USER_FRIENDLY_MODE = 0x00000223
CKA_OTP_CHALLENGE_REQUIREMENT = 0x00000224
CKA_OTP_TIME_REQUIREMENT = 0x00000225
CKA_OTP_COUNTER_REQUIREMENT = 0x00000226
CKA_OTP_PIN_REQUIREMENT = 0x00000227
CKA_OTP_COUNTER = 0x0000022E
CKA_OTP_TIME = 0x0000022F
CKA_OTP_USER_IDENTIFIER = 0x0000022A
CKA_OTP_SERVICE_IDENTIFIER = 0x0000022B
CKA_OTP_SERVICE_LOGO = 0x0000022C
CKA_OTP_SERVICE_LOGO_TYPE = 0x0000022D
CKA_GOSTR3410_PARAMS = 0x00000250
CKA_GOSTR3411_PARAMS = 0x00000251
CKA_GOST28147_PARAMS = 0x00000252
CKA_HW_FEATURE_TYPE = 0x00000300
CKA_RESET_ON_INIT = 0x00000301
CKA_HAS_RESET = 0x00000302
CKA_PIXEL_X = 0x00000400
CKA_PIXEL_Y = 0x00000401
CKA_RESOLUTION = 0x00000402
CKA_CHAR_ROWS = 0x00000403
CKA_CHAR_COLUMNS = 0x00000404
CKA_COLOR = 0x00000405
CKA_BITS_PER_PIXEL = 0x00000406
CKA_CHAR_SETS = 0x00000480
CKA_ENCODING_METHODS = 0x00000481
CKA_MIME_TYPES = 0x00000482
CKA_MECHANISM_TYPE = 0x00000500
CKA_REQUIRED_CMS_ATTRIBUTES = 0x00000501
CKA_DEFAULT_CMS_ATTRIBUTES = 0x00000502
CKA_SUPPORTED_CMS_ATTRIBUTES = 0x00000503
CKA_ALLOWED_MECHANISMS = (CKF_ARRAY_ATTRIBUTE | 0x00000600)
CKA_VENDOR_DEFINED = 0x80000000
CKM_RSA_PKCS_KEY_PAIR_GEN = 0x00000000
CKM_RSA_PKCS = 0x00000001
CKM_RSA_9796 = 0x00000002
CKM_RSA_X_509 = 0x00000003
CKM_MD2_RSA_PKCS = 0x00000004
CKM_MD5_RSA_PKCS = 0x00000005
CKM_SHA1_RSA_PKCS = 0x00000006
CKM_RIPEMD128_RSA_PKCS = 0x00000007
CKM_RIPEMD160_RSA_PKCS = 0x00000008
CKM_RSA_PKCS_OAEP = 0x00000009
CKM_RSA_X9_31_KEY_PAIR_GEN = 0x0000000A
CKM_RSA_X9_31 = 0x0000000B
CKM_SHA1_RSA_X9_31 = 0x0000000C
CKM_RSA_PKCS_PSS = 0x0000000D
CKM_SHA1_RSA_PKCS_PSS = 0x0000000E
CKM_DSA_KEY_PAIR_GEN = 0x00000010
CKM_DSA = 0x00000011
CKM_DSA_SHA1 = 0x00000012
CKM_DSA_SHA224 = 0x00000013
CKM_DSA_SHA256 = 0x00000014
CKM_DSA_SHA384 = 0x00000015
CKM_DSA_SHA512 = 0x00000016
CKM_DSA_SHA3_224 = 0x00000018
CKM_DSA_SHA3_256 = 0x00000019
CKM_DSA_SHA3_384 = 0x0000001A
CKM_DSA_SHA3_512 = 0x0000001B
CKM_DH_PKCS_KEY_PAIR_GEN = 0x00000020
CKM_DH_PKCS_DERIVE = 0x00000021
CKM_X9_42_DH_KEY_PAIR_GEN = 0x00000030
CKM_X9_42_DH_DERIVE = 0x00000031
CKM_X9_42_DH_HYBRID_DERIVE = 0x00000032
CKM_X9_42_MQV_DERIVE = 0x00000033
CKM_SHA256_RSA_PKCS = 0x00000040
CKM_SHA384_RSA_PKCS = 0x00000041
CKM_SHA512_RSA_PKCS = 0x00000042
CKM_SHA256_RSA_PKCS_PSS = 0x00000043
CKM_SHA384_RSA_PKCS_PSS = 0x00000044
CKM_SHA512_RSA_PKCS_PSS = 0x00000045
CKM_SHA224_RSA_PKCS = 0x00000046
CKM_SHA224_RSA_PKCS_PSS = 0x00000047
CKM_SHA512_224 = 0x00000048
CKM_SHA512_224_HMAC = 0x00000049
CKM_SHA512_224_HMAC_GENERAL = 0x0000004A
CKM_SHA512_224_KEY_DERIVATION = 0x0000004B
CKM_SHA512_256 = 0x0000004C
CKM_SHA512_256_HMAC = 0x0000004D
CKM_SHA512_256_HMAC_GENERAL = 0x0000004E
CKM_SHA512_256_KEY_DERIVATION = 0x0000004F
CKM_SHA512_T = 0x00000050
CKM_SHA512_T_HMAC = 0x00000051
CKM_SHA512_T_HMAC_GENERAL = 0x00000052
CKM_SHA512_T_KEY_DERIVATION = 0x00000053
CKM_SHA3_256_RSA_PKCS = 0x00000060
CKM_SHA3_384_RSA_PKCS = 0x00000061
CKM_SHA3_512_RSA_PKCS = 0x00000062
CKM_SHA3_256_RSA_PKCS_PSS = 0x00000063
CKM_SHA3_384_RSA_PKCS_PSS = 0x00000064
CKM_SHA3_512_RSA_PKCS_PSS = 0x00000065
CKM_SHA3_224_RSA_PKCS = 0x00000066
CKM_SHA3_224_RSA_PKCS_PSS = 0x00000067
CKM_RC2_KEY_GEN = 0x00000100
CKM_RC2_ECB = 0x00000101
CKM_RC2_CBC = 0x00000102
CKM_RC2_MAC = 0x00000103
CKM_RC2_MAC_GENERAL = 0x00000104
CKM_RC2_CBC_PAD = 0x00000105
CKM_RC4_KEY_GEN = 0x00000110
CKM_RC4 = 0x00000111
CKM_DES_KEY_GEN = 0x00000120
CKM_DES_ECB = 0x00000121
CKM_DES_CBC = 0x00000122
CKM_DES_MAC = 0x00000123
CKM_DES_MAC_GENERAL = 0x00000124
CKM_DES_CBC_PAD = 0x00000125
CKM_DES2_KEY_GEN = 0x00000130
CKM_DES3_KEY_GEN = 0x00000131
CKM_DES3_ECB = 0x00000132
CKM_DES3_CBC = 0x00000133
CKM_DES3_MAC = 0x00000134
CKM_DES3_MAC_GENERAL = 0x00000135
CKM_DES3_CBC_PAD = 0x00000136
CKM_DES3_CMAC_GENERAL = 0x00000137
CKM_DES3_CMAC = 0x00000138
CKM_CDMF_KEY_GEN = 0x00000140
CKM_CDMF_ECB = 0x00000141
CKM_CDMF_CBC = 0x00000142
CKM_CDMF_MAC = 0x00000143
CKM_CDMF_MAC_GENERAL = 0x00000144
CKM_CDMF_CBC_PAD = 0x00000145
CKM_DES_OFB64 = 0x00000150
CKM_DES_OFB8 = 0x00000151
CKM_DES_CFB64 = 0x00000152
CKM_DES_CFB8 = 0x00000153
CKM_MD2 = 0x00000200
CKM_MD2_HMAC = 0x00000201
CKM_MD2_HMAC_GENERAL = 0x00000202
CKM_MD5 = 0x00000210
CKM_MD5_HMAC = 0x00000211
CKM_MD5_HMAC_GENERAL = 0x00000212
CKM_SHA_1 = 0x00000220
CKM_SHA_1_HMAC = 0x00000221
CKM_SHA_1_HMAC_GENERAL = 0x00000222
CKM_RIPEMD128 = 0x00000230
CKM_RIPEMD128_HMAC = 0x00000231
CKM_RIPEMD128_HMAC_GENERAL = 0x00000232
CKM_RIPEMD160 = 0x00000240
CKM_RIPEMD160_HMAC = 0x00000241
CKM_RIPEMD160_HMAC_GENERAL = 0x00000242
CKM_SHA256 = 0x00000250
CKM_SHA256_HMAC = 0x00000251
CKM_SHA256_HMAC_GENERAL = 0x00000252
CKM_SHA224 = 0x00000255
CKM_SHA224_HMAC = 0x00000256
CKM_SHA224_HMAC_GENERAL = 0x00000257
CKM_SHA384 = 0x00000260
CKM_SHA384_HMAC = 0x00000261
CKM_SHA384_HMAC_GENERAL = 0x00000262
CKM_SHA512 = 0x00000270
CKM_SHA512_HMAC = 0x00000271
CKM_SHA512_HMAC_GENERAL = 0x00000272
CKM_SECURID_KEY_GEN = 0x00000280
CKM_SECURID = 0x00000282
CKM_HOTP_KEY_GEN = 0x00000290
CKM_HOTP = 0x00000291
CKM_ACTI = 0x000002A0
CKM_ACTI_KEY_GEN = 0x000002A1
CKM_SHA3_256 = 0x000002B0
CKM_SHA3_256_HMAC = 0x000002B1
CKM_SHA3_256_HMAC_GENERAL = 0x000002B2
CKM_SHA3_256_KEY_GEN = 0x000002B3
CKM_SHA3_224 = 0x000002B5
CKM_SHA3_224_HMAC = 0x000002B6
CKM_SHA3_224_HMAC_GENERAL = 0x000002B7
CKM_SHA3_224_KEY_GEN = 0x000002B8
CKM_SHA3_384 = 0x000002C0
CKM_SHA3_384_HMAC = 0x000002C1
CKM_SHA3_384_HMAC_GENERAL = 0x000002C2
CKM_SHA3_384_KEY_GEN = 0x000002C3
CKM_SHA3_512 = 0x000002D0
CKM_SHA3_512_HMAC = 0x000002D1
CKM_SHA3_512_HMAC_GENERAL = 0x000002D2
CKM_SHA3_512_KEY_GEN = 0x000002D3
CKM_CAST_KEY_GEN = 0x00000300
CKM_CAST_ECB = 0x00000301
CKM_CAST_CBC = 0x00000302
CKM_CAST_MAC = 0x00000303
CKM_CAST_MAC_GENERAL = 0x00000304
CKM_CAST_CBC_PAD = 0x00000305
CKM_CAST3_KEY_GEN = 0x00000310
CKM_CAST3_ECB = 0x00000311
CKM_CAST3_CBC = 0x00000312
CKM_CAST3_MAC = 0x00000313
CKM_CAST3_MAC_GENERAL = 0x00000314
CKM_CAST3_CBC_PAD = 0x00000315
CKM_CAST5_KEY_GEN = 0x00000320
CKM_CAST128_KEY_GEN = 0x00000320
CKM_CAST5_ECB = 0x00000321
CKM_CAST128_ECB = 0x00000321
CKM_CAST5_CBC = 0x00000322 // Deprecated
CKM_CAST128_CBC = 0x00000322
CKM_CAST5_MAC = 0x00000323 // Deprecated
CKM_CAST128_MAC = 0x00000323
CKM_CAST5_MAC_GENERAL = 0x00000324 // Deprecated
CKM_CAST128_MAC_GENERAL = 0x00000324
CKM_CAST5_CBC_PAD = 0x00000325 // Deprecated
CKM_CAST128_CBC_PAD = 0x00000325
CKM_RC5_KEY_GEN = 0x00000330
CKM_RC5_ECB = 0x00000331
CKM_RC5_CBC = 0x00000332
CKM_RC5_MAC = 0x00000333
CKM_RC5_MAC_GENERAL = 0x00000334
CKM_RC5_CBC_PAD = 0x00000335
CKM_IDEA_KEY_GEN = 0x00000340
CKM_IDEA_ECB = 0x00000341
CKM_IDEA_CBC = 0x00000342
CKM_IDEA_MAC = 0x00000343
CKM_IDEA_MAC_GENERAL = 0x00000344
CKM_IDEA_CBC_PAD = 0x00000345
CKM_GENERIC_SECRET_KEY_GEN = 0x00000350
CKM_CONCATENATE_BASE_AND_KEY = 0x00000360
CKM_CONCATENATE_BASE_AND_DATA = 0x00000362
CKM_CONCATENATE_DATA_AND_BASE = 0x00000363
CKM_XOR_BASE_AND_DATA = 0x00000364
CKM_EXTRACT_KEY_FROM_KEY = 0x00000365
CKM_SSL3_PRE_MASTER_KEY_GEN = 0x00000370
CKM_SSL3_MASTER_KEY_DERIVE = 0x00000371
CKM_SSL3_KEY_AND_MAC_DERIVE = 0x00000372
CKM_SSL3_MASTER_KEY_DERIVE_DH = 0x00000373
CKM_TLS_PRE_MASTER_KEY_GEN = 0x00000374
CKM_TLS_MASTER_KEY_DERIVE = 0x00000375
CKM_TLS_KEY_AND_MAC_DERIVE = 0x00000376
CKM_TLS_MASTER_KEY_DERIVE_DH = 0x00000377
CKM_TLS_PRF = 0x00000378
CKM_SSL3_MD5_MAC = 0x00000380
CKM_SSL3_SHA1_MAC = 0x00000381
CKM_MD5_KEY_DERIVATION = 0x00000390
CKM_MD2_KEY_DERIVATION = 0x00000391
CKM_SHA1_KEY_DERIVATION = 0x00000392
CKM_SHA256_KEY_DERIVATION = 0x00000393
CKM_SHA384_KEY_DERIVATION = 0x00000394
CKM_SHA512_KEY_DERIVATION = 0x00000395
CKM_SHA224_KEY_DERIVATION = 0x00000396
CKM_SHA3_256_KEY_DERIVE = 0x00000397
CKM_SHA3_224_KEY_DERIVE = 0x00000398
CKM_SHA3_384_KEY_DERIVE = 0x00000399
CKM_SHA3_512_KEY_DERIVE = 0x0000039A
CKM_SHAKE_128_KEY_DERIVE = 0x0000039B
CKM_SHAKE_256_KEY_DERIVE = 0x0000039C
CKM_PBE_MD2_DES_CBC = 0x000003A0
CKM_PBE_MD5_DES_CBC = 0x000003A1
CKM_PBE_MD5_CAST_CBC = 0x000003A2
CKM_PBE_MD5_CAST3_CBC = 0x000003A3
CKM_PBE_MD5_CAST5_CBC = 0x000003A4 // Deprecated
CKM_PBE_MD5_CAST128_CBC = 0x000003A4
CKM_PBE_SHA1_CAST5_CBC = 0x000003A5 // Deprecated
CKM_PBE_SHA1_CAST128_CBC = 0x000003A5
CKM_PBE_SHA1_RC4_128 = 0x000003A6
CKM_PBE_SHA1_RC4_40 = 0x000003A7
CKM_PBE_SHA1_DES3_EDE_CBC = 0x000003A8
CKM_PBE_SHA1_DES2_EDE_CBC = 0x000003A9
CKM_PBE_SHA1_RC2_128_CBC = 0x000003AA
CKM_PBE_SHA1_RC2_40_CBC = 0x000003AB
CKM_PKCS5_PBKD2 = 0x000003B0
CKM_PBA_SHA1_WITH_SHA1_HMAC = 0x000003C0
CKM_WTLS_PRE_MASTER_KEY_GEN = 0x000003D0
CKM_WTLS_MASTER_KEY_DERIVE = 0x000003D1
CKM_WTLS_MASTER_KEY_DERIVE_DH_ECC = 0x000003D2
CKM_WTLS_PRF = 0x000003D3
CKM_WTLS_SERVER_KEY_AND_MAC_DERIVE = 0x000003D4
CKM_WTLS_CLIENT_KEY_AND_MAC_DERIVE = 0x000003D5
CKM_TLS10_MAC_SERVER = 0x000003D6
CKM_TLS10_MAC_CLIENT = 0x000003D7
CKM_TLS12_MAC = 0x000003D8
CKM_TLS12_KDF = 0x000003D9
CKM_TLS12_MASTER_KEY_DERIVE = 0x000003E0
CKM_TLS12_KEY_AND_MAC_DERIVE = 0x000003E1
CKM_TLS12_MASTER_KEY_DERIVE_DH = 0x000003E2
CKM_TLS12_KEY_SAFE_DERIVE = 0x000003E3
CKM_TLS_MAC = 0x000003E4
CKM_TLS_KDF = 0x000003E5
CKM_KEY_WRAP_LYNKS = 0x00000400
CKM_KEY_WRAP_SET_OAEP = 0x00000401
CKM_CMS_SIG = 0x00000500
CKM_KIP_DERIVE = 0x00000510
CKM_KIP_WRAP = 0x00000511
CKM_KIP_MAC = 0x00000512
CKM_CAMELLIA_KEY_GEN = 0x00000550
CKM_CAMELLIA_ECB = 0x00000551
CKM_CAMELLIA_CBC = 0x00000552
CKM_CAMELLIA_MAC = 0x00000553
CKM_CAMELLIA_MAC_GENERAL = 0x00000554
CKM_CAMELLIA_CBC_PAD = 0x00000555
CKM_CAMELLIA_ECB_ENCRYPT_DATA = 0x00000556
CKM_CAMELLIA_CBC_ENCRYPT_DATA = 0x00000557
CKM_CAMELLIA_CTR = 0x00000558
CKM_ARIA_KEY_GEN = 0x00000560
CKM_ARIA_ECB = 0x00000561
CKM_ARIA_CBC = 0x00000562
CKM_ARIA_MAC = 0x00000563
CKM_ARIA_MAC_GENERAL = 0x00000564
CKM_ARIA_CBC_PAD = 0x00000565
CKM_ARIA_ECB_ENCRYPT_DATA = 0x00000566
CKM_ARIA_CBC_ENCRYPT_DATA = 0x00000567
CKM_SEED_KEY_GEN = 0x00000650
CKM_SEED_ECB = 0x00000651
CKM_SEED_CBC = 0x00000652
CKM_SEED_MAC = 0x00000653
CKM_SEED_MAC_GENERAL = 0x00000654
CKM_SEED_CBC_PAD = 0x00000655
CKM_SEED_ECB_ENCRYPT_DATA = 0x00000656
CKM_SEED_CBC_ENCRYPT_DATA = 0x00000657
CKM_SKIPJACK_KEY_GEN = 0x00001000
CKM_SKIPJACK_ECB64 = 0x00001001
CKM_SKIPJACK_CBC64 = 0x00001002
CKM_SKIPJACK_OFB64 = 0x00001003
CKM_SKIPJACK_CFB64 = 0x00001004
CKM_SKIPJACK_CFB32 = 0x00001005
CKM_SKIPJACK_CFB16 = 0x00001006
CKM_SKIPJACK_CFB8 = 0x00001007
CKM_SKIPJACK_WRAP = 0x00001008
CKM_SKIPJACK_PRIVATE_WRAP = 0x00001009
CKM_SKIPJACK_RELAYX = 0x0000100a
CKM_KEA_KEY_PAIR_GEN = 0x00001010
CKM_KEA_KEY_DERIVE = 0x00001011
CKM_KEA_DERIVE = 0x00001012
CKM_FORTEZZA_TIMESTAMP = 0x00001020
CKM_BATON_KEY_GEN = 0x00001030
CKM_BATON_ECB128 = 0x00001031
CKM_BATON_ECB96 = 0x00001032
CKM_BATON_CBC128 = 0x00001033
CKM_BATON_COUNTER = 0x00001034
CKM_BATON_SHUFFLE = 0x00001035
CKM_BATON_WRAP = 0x00001036
CKM_ECDSA_KEY_PAIR_GEN = 0x00001040 // Deprecated
CKM_EC_KEY_PAIR_GEN = 0x00001040
CKM_ECDSA = 0x00001041
CKM_ECDSA_SHA1 = 0x00001042
CKM_ECDSA_SHA224 = 0x00001043
CKM_ECDSA_SHA256 = 0x00001044
CKM_ECDSA_SHA384 = 0x00001045
CKM_ECDSA_SHA512 = 0x00001046
CKM_ECDH1_DERIVE = 0x00001050
CKM_ECDH1_COFACTOR_DERIVE = 0x00001051
CKM_ECMQV_DERIVE = 0x00001052
CKM_ECDH_AES_KEY_WRAP = 0x00001053
CKM_RSA_AES_KEY_WRAP = 0x00001054
CKM_JUNIPER_KEY_GEN = 0x00001060
CKM_JUNIPER_ECB128 = 0x00001061
CKM_JUNIPER_CBC128 = 0x00001062
CKM_JUNIPER_COUNTER = 0x00001063
CKM_JUNIPER_SHUFFLE = 0x00001064
CKM_JUNIPER_WRAP = 0x00001065
CKM_FASTHASH = 0x00001070
CKM_AES_KEY_GEN = 0x00001080
CKM_AES_ECB = 0x00001081
CKM_AES_CBC = 0x00001082
CKM_AES_MAC = 0x00001083
CKM_AES_MAC_GENERAL = 0x00001084
CKM_AES_CBC_PAD = 0x00001085
CKM_AES_CTR = 0x00001086
CKM_AES_GCM = 0x00001087
CKM_AES_CCM = 0x00001088
CKM_AES_CTS = 0x00001089
CKM_AES_CMAC = 0x0000108A
CKM_AES_CMAC_GENERAL = 0x0000108B
CKM_AES_XCBC_MAC = 0x0000108C
CKM_AES_XCBC_MAC_96 = 0x0000108D
CKM_AES_GMAC = 0x0000108E
CKM_BLOWFISH_KEY_GEN = 0x00001090
CKM_BLOWFISH_CBC = 0x00001091
CKM_TWOFISH_KEY_GEN = 0x00001092
CKM_TWOFISH_CBC = 0x00001093
CKM_BLOWFISH_CBC_PAD = 0x00001094
CKM_TWOFISH_CBC_PAD = 0x00001095
CKM_DES_ECB_ENCRYPT_DATA = 0x00001100
CKM_DES_CBC_ENCRYPT_DATA = 0x00001101
CKM_DES3_ECB_ENCRYPT_DATA = 0x00001102
CKM_DES3_CBC_ENCRYPT_DATA = 0x00001103
CKM_AES_ECB_ENCRYPT_DATA = 0x00001104
CKM_AES_CBC_ENCRYPT_DATA = 0x00001105
CKM_GOSTR3410_KEY_PAIR_GEN = 0x00001200
CKM_GOSTR3410 = 0x00001201
CKM_GOSTR3410_WITH_GOSTR3411 = 0x00001202
CKM_GOSTR3410_KEY_WRAP = 0x00001203
CKM_GOSTR3410_DERIVE = 0x00001204
CKM_GOSTR3411 = 0x00001210
CKM_GOSTR3411_HMAC = 0x00001211
CKM_GOST28147_KEY_GEN = 0x00001220
CKM_GOST28147_ECB = 0x00001221
CKM_GOST28147 = 0x00001222
CKM_GOST28147_MAC = 0x00001223
CKM_GOST28147_KEY_WRAP = 0x00001224
CKM_DSA_PARAMETER_GEN = 0x00002000
CKM_DH_PKCS_PARAMETER_GEN = 0x00002001
CKM_X9_42_DH_PARAMETER_GEN = 0x00002002
CKM_DSA_PROBABLISTIC_PARAMETER_GEN = 0x00002003
CKM_DSA_SHAWE_TAYLOR_PARAMETER_GEN = 0x00002004
CKM_AES_OFB = 0x00002104
CKM_AES_CFB64 = 0x00002105
CKM_AES_CFB8 = 0x00002106
CKM_AES_CFB128 = 0x00002107
CKM_AES_CFB1 = 0x00002108
CKM_AES_KEY_WRAP = 0x00002109
CKM_AES_KEY_WRAP_PAD = 0x0000210A
CKM_RSA_PKCS_TPM_1_1 = 0x00004001
CKM_RSA_PKCS_OAEP_TPM_1_1 = 0x00004002
CKM_VENDOR_DEFINED = 0x80000000
CKF_HW = 0x00000001
CKF_ENCRYPT = 0x00000100
CKF_DECRYPT = 0x00000200
CKF_DIGEST = 0x00000400
CKF_SIGN = 0x00000800
CKF_SIGN_RECOVER = 0x00001000
CKF_VERIFY = 0x00002000
CKF_VERIFY_RECOVER = 0x00004000
CKF_GENERATE = 0x00008000
CKF_GENERATE_KEY_PAIR = 0x00010000
CKF_WRAP = 0x00020000
CKF_UNWRAP = 0x00040000
CKF_DERIVE = 0x00080000
CKF_EC_F_P = 0x00100000
CKF_EC_F_2M = 0x00200000
CKF_EC_ECPARAMETERS = 0x00400000
CKF_EC_NAMEDCURVE = 0x00800000
CKF_EC_UNCOMPRESS = 0x01000000
CKF_EC_COMPRESS = 0x02000000
CKF_EXTENSION = 0x80000000
CKR_OK = 0x00000000
CKR_CANCEL = 0x00000001
CKR_HOST_MEMORY = 0x00000002
CKR_SLOT_ID_INVALID = 0x00000003
CKR_GENERAL_ERROR = 0x00000005
CKR_FUNCTION_FAILED = 0x00000006
CKR_ARGUMENTS_BAD = 0x00000007
CKR_NO_EVENT = 0x00000008
CKR_NEED_TO_CREATE_THREADS = 0x00000009
CKR_CANT_LOCK = 0x0000000A
CKR_ATTRIBUTE_READ_ONLY = 0x00000010
CKR_ATTRIBUTE_SENSITIVE = 0x00000011
CKR_ATTRIBUTE_TYPE_INVALID = 0x00000012
CKR_ATTRIBUTE_VALUE_INVALID = 0x00000013
CKR_ACTION_PROHIBITED = 0x0000001B
CKR_DATA_INVALID = 0x00000020
CKR_DATA_LEN_RANGE = 0x00000021
CKR_DEVICE_ERROR = 0x00000030
CKR_DEVICE_MEMORY = 0x00000031
CKR_DEVICE_REMOVED = 0x00000032
CKR_ENCRYPTED_DATA_INVALID = 0x00000040
CKR_ENCRYPTED_DATA_LEN_RANGE = 0x00000041
CKR_FUNCTION_CANCELED = 0x00000050
CKR_FUNCTION_NOT_PARALLEL = 0x00000051
CKR_FUNCTION_NOT_SUPPORTED = 0x00000054
CKR_KEY_HANDLE_INVALID = 0x00000060
CKR_KEY_SIZE_RANGE = 0x00000062
CKR_KEY_TYPE_INCONSISTENT = 0x00000063
CKR_KEY_NOT_NEEDED = 0x00000064
CKR_KEY_CHANGED = 0x00000065
CKR_KEY_NEEDED = 0x00000066
CKR_KEY_INDIGESTIBLE = 0x00000067
CKR_KEY_FUNCTION_NOT_PERMITTED = 0x00000068
CKR_KEY_NOT_WRAPPABLE = 0x00000069
CKR_KEY_UNEXTRACTABLE = 0x0000006A
CKR_MECHANISM_INVALID = 0x00000070
CKR_MECHANISM_PARAM_INVALID = 0x00000071
CKR_OBJECT_HANDLE_INVALID = 0x00000082
CKR_OPERATION_ACTIVE = 0x00000090
CKR_OPERATION_NOT_INITIALIZED = 0x00000091
CKR_PIN_INCORRECT = 0x000000A0
CKR_PIN_INVALID = 0x000000A1
CKR_PIN_LEN_RANGE = 0x000000A2
CKR_PIN_EXPIRED = 0x000000A3
CKR_PIN_LOCKED = 0x000000A4
CKR_SESSION_CLOSED = 0x000000B0
CKR_SESSION_COUNT = 0x000000B1
CKR_SESSION_HANDLE_INVALID = 0x000000B3
CKR_SESSION_PARALLEL_NOT_SUPPORTED = 0x000000B4
CKR_SESSION_READ_ONLY = 0x000000B5
CKR_SESSION_EXISTS = 0x000000B6
CKR_SESSION_READ_ONLY_EXISTS = 0x000000B7
CKR_SESSION_READ_WRITE_SO_EXISTS = 0x000000B8
CKR_SIGNATURE_INVALID = 0x000000C0
CKR_SIGNATURE_LEN_RANGE = 0x000000C1
CKR_TEMPLATE_INCOMPLETE = 0x000000D0
CKR_TEMPLATE_INCONSISTENT = 0x000000D1
CKR_TOKEN_NOT_PRESENT = 0x000000E0
CKR_TOKEN_NOT_RECOGNIZED = 0x000000E1
CKR_TOKEN_WRITE_PROTECTED = 0x000000E2
CKR_UNWRAPPING_KEY_HANDLE_INVALID = 0x000000F0
CKR_UNWRAPPING_KEY_SIZE_RANGE = 0x000000F1
CKR_UNWRAPPING_KEY_TYPE_INCONSISTENT = 0x000000F2
CKR_USER_ALREADY_LOGGED_IN = 0x00000100
CKR_USER_NOT_LOGGED_IN = 0x00000101
CKR_USER_PIN_NOT_INITIALIZED = 0x00000102
CKR_USER_TYPE_INVALID = 0x00000103
CKR_USER_ANOTHER_ALREADY_LOGGED_IN = 0x00000104
CKR_USER_TOO_MANY_TYPES = 0x00000105
CKR_WRAPPED_KEY_INVALID = 0x00000110
CKR_WRAPPED_KEY_LEN_RANGE = 0x00000112
CKR_WRAPPING_KEY_HANDLE_INVALID = 0x00000113
CKR_WRAPPING_KEY_SIZE_RANGE = 0x00000114
CKR_WRAPPING_KEY_TYPE_INCONSISTENT = 0x00000115
CKR_RANDOM_SEED_NOT_SUPPORTED = 0x00000120
CKR_RANDOM_NO_RNG = 0x00000121
CKR_DOMAIN_PARAMS_INVALID = 0x00000130
CKR_CURVE_NOT_SUPPORTED = 0x00000140
CKR_BUFFER_TOO_SMALL = 0x00000150
CKR_SAVED_STATE_INVALID = 0x00000160
CKR_INFORMATION_SENSITIVE = 0x00000170
CKR_STATE_UNSAVEABLE = 0x00000180
CKR_CRYPTOKI_NOT_INITIALIZED = 0x00000190
CKR_CRYPTOKI_ALREADY_INITIALIZED = 0x00000191
CKR_MUTEX_BAD = 0x000001A0
CKR_MUTEX_NOT_LOCKED = 0x000001A1
CKR_NEW_PIN_MODE = 0x000001B0
CKR_NEXT_OTP = 0x000001B1
CKR_EXCEEDED_MAX_ITERATIONS = 0x000001B5
CKR_FIPS_SELF_TEST_FAILED = 0x000001B6
CKR_LIBRARY_LOAD_FAILED = 0x000001B7
CKR_PIN_TOO_WEAK = 0x000001B8
CKR_PUBLIC_KEY_INVALID = 0x000001B9
CKR_FUNCTION_REJECTED = 0x00000200
CKR_VENDOR_DEFINED = 0x80000000
CKF_LIBRARY_CANT_CREATE_OS_THREADS = 0x00000001
CKF_OS_LOCKING_OK = 0x00000002
CKF_DONT_BLOCK = 1
CKG_MGF1_SHA1 = 0x00000001
CKG_MGF1_SHA256 = 0x00000002
CKG_MGF1_SHA384 = 0x00000003
CKG_MGF1_SHA512 = 0x00000004
CKG_MGF1_SHA224 = 0x00000005
CKZ_DATA_SPECIFIED = 0x00000001
CKD_NULL = 0x00000001
CKD_SHA1_KDF = 0x00000002
CKD_SHA1_KDF_ASN1 = 0x00000003
CKD_SHA1_KDF_CONCATENATE = 0x00000004
CKD_SHA224_KDF = 0x00000005
CKD_SHA256_KDF = 0x00000006
CKD_SHA384_KDF = 0x00000007
CKD_SHA512_KDF = 0x00000008
CKD_CPDIVERSIFY_KDF = 0x00000009
CKD_SHA3_224_KDF = 0x0000000A
CKD_SHA3_256_KDF = 0x0000000B
CKD_SHA3_384_KDF = 0x0000000C
CKD_SHA3_512_KDF = 0x0000000D
CKP_PKCS5_PBKD2_HMAC_SHA1 = 0x00000001
CKP_PKCS5_PBKD2_HMAC_GOSTR3411 = 0x00000002
CKP_PKCS5_PBKD2_HMAC_SHA224 = 0x00000003
CKP_PKCS5_PBKD2_HMAC_SHA256 = 0x00000004
CKP_PKCS5_PBKD2_HMAC_SHA384 = 0x00000005
CKP_PKCS5_PBKD2_HMAC_SHA512 = 0x00000006
CKP_PKCS5_PBKD2_HMAC_SHA512_224 = 0x00000007
CKP_PKCS5_PBKD2_HMAC_SHA512_256 = 0x00000008
CKZ_SALT_SPECIFIED = 0x00000001
CK_OTP_VALUE = 0
CK_OTP_PIN = 1
CK_OTP_CHALLENGE = 2
CK_OTP_TIME = 3
CK_OTP_COUNTER = 4
CK_OTP_FLAGS = 5
CK_OTP_OUTPUT_LENGTH = 6
CK_OTP_OUTPUT_FORMAT = 7
CKF_NEXT_OTP = 0x00000001
CKF_EXCLUDE_TIME = 0x00000002
CKF_EXCLUDE_COUNTER = 0x00000004
CKF_EXCLUDE_CHALLENGE = 0x00000008
CKF_EXCLUDE_PIN = 0x00000010
CKF_USER_FRIENDLY_OTP = 0x00000020
)


@@ -1,17 +0,0 @@
/.vscode
/cmd/notary-server/notary-server
/cmd/notary-server/local.config.*
/cmd/notary-signer/notary-signer
/cmd/notary-signer/local.config.*
/cmd/escrow/escrow
/cmd/escrow/local.config.*
cover
bin
cross
.cover
*.swp
.idea
*.iml
*.test
coverage*.txt
gosec_output.csv


@@ -1,156 +0,0 @@
# Changelog
## [v0.7.0](https://github.com/docker/notary/releases/tag/v0.7.0) 12/01/2021
+ Switch to Go modules [#1523](https://github.com/theupdateframework/notary/pull/1523)
+ Use golang/x/crypto for ed25519 [#1344](https://github.com/theupdateframework/notary/pull/1344)
+ Update Go version
+ Update dependency versions
+ Fixes from using Gosec for source analysis
## [v0.6.1](https://github.com/docker/notary/releases/tag/v0.6.1) 04/10/2018
+ Fixed bug where CLI requested admin privileges for all metadata operations, including listing targets on a repo [#1315](https://github.com/theupdateframework/notary/pull/1315)
+ Prevented notary signer from being dumpable or ptraceable in Linux, except in debug mode [#1327](https://github.com/theupdateframework/notary/pull/1327)
+ Bumped JWT dependency to fix potential Invalid Curve Attack on NIST curves within ECDH key management [#1334](https://github.com/theupdateframework/notary/pull/1334)
+ If the home directory cannot be found, log a warning instead of erroring out [#1318](https://github.com/theupdateframework/notary/pull/1318)
+ Bumped go version and various dependencies [#1323](https://github.com/theupdateframework/notary/pull/1323) [#1332](https://github.com/theupdateframework/notary/pull/1332) [#1335](https://github.com/theupdateframework/notary/pull/1335) [#1336](https://github.com/theupdateframework/notary/pull/1336)
+ Various internal and documentation fixes [#1312](https://github.com/theupdateframework/notary/pull/1312) [#1313](https://github.com/theupdateframework/notary/pull/1313) [#1319](https://github.com/theupdateframework/notary/pull/1319) [#1320](https://github.com/theupdateframework/notary/pull/1320) [#1324](https://github.com/theupdateframework/notary/pull/1324) [#1326](https://github.com/theupdateframework/notary/pull/1326) [#1328](https://github.com/theupdateframework/notary/pull/1328) [#1329](https://github.com/theupdateframework/notary/pull/1329) [#1333](https://github.com/theupdateframework/notary/pull/1333)
## [v0.6.0](https://github.com/docker/notary/releases/tag/v0.6.0) 02/28/2018
+ **The project has been moved from https://github.com/docker/notary to https://github.com/theupdateframework/notary, as it has been accepted into the CNCF. Downstream users should update their go imports.**
+ Removed support for RSA-key exchange ciphers supported by the server and signer and require TLS >= 1.2 for the server and signer. [#1307](https://github.com/theupdateframework/notary/pull/1307)
+ `libykcs11` can be found in several additional locations on Fedora. [#1286](https://github.com/theupdateframework/notary/pull/1286/)
+ If a certificate is used as a delegation public key, notary no longer warns if the certificate has expired, since notary should be relying on the role expiry instead. [#1263](https://github.com/theupdateframework/notary/pull/1263)
+ An error is now returned when importing keys if there were invalid PEM blocks. [#1260](https://github.com/theupdateframework/notary/pull/1260)
+ Notary server authentication credentials can now be provided as an environment variable `NOTARY_AUTH`, which should contain a base64-encoded "username:password" value (see the sketch after this list). [#1246](https://github.com/theupdateframework/notary/pull/1246)
+ Changefeeds are now supported for RethinkDB as well as SQL servers. [#1214](https://github.com/theupdateframework/notary/pull/1214)
+ Notary CLI will now time out after 30 seconds if a username and password are not provided when authenticating to a notary server, fixing an issue where scripts for the notary CLI may hang forever. [#1200](https://github.com/theupdateframework/notary/pull/1200)
+ Fixed potential race condition in the signer keystore. [#1198](https://github.com/theupdateframework/notary/pull/1198)
+ Notary now no longer provides the option to generate RSA keys for a repository, but externally generated RSA keys can still be imported as keys for a repository. [#1191](https://github.com/theupdateframework/notary/pull/1191)
+ Fixed bug where the notary client would `ioutil.ReadAll` responses from the server without limiting the size. [#1186](https://github.com/theupdateframework/notary/pull/1186)
+ Default notary CLI log level is now `warn`, and if the `-v` option is passed, it is at `info`. [#1179](https://github.com/theupdateframework/notary/pull/1179)
+ Example Postgres config now includes an example of mutual TLS authentication between the server/signer and Postgres. [#1160](https://github.com/theupdateframework/notary/pull/1160) [#1163](https://github.com/theupdateframework/notary/pull/1163/)
+ Fixed an error where piping the server authentication credentials via STDIN when scripting the notary CLI did not work. [#1155](https://github.com/theupdateframework/notary/pull/1155)
+ If the server and signer configurations forget to specify `parseTime=true` when using MySQL, notary server and signer will automatically add the option. [#1150](https://github.com/theupdateframework/notary/pull/1150)
+ Custom metadata can now be provided and read on a target when using the notary client as a library (not yet exposed on the CLI). [#1146](https://github.com/theupdateframework/notary/pull/1146)
+ `notary init` now accepts a `--root-cert` and `--root-key` flag for use with privately generated certificates and keys. [#1144](https://github.com/theupdateframework/notary/pull/1144)
+ `notary key generate` now accepts a `--role` flag as well as a `--output` flag. This means it can generate new targets or delegation keys, and it can also output keys to a file instead of storing it in the default notary key store. [#1134](https://github.com/theupdateframework/notary/pull/1134)
+ Newly generated keys are now stored encrypted and encoded in PKCS#8 format. **This is not forwards-compatible with notary<0.6.0 and docker<17.12.x. Also please note that docker>=17.12.x is not forwards-compatible with notary<0.6.0.** [#1130](https://github.com/theupdateframework/notary/pull/1130) [#1201](https://github.com/theupdateframework/notary/pull/1201)
+ Added support for wildcarded certificate IDs in the trustpinning configuration [#1126](https://github.com/theupdateframework/notary/pull/1126)
+ Added support for using the client against notary servers that are hosted as a subpath under another server (e.g. https://domain.com/notary instead of https://notary.com) [#1108](https://github.com/theupdateframework/notary/pull/1108)
+ If no changes were made to the targets file, you are no longer required to sign the target [#1104](https://github.com/theupdateframework/notary/pull/1104)
+ escrow placeholder [#1096](https://github.com/theupdateframework/notary/pull/1096)
+ Added support for wildcard suffixes for root certificates CNs for root keys, so that a single root certificate would be valid for multiple repositories [#1088](https://github.com/theupdateframework/notary/pull/1088)
+ Root key rotations no longer require all previous root keys to sign new root metadata. [#942](https://github.com/theupdateframework/notary/pull/942).
+ New keys are trusted if the root metadata file specifying the new key was signed by the previous root key/threshold
+ Root metadata can now be requested by version from the server, allowing clients with older root metadata to validate each new version one by one up to the current metadata
+ `notary key rotate` now accepts a flag specifying which key to rotate to [#942](https://github.com/theupdateframework/notary/pull/942)
+ Refactoring of the client to make it easier to use as a library and to inject dependencies:
+ References to GUN have now been changed to "imagename". [#1081](https://github.com/theupdateframework/notary/pull/1081)
+ `NewNotaryRepository` can now be provided with a remote store and changelist, as opposed to always constructing its own. [#1094](https://github.com/theupdateframework/notary/pull/1094)
+ If needed, the notary repository will be initialized first when publishing. [#1105](https://github.com/theupdateframework/notary/pull/1105)
+ `NewNotaryRepository` now requires a non-nil cache store. [#1185](https://github.com/theupdateframework/notary/pull/1185)
+ The "No valid trust data" error is now typed. [#1212](https://github.com/theupdateframework/notary/pull/1212)
+ `TUFClient` was previously mistakenly exported, and is now unexported. [#1215](https://github.com/theupdateframework/notary/pull/1215)
+ The notary client now has a `Repository` interface type to standardize `client.NotaryRepository`. [#1220](https://github.com/theupdateframework/notary/pull/1220)
+ The constructor functions `NewFileCachedNotaryRepository` and `NewNotaryRepository` have been renamed, respectively, to `NewFileCachedRepository` and `NewRepository` to reduce redundancy. [#1226](https://github.com/theupdateframework/notary/pull/1226)
+ `NewRepository` returns an interface as opposed to the concrete type `NotaryRepository` it previously did. `NotaryRepository` is also now an unexported concrete type. [#1226](https://github.com/theupdateframework/notary/pull/1226)
+ Key import/export logic has been moved from the `utils` package to the `trustmanager` package. [#1250](https://github.com/theupdateframework/notary/pull/1250)
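A minimal sketch of producing the `NOTARY_AUTH` value described above, using only the Go standard library; the credentials are placeholders, and nothing here is taken from the Notary codebase:

```go
package main

import (
	"encoding/base64"
	"fmt"
	"os"
)

func main() {
	// NOTARY_AUTH holds base64("username:password"); these credentials are placeholders.
	creds := base64.StdEncoding.EncodeToString([]byte("username:password"))
	os.Setenv("NOTARY_AUTH", creds)
	fmt.Println(os.Getenv("NOTARY_AUTH")) // prints dXNlcm5hbWU6cGFzc3dvcmQ=
}
```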
## [v0.5.0](https://github.com/docker/notary/releases/tag/v0.5.0) 11/14/2016
+ Non-certificate public keys in PEM format can now be added to delegation roles [#965](https://github.com/docker/notary/pull/965)
+ PostgreSQL support as a storage backend for Server and Signer [#920](https://github.com/docker/notary/pull/920)
+ Notary server's health check now fails if it cannot connect to the signer, since no new repositories can be created and existing repositories cannot be updated if the server cannot reach the signer [#952](https://github.com/docker/notary/pull/952)
+ Server runs its connectivity healthcheck to the signer once every 10 seconds instead of once every minute. [#902](https://github.com/docker/notary/pull/902)
+ The keys on disk are now stored in the `~/.notary/private` directory, rather than in a key hierarchy that separates them by GUN and by role. Notary will automatically migrate old-style directory layouts to the new style. **This is not forwards-compatible against notary<0.4.2 and docker<=1.12** [#872](https://github.com/docker/notary/pull/872)
+ A new changefeed API has been added to Notary Server. It is only supported when using one of the relational database backends: MySQL, PostgreSQL, or SQLite. [#1019](https://github.com/docker/notary/pull/1019)
## [v0.4.3](https://github.com/docker/notary/releases/tag/v0.4.3) 1/3/2017
+ Fix build tags for static notary client binaries in linux [#1039](https://github.com/docker/notary/pull/1039)
+ Fix key import for exported delegation keys [#1067](https://github.com/docker/notary/pull/1067)
## [v0.4.2](https://github.com/docker/notary/releases/tag/v0.4.2) 9/30/2016
+ Bump the cross compiler to golang 1.7.1, since [1.6.3 builds binaries that could have non-deterministic bugs in OS X Sierra](https://groups.google.com/forum/#!msg/golang-dev/Jho5sBHZgAg/cq6d97S1AwAJ) [#984](https://github.com/docker/notary/pull/984)
## [v0.4.1](https://github.com/docker/notary/releases/tag/v0.4.1) 9/27/2016
+ Preliminary Windows support for notary client [#970](https://github.com/docker/notary/pull/970)
+ Output message to CLI when repo changes have been successfully published [#974](https://github.com/docker/notary/pull/974)
+ Improved error messages for client authentication errors and for the witness command [#972](https://github.com/docker/notary/pull/972)
+ Support for finding keys that are anywhere in the notary directory's "private" directory, not just under "private/root_keys" or "private/tuf_keys" [#981](https://github.com/docker/notary/pull/981)
+ Previously, on any error updating, the client would fall back on the cache. Now we only do so if there is a network error or if the server is unavailable or missing the TUF data. Invalid TUF data will cause the update to fail - for example if there was an invalid root rotation. [#884](https://github.com/docker/notary/pull/884) [#982](https://github.com/docker/notary/pull/982)
## [v0.4.0](https://github.com/docker/notary/releases/tag/v0.4.0) 9/21/2016
+ Server-managed key rotations [#889](https://github.com/docker/notary/pull/889)
+ Remove `timestamp_keys` table, which stored redundant information [#889](https://github.com/docker/notary/pull/889)
+ Introduce `notary delete` command to delete local and/or remote repo data [#895](https://github.com/docker/notary/pull/895)
+ Introduce `notary witness` command to stage signatures for specified roles [#875](https://github.com/docker/notary/pull/875)
+ Add `-p` flag to offline commands to attempt auto-publish [#886](https://github.com/docker/notary/pull/886) [#912](https://github.com/docker/notary/pull/912) [#923](https://github.com/docker/notary/pull/923)
+ Introduce `notary reset` command to manage staged changes [#959](https://github.com/docker/notary/pull/959) [#856](https://github.com/docker/notary/pull/856)
+ Add `--rootkey` flag to `notary init` to provide a private root key for a repo [#801](https://github.com/docker/notary/pull/801)
+ Introduce `notary delegation purge` command to remove a specified key from all delegations [#855](https://github.com/docker/notary/pull/855)
+ Removed HTTP endpoint from notary-signer [#870](https://github.com/docker/notary/pull/870)
+ Refactored and unified key storage [#825](https://github.com/docker/notary/pull/825)
+ Batched key import and export now operate on PEM files (potentially with multiple blocks) instead of ZIP [#825](https://github.com/docker/notary/pull/825) [#882](https://github.com/docker/notary/pull/882)
+ Add full database integration test-suite [#824](https://github.com/docker/notary/pull/824) [#854](https://github.com/docker/notary/pull/854) [#863](https://github.com/docker/notary/pull/863)
+ Improve notary-server, trust pinning, and yubikey logging [#798](https://github.com/docker/notary/pull/798) [#858](https://github.com/docker/notary/pull/858) [#891](https://github.com/docker/notary/pull/891)
+ Warn if certificates for root or delegations are near expiry [#802](https://github.com/docker/notary/pull/802)
+ Warn if role metadata is near expiry [#786](https://github.com/docker/notary/pull/786)
+ Reformat CLI table output to use the `text/tabwriter` package [#809](https://github.com/docker/notary/pull/809)
+ Fix passphrase retrieval attempt counting and terminal detection [#906](https://github.com/docker/notary/pull/906)
+ Fix listing nested delegations [#864](https://github.com/docker/notary/pull/864)
+ Bump go version to 1.6.3, fix go1.7 compatibility [#851](https://github.com/docker/notary/pull/851) [#793](https://github.com/docker/notary/pull/793)
+ Convert docker-compose files to v2 format [#755](https://github.com/docker/notary/pull/755)
+ Validate root rotations against trust pinning [#800](https://github.com/docker/notary/pull/800)
+ Update fixture certificates for two-year expiry window [#951](https://github.com/docker/notary/pull/951)
## [v0.3.0](https://github.com/docker/notary/releases/tag/v0.3.0) 5/11/2016
+ Root rotations
+ RethinkDB support as a storage backend for Server and Signer
+ A new TUF repo builder that merges server and client validation
+ Trust Pinning: configure known good key IDs and CAs to replace TOFU.
+ Add --input, --output, and --quiet flags to notary verify command
+ Remove local certificate store. It was redundant as all certs were also stored in the cached root.json
+ Cleanup of dead code in client side key storage logic
+ Update project to Go 1.6.1
+ Reorganize vendoring to meet Go 1.6+ standard. Still using Godeps to manage vendored packages
+ Add targets by hash, no longer necessary to have the original target data available
+ Active Key ID verification during signature verification
+ Switch all testing from assert to require, reduces noise in test runs
+ Use alpine based images for smaller downloads and faster setup times
+ Clean up out of date signatures when re-signing content
+ Set cache control headers on HTTP responses from Notary Server
+ Add sha512 support for targets
+ Add environment variable for delegation key passphrase
+ Reduce permissions requested by client from token server
+ Update formatting for delegation list output
+ Move SQLite dependency to tests only so it doesn't get built into official images
+ Fixed asking for password to list private repositories
+ Enable using notary client with username/password in a scripted fashion
+ Fix static compilation of client
+ Enforce TUF version to be >= 1, previously 0 was acceptable although unused
+ json.RawMessage should always be used as *json.RawMessage due to concepts of addressability in Go and effects on encoding
## [v0.2](https://github.com/docker/notary/releases/tag/v0.2.0) 2/24/2016
+ Add support for delegation roles in `notary` server and client
+ Add `notary CLI` commands for managing delegation roles: `notary delegation`
+ `add`, `list` and `remove` subcommands
+ Enhance `notary CLI` commands for adding targets to delegation roles
+ `notary add --roles` and `notary remove --roles` to manipulate targets for delegations
+ Support for rotating the snapshot key to one managed by the `notary` server
+ Add consistent download functionality to download metadata and content by checksum
+ Update `docker-compose` configuration to use official mariadb image
+ deprecate `notarymysql`
+ default to using a volume for `data` directory
+ use separate databases for `notary-server` and `notary-signer` with separate users
+ Add `notary CLI` command for changing private key passphrases: `notary key passwd`
+ Enhance `notary CLI` commands for importing and exporting keys
+ Change default `notary CLI` log level to fatal, introduce new verbose (error-level) and debug-level settings
+ Store roles as PEM headers in private keys, incompatible with previous notary v0.1 key format
+ No longer store keys as `<KEY_ID>_role.key`, instead store as `<KEY_ID>.key`; new private keys from new notary clients will crash old notary clients
+ Support logging as JSON format on server and signer
+ Support mutual TLS between notary client and notary server
## [v0.1](https://github.com/docker/notary/releases/tag/v0.1) 11/15/2015
+ Initial non-alpha `notary` version
+ Implement TUF (the update framework) with support for root, targets, snapshot, and timestamp roles
+ Add PKCS11 interface to store and sign with keys in HSMs (i.e. Yubikey)

View File

@ -1,43 +0,0 @@
## CNCF Community Code of Conduct v1.0
### Contributor Code of Conduct
As contributors and maintainers of this project, and in the interest of fostering
an open and welcoming community, we pledge to respect all people who contribute
through reporting issues, posting feature requests, updating documentation,
submitting pull requests or patches, and other activities.
We are committed to making participation in this project a harassment-free experience for
everyone, regardless of level of experience, gender, gender identity and expression,
sexual orientation, disability, personal appearance, body size, race, ethnicity, age,
religion, or nationality.
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery
* Personal attacks
* Trolling or insulting/derogatory comments
* Public or private harassment
* Publishing others' private information, such as physical or electronic addresses,
without explicit permission
* Other unethical or unprofessional conduct.
Project maintainers have the right and responsibility to remove, edit, or reject
comments, commits, code, wiki edits, issues, and other contributions that are not
aligned to this Code of Conduct. By adopting this Code of Conduct, project maintainers
commit themselves to fairly and consistently applying these principles to every aspect
of managing this project. Project maintainers who do not follow or enforce the Code of
Conduct may be permanently removed from the project team.
This code of conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community.
Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting a CNCF project maintainer, Sarah Novotny <sarahnovotny@google.com>, and/or Dan Kohn <dan@linuxfoundation.org>.
This Code of Conduct is adapted from the Contributor Covenant
(https://contributor-covenant.org), version 1.2.0, available at
https://contributor-covenant.org/version/1/2/0/
### CNCF Events Code of Conduct
CNCF events are governed by the Linux Foundation [Code of Conduct](https://events.linuxfoundation.org/events/cloudnativecon/attend/code-of-conduct) available on the event page. This is designed to be compatible with the above policy and also includes more details on responding to incidents.

View File

@ -1,95 +0,0 @@
# Contributing to notary
## Before reporting an issue...
### If your problem is with...
- automated builds
- your account on the [Docker Hub](https://hub.docker.com/)
- any other [Docker Hub](https://hub.docker.com/) issue
Then please do not report your issue here - you should instead report it to [https://support.docker.com](https://support.docker.com)
### If you...
- need help setting up notary
- can't figure out something
- are not sure what's going on or what your problem is
Then please do not open an issue here yet - you should first try one of the following support forums:
- irc: #docker-trust on freenode
## Reporting an issue properly
By following these simple rules you will get better and faster feedback on your issue.
- search the bugtracker for an already reported issue
### If you found an issue that describes your problem:
- please read other user comments first, and confirm this is the same issue: a given error condition might be indicative of different problems - you may also find a workaround in the comments
- please refrain from adding "same thing here" or "+1" comments
- you don't need to comment on an issue to get notified of updates: just hit the "subscribe" button
- comment if you have some new, technical and relevant information to add to the case
### If you have not found an existing issue that describes your problem:
1. create a new issue, with a succinct title that describes your issue:
- bad title: "It doesn't work with my docker"
- good title: "Publish fail: 400 error with E_INVALID_DIGEST"
2. copy the output of:
- `notary version` or `docker version`
3. Run `notary` or `docker` with the `-D` option for debug output, and please include a copy of the command and the output.
4. If relevant, copy your `notaryserver` and `notarysigner` logs that show the error (this is likely the output from running `docker-compose up`)
## Contributing a patch for a known bug, or a small correction
You should follow the basic GitHub workflow:
1. fork
2. commit a change
3. make sure the tests pass
4. PR
Additionally, you must [sign your commits](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#sign-your-work). It's very simple:
- configure your name with git: `git config user.name "Real Name" && git config user.email mail@example.com`
- sign your commits using `-s`: `git commit -s -m "My commit"`
Some simple rules to ensure quick merge:
- clearly point to the issue(s) you want to fix in your PR comment (e.g., `closes #12345`)
- prefer multiple (smaller) PRs addressing individual issues over a big one trying to address multiple issues at once
- if you need to amend your PR following comments, please squash instead of adding more commits
- if fixing a bug or adding a feature, please add or update the relevant `CHANGELOG.md` entry with your pull request number
and a description of the change
## Contributing new features
You are heavily encouraged to first discuss what you want to do. You can do so on the irc channel, or by opening an issue that clearly describes the use case you want to fulfill, or the problem you are trying to solve.
If this is a major new feature, you should then submit a proposal that describes your technical solution and reasoning.
If you did discuss it first, this will likely be greenlighted very fast. It's advisable to address all feedback on this proposal before starting actual work.
Then you should submit your implementation, clearly linking to the issue (and possible proposal).
Your PR will be reviewed by the community, then ultimately by the project maintainers, before being merged.
It's mandatory to:
- interact respectfully with other community members and maintainers - more generally, you are expected to abide by the [Docker community rules](https://github.com/docker/docker/blob/master/CONTRIBUTING.md#docker-community-guidelines)
- address maintainers' comments and modify your submission accordingly
- write tests for any new code
Complying with these simple rules will greatly accelerate the review process, and will ensure you have a pleasant experience in contributing code to the project.
## Review and Development notes
- All merges require LGTMs from any 2 maintainers.
- We use the git flow model (as best we can) using the `releases` branch as the stable branch, and the `master` branch as the development branch. When we get near a potential release, a release branch (`release/<semver>`) will be created from `master`. Any PRs that should go into the release should be made against that branch. Hotfixes for a minor release will be added to the branch `hotfix/<semver>`.
## Vendoring new dependency versions
We use [VNDR](https://github.com/LK4D4/vndr); please update `vendor.conf` with the new dependency or the new version, and run
`vndr <top level package name>`.

View File

@ -1,4 +0,0 @@
David Williamson <david.williamson@docker.com> (github: davidwilliamson)
Aaron Lehmann <aaron.lehmann@docker.com> (github: aaronlehmann)
Lewis Marshall <lewis@flynn.io> (github: lmars)
Jonathan Rudenberg <jonathan@flynn.io> (github: titanous)

View File

@ -1,27 +0,0 @@
FROM golang:1.14.1
RUN apt-get update && apt-get install -y \
curl \
clang \
libsqlite3-dev \
patch \
tar \
xz-utils \
python \
python-pip \
python-setuptools \
--no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
RUN useradd -ms /bin/bash notary \
&& pip install codecov \
&& go get golang.org/x/lint/golint github.com/fzipp/gocyclo github.com/client9/misspell/cmd/misspell github.com/gordonklaus/ineffassign github.com/securego/gosec/cmd/gosec/...
ENV NOTARYDIR /go/src/github.com/theupdateframework/notary
COPY . ${NOTARYDIR}
RUN chmod -R a+rw /go && chmod 0600 ${NOTARYDIR}/fixtures/database/*
ENV GO111MODULE=on
WORKDIR ${NOTARYDIR}

View File

@ -1,7 +0,0 @@
// Only run on Linux atm
wrappedNode(label: 'ubuntu && ec2 && docker-edge') {
deleteDir()
stage "checkout"
checkout scm
}

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2015 Docker, Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,70 +0,0 @@
# Notary maintainers file
#
# This file describes who runs the theupdateframework/notary project and how.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
# This file is compiled into the MAINTAINERS file in docker/opensource.
#
[Org]
[Org."Core maintainers"]
people = [
"cyli",
"diogomonica",
"endophage",
"ecordell",
"hukeping",
"justincormack",
"nathanmccauley",
"riyazdf",
]
[people]
# A reference list of all people associated with the project.
# All other sections should refer to people by their canonical key
# in the people section.
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.cyli]
Name = "Ying Li"
Email = "ying.li@docker.com"
GitHub = "cyli"
[people.diogomonica]
Name = "Diogo Monica"
Email = "diogo@docker.com"
GitHub = "diogomonica"
[people.endophage]
Name = "David Lawrence"
Email = "david.lawrence@docker.com"
GitHub = "endophage"
[people.ecordell]
Name = "Evan Cordell"
Email = "evan.cordell@coreos.com"
GitHub = "ecordell"
[people.hukeping]
Name = "Hu Keping"
Email = "hukeping@huawei.com"
GitHub = "hukeping"
[people.justincormack]
Name = "Justin Cormack"
Email = "justin.cormack@docker.com"
GitHub = "justincormack"
[people.nathanmccauley]
Name = "Nathan McCauley"
Email = "nathan.mccauley@docker.com"
GitHub = "nathanmccauley"
[people.riyazdf]
Name = "Riyaz Faizullabhoy"
Email = "riyazdf@berkeley.edu"
GitHub = "riyazdf"

View File

@ -1,22 +0,0 @@
# Notary maintainers alumni file
#
# This file describes past maintainers who have stepped down from the role.
# This is a living document - if you see something out of date or missing, speak up!
#
# It is structured to be consumable by both humans and programs.
# To extract its contents programmatically, use any TOML-compliant parser.
#
[Org]
[Org."Notary Alumni"]
people = [
"dmcgowan",
]
[people]
# ADD YOURSELF HERE IN ALPHABETICAL ORDER
[people.dmcgowan]
Name = "Derek McGowan"
Email = "derek@docker.com"
GitHub = "dmcgowan"

View File

@ -1,39 +0,0 @@
# Maintainers Rules
This document lays out some basic rules and guidelines all maintainers are expected to follow.
Changes to the [Acceptance Criteria](#hard-acceptance-criteria-for-merging-a-pr) for merging PRs require a ceiling(two-thirds) supermajority from the maintainers.
Changes to the [Repo Guidelines](#repo-guidelines) require a simple majority.
## Hard Acceptance Criteria for merging a PR:
- 2 LGTMs are required when merging a PR
- If there is obviously still discussion going on in the PR, even with 2 LGTMs, let the discussion resolve before merging. If you're not sure, reach out to the maintainers involved in the discussion.
- All checks must be green
- There are limited mitigating circumstances for this, like if the docs builds are just broken and that's the only test failing.
- Adding or removing a check requires simple majority approval from the maintainers.
## Repo Guidelines:
- Consistency is vital to keep complexity low and understandable.
- Automate as much as possible (we don't have guidelines about coding style, for example, because we've automated fmt, vet, lint, etc…).
- Try to keep PRs small and focused (this is not always possible, e.g. builder refactor, storage refactor, etc… but a good target).
## Process for becoming a maintainer:
- Invitation is proposed by an existing maintainer.
- Ceiling(two-thirds) supermajority approval from existing maintainers (including vote of proposing maintainer) required to accept proposal.
- Newly approved maintainer submits PR adding themselves to the MAINTAINERS file.
- Existing maintainers publicly mark their approval on the PR.
- Existing maintainer updates repository permissions to grant write access to new maintainer.
- New maintainer merges their PR.
## Removing maintainers
It is preferable that a maintainer gracefully removes themselves from the MAINTAINERS file if they are
aware they will no longer have the time or motivation to contribute to the project. Maintainers that
have been inactive in the repo for a period of at least one year should be contacted to ask if they
wish to be removed.
In the case that an inactive maintainer is unresponsive for any reason, a ceiling(two-thirds) supermajority
vote of the existing maintainers can be used to approve their removal from the MAINTAINERS file, and revoke
their merge permissions on the repository.

View File

@ -1,205 +0,0 @@
# Set an output prefix, which is the local directory if not specified
PREFIX?=$(shell pwd)
GOFLAGS := -mod=vendor
# Populate version variables
# Add to compile time flags
NOTARY_PKG := github.com/theupdateframework/notary
NOTARY_VERSION := $(shell cat NOTARY_VERSION)
GITCOMMIT := $(shell git rev-parse --short HEAD)
GITUNTRACKEDCHANGES := $(shell git status --porcelain --untracked-files=no)
ifneq ($(GITUNTRACKEDCHANGES),)
GITCOMMIT := $(GITCOMMIT)-dirty
endif
CTIMEVAR=-X $(NOTARY_PKG)/version.GitCommit=$(GITCOMMIT) -X $(NOTARY_PKG)/version.NotaryVersion=$(NOTARY_VERSION)
GO_LDFLAGS=-ldflags "-w $(CTIMEVAR)"
GO_LDFLAGS_STATIC=-ldflags "-w $(CTIMEVAR) -extldflags -static"
GOOSES = darwin linux windows
NOTARY_BUILDTAGS ?= pkcs11
NOTARYDIR := /go/src/github.com/theupdateframework/notary
# check to be sure pkcs11 lib is always imported with a build tag
GO_LIST_PKCS11 := $(shell go list -tags "${NOTARY_BUILDTAGS}" -e -f '{{join .Deps "\n"}}' ./... | grep -v /vendor/ | xargs go list -e -f '{{if not .Standard}}{{.ImportPath}}{{end}}' | grep -q pkcs11)
ifeq ($(GO_LIST_PKCS11),)
$(info pkcs11 import was not found anywhere without a build tag, yay)
else
$(error You are importing pkcs11 somewhere and not using a build tag)
endif
_empty :=
_space := $(_empty) $(_empty)
# go cover test variables
COVERPROFILE?=coverage.txt
COVERMODE=atomic
PKGS ?= $(shell go list -tags "${NOTARY_BUILDTAGS}" ./... | grep -v /vendor/ | tr '\n' ' ')
.PHONY: clean all lint build test binaries cross cover docker-images notary-dockerfile
.DELETE_ON_ERROR: cover
.DEFAULT: default
all: clean lint build test binaries
# This only needs to be generated by hand when cutting full releases.
version/version.go:
./version/version.sh > $@
${PREFIX}/bin/notary-server: NOTARY_VERSION $(shell find . -type f -name '*.go')
@echo "+ $@"
@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/notary-server
${PREFIX}/bin/notary: NOTARY_VERSION $(shell find . -type f -name '*.go')
@echo "+ $@"
@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/notary
${PREFIX}/bin/notary-signer: NOTARY_VERSION $(shell find . -type f -name '*.go')
@echo "+ $@"
@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/notary-signer
${PREFIX}/bin/escrow: NOTARY_VERSION $(shell find . -type f -name '*.go')
@echo "+ $@"
@go build -tags ${NOTARY_BUILDTAGS} -o $@ ${GO_LDFLAGS} ./cmd/escrow
ifeq ($(shell uname -s),Darwin)
${PREFIX}/bin/static/notary-server:
@echo "notary-server: static builds not supported on OS X"
${PREFIX}/bin/static/notary-signer:
@echo "notary-signer: static builds not supported on OS X"
${PREFIX}/bin/static/notary:
@echo "notary: static builds not supported on OS X"
else
${PREFIX}/bin/static/notary-server: NOTARY_VERSION $(shell find . -type f -name '*.go')
@echo "+ $@"
@(export CGO_ENABLED=0; go build -tags "${NOTARY_BUILDTAGS} netgo" -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary-server)
${PREFIX}/bin/static/notary-signer: NOTARY_VERSION $(shell find . -type f -name '*.go')
@echo "+ $@"
@(export CGO_ENABLED=0; go build -tags "${NOTARY_BUILDTAGS} netgo" -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary-signer)
${PREFIX}/bin/static/notary:
@echo "+ $@"
@go build -tags "${NOTARY_BUILDTAGS} netgo" -o $@ ${GO_LDFLAGS_STATIC} ./cmd/notary
endif
# run all lint functionality - excludes Godep directory, vendoring, binaries, python tests, and git files
lint:
@echo "+ $@: golint, go vet, go fmt, gocycle, misspell, ineffassign"
# golint
@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec golint {} \; | tee /dev/stderr)"
# gofmt
@test -z "$$(gofmt -s -l .| grep -v .pb. | grep -v vendor/ | tee /dev/stderr)"
# govet
ifeq ($(shell uname -s), Darwin)
@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs echo "This file should end with '_test':" | tee /dev/stderr)"
else
@test -z "$(shell find . -iname *test*.go | grep -v _test.go | grep -v vendor | xargs -r echo "This file should end with '_test':" | tee /dev/stderr)"
endif
@test -z "$$(go vet -printf=false . 2>&1 | grep -v vendor/ | tee /dev/stderr)"
# gocyclo - we require cyclomatic complexity to be < 16
@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec gocyclo -over 15 {} \; | tee /dev/stderr)"
# misspell - requires that the following be run first:
# go get -u github.com/client9/misspell/cmd/misspell
@test -z "$$(find . -type f | grep -v vendor/ | grep -v bin/ | grep -v misc/ | grep -v .git/ | grep -v \.pdf | xargs misspell | tee /dev/stderr)"
# ineffassign - requires that the following be run first:
# go get -u github.com/gordonklaus/ineffassign
@test -z "$(shell find . -type f -name "*.go" -not -path "./vendor/*" -not -name "*.pb.*" -exec ineffassign {} \; | tee /dev/stderr)"
# gosec - requires that the following be run first:
# go get -u github.com/securego/gosec/cmd/gosec/...
@rm -f gosec_output.csv
@gosec -fmt=csv -out=gosec_output.csv -exclude=G104,G304 ./... || (cat gosec_output.csv >&2; exit 1)
build:
@echo "+ $@"
@go build -tags "${NOTARY_BUILDTAGS}" -v ${GO_LDFLAGS} $(PKGS)
# When running `go test ./...`, it runs all the suites in parallel, which causes
# problems when running with a yubikey
test: TESTOPTS =
test:
@echo Note: when testing with a yubikey plugged in, make sure to include 'TESTOPTS="-p 1"'
@echo "+ $@ $(TESTOPTS)"
@echo
go test -tags "${NOTARY_BUILDTAGS}" $(TESTOPTS) $(PKGS)
integration: TESTDB = mysql
integration: clean
buildscripts/integrationtest.sh $(TESTDB)
testdb: TESTDB = mysql
testdb:
buildscripts/dbtests.sh $(TESTDB)
protos:
@protoc --go_out=plugins=grpc:. proto/*.proto
# This allows coverage for a package to come from tests in different package.
# Requires that the following:
# go get github.com/wadey/gocovmerge; go install github.com/wadey/gocovmerge
#
# be run first
gen-cover:
@python -u buildscripts/covertest.py --tags "$(NOTARY_BUILDTAGS)" --pkgs="$(PKGS)" --testopts="${TESTOPTS}"
# Generates the cover binaries and runs them all in serial, so this can be
# used to run all tests with a yubikey without any problems
cover: gen-cover covmerge
@go tool cover -html="$(COVERPROFILE)"
# Generates the cover binaries and runs them all in serial, so this can be
# used to run all tests with a yubikey without any problems
ci: override TESTOPTS = -race
# Codecov knows how to merge multiple coverage files, so covmerge is not needed
ci: gen-cover
yubikey-tests: override PKGS = github.com/theupdateframework/notary/cmd/notary github.com/theupdateframework/notary/trustmanager/yubikey
yubikey-tests: ci
covmerge:
@gocovmerge $(shell find . -name coverage*.txt | tr "\n" " ") > $(COVERPROFILE)
@go tool cover -func="$(COVERPROFILE)"
clean-protos:
@rm proto/*.pb.go
client: ${PREFIX}/bin/notary
@echo "+ $@"
binaries: ${PREFIX}/bin/notary-server ${PREFIX}/bin/notary ${PREFIX}/bin/notary-signer
@echo "+ $@"
escrow: ${PREFIX}/bin/escrow
@echo "+ $@"
static: ${PREFIX}/bin/static/notary-server ${PREFIX}/bin/static/notary-signer ${PREFIX}/bin/static/notary
@echo "+ $@"
notary-dockerfile:
@docker build --rm --force-rm -t notary .
server-dockerfile:
@docker build --rm --force-rm -f server.Dockerfile -t notary-server .
signer-dockerfile:
@docker build --rm --force-rm -f signer.Dockerfile -t notary-signer .
docker-images: notary-dockerfile server-dockerfile signer-dockerfile
shell: notary-dockerfile
docker run --rm -it -v $(CURDIR)/cross:$(NOTARYDIR)/cross -v $(CURDIR)/bin:$(NOTARYDIR)/bin notary bash
cross:
@rm -rf $(CURDIR)/cross
@docker build --rm --force-rm -t notary -f cross.Dockerfile .
docker run --rm -v $(CURDIR)/cross:$(NOTARYDIR)/cross -e CTIMEVAR="${CTIMEVAR}" -e NOTARY_BUILDTAGS=$(NOTARY_BUILDTAGS) notary buildscripts/cross.sh $(GOOSES)
clean:
@echo "+ $@"
@rm -rf .cover cross
find . -name coverage.txt -delete
@rm -rf "${PREFIX}/bin/notary-server" "${PREFIX}/bin/notary" "${PREFIX}/bin/notary-signer"
@rm -rf "${PREFIX}/bin/static"

View File

@ -1 +0,0 @@
0.6.1

View File

@ -1,135 +0,0 @@
<img src="docs/images/notary-blk.svg" alt="Notary" width="400px"/>
[![GoDoc](https://godoc.org/github.com/theupdateframework/notary?status.svg)](https://godoc.org/github.com/theupdateframework/notary)
[![Circle CI](https://circleci.com/gh/theupdateframework/notary/tree/master.svg?style=shield)](https://circleci.com/gh/theupdateframework/notary/tree/master) [![CodeCov](https://codecov.io/github/theupdateframework/notary/coverage.svg?branch=master)](https://codecov.io/github/theupdateframework/notary) [![GoReportCard](https://goreportcard.com/badge/theupdateframework/notary)](https://goreportcard.com/report/github.com/theupdateframework/notary)
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary.svg?type=shield)](https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary?ref=badge_shield)
# Notice
The Notary project has officially been accepted into the Cloud Native Computing Foundation (CNCF).
It has moved to https://github.com/theupdateframework/notary. Any downstream consumers should update
their Go imports to use this new location, which will be the canonical location going forward.
We have moved the repo in GitHub, which will allow existing importers to continue using the old
location via GitHub's redirect.
# Overview
The Notary project comprises a [server](cmd/notary-server) and a [client](cmd/notary) for running and interacting
with trusted collections. See the [service architecture](docs/service_architecture.md) documentation
for more information.
Notary aims to make the internet more secure by making it easy for people to
publish and verify content. We often rely on TLS to secure our communications
with a web server, which is inherently flawed, as any compromise of the server
enables malicious content to be substituted for the legitimate content.
With Notary, publishers can sign their content offline using keys kept highly
secure. Once the publisher is ready to make the content available, they can
push their signed trusted collection to a Notary Server.
Consumers, having acquired the publisher's public key through a secure channel,
can then communicate with any Notary server or (insecure) mirror, relying
only on the publisher's key to determine the validity and integrity of the
received content.
## Goals
Notary is based on [The Update Framework](https://www.theupdateframework.com/), a secure general design for the problem of software distribution and updates. By using TUF, Notary achieves a number of key advantages:
* **Survivable Key Compromise**: Content publishers must manage keys in order to sign their content. Signing keys may be compromised or lost so systems must be designed in order to be flexible and recoverable in the case of key compromise. TUF's notion of key roles is utilized to separate responsibilities across a hierarchy of keys such that loss of any particular key (except the root role) by itself is not fatal to the security of the system.
* **Freshness Guarantees**: Replay attacks are a common problem in designing secure systems, where previously valid payloads are replayed to trick another system. The same problem exists in software update systems, where old signed content can be presented as the most recent. Notary makes use of timestamping on publishing so that consumers can know that they are receiving the most up-to-date content. This is particularly important when dealing with software updates, where old vulnerable versions could be used to attack users.
* **Configurable Trust Thresholds**: Oftentimes there are a large number of publishers that are allowed to publish a particular piece of content. For example, open source projects where there are a number of core maintainers. Trust thresholds can be used so that content consumers require a configurable number of signatures on a piece of content in order to trust it. Using thresholds increases security so that loss of individual signing keys doesn't allow publishing of malicious content.
* **Signing Delegation**: To allow for flexible publishing of trusted collections, a content publisher can delegate part of their collection to another signer. This delegation is represented as signed metadata so that a consumer of the content can verify both the content and the delegation.
* **Use of Existing Distribution**: Notary's trust guarantees are not tied at all to particular distribution channels from which content is delivered. Therefore, trust can be added to any existing content delivery mechanism.
* **Untrusted Mirrors and Transport**: All of the notary metadata can be mirrored and distributed via arbitrary channels.
## Security
Any security vulnerabilities can be reported to security@docker.com.
See Notary's [service architecture docs](docs/service_architecture.md#threat-model) for more information about our threat model, which details the varying survivability and severities for key compromise as well as mitigations.
### Security Audits
Notary has had two public security audits:
* [August 7, 2018 by Cure53](docs/resources/cure53_tuf_notary_audit_2018_08_07.pdf) covering TUF and Notary
* [July 31, 2015 by NCC](docs/resources/ncc_docker_notary_audit_2015_07_31.pdf) covering Notary
# Getting started with the Notary CLI
Get the Notary Client CLI binary from [the official releases page](https://github.com/theupdateframework/notary/releases) or you can [build one yourself](#building-notary).
The version of the Notary server and signer should be greater than or equal to Notary CLI's version to ensure feature compatibility (ex: CLI version 0.2, server/signer version >= 0.2), and all official releases are associated with GitHub tags.
To use the Notary CLI with Docker hub images, have a look at Notary's
[getting started docs](docs/getting_started.md).
For more advanced usage, see the
[advanced usage docs](docs/advanced_usage.md).
To use the CLI against a local Notary server rather than against Docker Hub:
1. Ensure that you have [docker and docker-compose](https://docs.docker.com/compose/install/) installed.
1. `git clone https://github.com/theupdateframework/notary.git` and from the cloned repository path,
start up a local Notary server and signer and copy the config file and testing certs to your
local Notary config directory:
```sh
$ docker-compose build
$ docker-compose up -d
$ mkdir -p ~/.notary && cp cmd/notary/config.json cmd/notary/root-ca.crt ~/.notary
```
1. Add `127.0.0.1 notary-server` to your `/etc/hosts`, or if using docker-machine,
add `$(docker-machine ip) notary-server`.
You can run through the examples in the
[getting started docs](docs/getting_started.md) and
[advanced usage docs](docs/advanced_usage.md), but
without the `-s` (server URL) argument to the `notary` command since the server
URL is already specified in the configuration file you copied.
You can also leave off the `-d ~/.docker/trust` argument if you do not care
to use `notary` with Docker images.
## Upgrading dependencies
To prevent mistakes in vendoring the Go modules, a buildscript has been added that vendors the modules using the correct version of Go, mitigating differences between CI and development environments.
The following procedure should be used to upgrade a dependency. Preferably keep dependency upgrades in a separate commit from your code changes.
```bash
go get -u github.com/spf13/viper
buildscripts/circle-validate-vendor.sh
git add .
git commit -m "Upgraded github.com/spf13/viper"
```
The `buildscripts/circle-validate-vendor.sh` script runs `go mod tidy` and `go mod vendor` using the given version of Go, to prevent differences if you are, for example, running a different version of Go.
## Building Notary
Note that Notary's [latest stable release](https://github.com/theupdateframework/notary/releases) is at the head of the
[releases branch](https://github.com/theupdateframework/notary/tree/releases). The master branch is the development
branch and contains features for the next release.
Prerequisites:
* Go >= 1.12
Set [```GOPATH```](https://golang.org/doc/code.html#GOPATH). Then, run:
```bash
$ export GO111MODULE=on
$ go get github.com/theupdateframework/notary
# build with pkcs11 support by default to support yubikey
$ go install -tags pkcs11 github.com/theupdateframework/notary/cmd/notary
$ notary
```
To build the server and signer, run `docker-compose build`.
## License
[![FOSSA Status](https://app.fossa.io/api/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary.svg?type=large)](https://app.fossa.io/projects/git%2Bgithub.com%2Ftheupdateframework%2Fnotary?ref=badge_large)

View File

@ -1,100 +0,0 @@
package changelist
import (
"github.com/theupdateframework/notary/tuf/data"
)
// Scopes for TUFChanges are simply the TUF roles.
// Unfortunately because of targets delegations, we can only
// cover the base roles.
const (
ScopeRoot = "root"
ScopeTargets = "targets"
)
// Types for TUFChanges are namespaced by the Role they
// are relevant for. The Root and Targets roles are the
// only ones for which user action can cause a change, as
// all changes in Snapshot and Timestamp are programmatically
// generated based on Root and Targets changes.
const (
TypeBaseRole = "role"
TypeTargetsTarget = "target"
TypeTargetsDelegation = "delegation"
TypeWitness = "witness"
)
// TUFChange represents a change to a TUF repo
type TUFChange struct {
// Abbreviated because Go doesn't permit a field and method of the same name
Actn string `json:"action"`
Role data.RoleName `json:"role"`
ChangeType string `json:"type"`
ChangePath string `json:"path"`
Data []byte `json:"data"`
}
// TUFRootData represents a modification of the keys associated
// with a role that appears in the root.json
type TUFRootData struct {
Keys data.KeyList `json:"keys"`
RoleName data.RoleName `json:"role"`
}
// NewTUFChange initializes a TUFChange object
func NewTUFChange(action string, role data.RoleName, changeType, changePath string, content []byte) *TUFChange {
return &TUFChange{
Actn: action,
Role: role,
ChangeType: changeType,
ChangePath: changePath,
Data: content,
}
}
// Action returns c.Actn
func (c TUFChange) Action() string {
return c.Actn
}
// Scope returns c.Role
func (c TUFChange) Scope() data.RoleName {
return c.Role
}
// Type returns c.ChangeType
func (c TUFChange) Type() string {
return c.ChangeType
}
// Path returns c.ChangePath
func (c TUFChange) Path() string {
return c.ChangePath
}
// Content returns c.Data
func (c TUFChange) Content() []byte {
return c.Data
}
// TUFDelegation represents a modification to a target delegation
// this includes creating a delegation. This format is used to avoid
// unexpected race conditions between humans modifying the same delegation
type TUFDelegation struct {
NewName data.RoleName `json:"new_name,omitempty"`
NewThreshold int `json:"threshold,omitempty"`
AddKeys data.KeyList `json:"add_keys,omitempty"`
RemoveKeys []string `json:"remove_keys,omitempty"`
AddPaths []string `json:"add_paths,omitempty"`
RemovePaths []string `json:"remove_paths,omitempty"`
ClearAllPaths bool `json:"clear_paths,omitempty"`
}
// ToNewRole creates a fresh role object from the TUFDelegation data
func (td TUFDelegation) ToNewRole(scope data.RoleName) (*data.Role, error) {
name := scope
if td.NewName != "" {
name = td.NewName
}
return data.NewRole(name, td.NewThreshold, td.AddKeys.IDs(), td.AddPaths)
}
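
For illustration only (not part of the vendored source), a minimal sketch of how a delegation change might be assembled: a `TUFDelegation` payload is serialized to JSON and wrapped in a `TUFChange` scoped to the delegation role. The role name used here is a hypothetical example.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/theupdateframework/notary/client/changelist"
	"github.com/theupdateframework/notary/tuf/data"
)

func main() {
	// Describe a new delegation with one path; AddKeys is left empty
	// purely to keep the sketch self-contained.
	td := changelist.TUFDelegation{
		NewThreshold: 1,
		AddPaths:     []string{"level1/"},
	}
	tdJSON, err := json.Marshal(&td)
	if err != nil {
		panic(err)
	}
	// Wrap the payload in a TUFChange scoped to the delegation role.
	c := changelist.NewTUFChange(
		changelist.ActionCreate,
		data.RoleName("targets/releases"),
		changelist.TypeTargetsDelegation,
		"", // delegations carry no path
		tdJSON,
	)
	fmt.Println(c.Action(), c.Scope(), c.Type())
}
```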


@ -1,82 +0,0 @@
package changelist
// memChangeList implements a simple in memory change list.
type memChangelist struct {
changes []Change
}
// NewMemChangelist instantiates a new in-memory changelist
func NewMemChangelist() Changelist {
return &memChangelist{}
}
// List returns a list of Changes
func (cl memChangelist) List() []Change {
return cl.changes
}
// Add adds a change to the in-memory change list
func (cl *memChangelist) Add(c Change) error {
cl.changes = append(cl.changes, c)
return nil
}
// Location returns the string "memory"
func (cl memChangelist) Location() string {
return "memory"
}
// Remove deletes the changes found at the given indices
func (cl *memChangelist) Remove(idxs []int) error {
remove := make(map[int]struct{})
for _, i := range idxs {
remove[i] = struct{}{}
}
var keep []Change
for i, c := range cl.changes {
if _, ok := remove[i]; ok {
continue
}
keep = append(keep, c)
}
cl.changes = keep
return nil
}
// Clear empties the in-memory change list.
func (cl *memChangelist) Clear(archive string) error {
// appending to a nil list initializes it.
cl.changes = nil
return nil
}
// Close is a no-op in this in-memory change-list
func (cl *memChangelist) Close() error {
return nil
}
func (cl *memChangelist) NewIterator() (ChangeIterator, error) {
return &MemChangeListIterator{index: 0, collection: cl.changes}, nil
}
// MemChangeListIterator is a concrete instance of ChangeIterator
type MemChangeListIterator struct {
index int
collection []Change // Same type as memChangeList.changes
}
// Next returns the next Change
func (m *MemChangeListIterator) Next() (item Change, err error) {
if m.index >= len(m.collection) {
return nil, IteratorBoundsError(m.index)
}
item = m.collection[m.index]
m.index++
return item, err
}
// HasNext indicates whether the iterator has further items
func (m *MemChangeListIterator) HasNext() bool {
return m.index < len(m.collection)
}
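
A short usage sketch (illustrative, not part of the vendored source) of the in-memory changelist and its iterator:

```go
package main

import (
	"fmt"

	"github.com/theupdateframework/notary/client/changelist"
)

func main() {
	cl := changelist.NewMemChangelist()
	defer cl.Close()

	// Queue one change; content is nil here just to keep the sketch short.
	c := changelist.NewTUFChange(
		changelist.ActionDelete,
		changelist.ScopeTargets,
		changelist.TypeTargetsTarget,
		"my/target", nil,
	)
	if err := cl.Add(c); err != nil {
		panic(err)
	}

	// Walk the list with the iterator until it is exhausted.
	it, err := cl.NewIterator()
	if err != nil {
		panic(err)
	}
	for it.HasNext() {
		change, err := it.Next()
		if err != nil {
			panic(err)
		}
		fmt.Println(change.Action(), change.Path())
	}
}
```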


@ -1,208 +0,0 @@
package changelist
import (
"encoding/json"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"sort"
"time"
"github.com/docker/distribution/uuid"
"github.com/sirupsen/logrus"
)
// FileChangelist stores all the changes as files
type FileChangelist struct {
dir string
}
// NewFileChangelist is a convenience method for returning FileChangeLists
func NewFileChangelist(dir string) (*FileChangelist, error) {
logrus.Debug("Making dir path: ", dir)
err := os.MkdirAll(dir, 0700)
if err != nil {
return nil, err
}
return &FileChangelist{dir: dir}, nil
}
// getFileNames reads directory, filtering out child directories
func getFileNames(dirName string) ([]os.FileInfo, error) {
var dirListing, fileInfos []os.FileInfo
dir, err := os.Open(dirName)
if err != nil {
return fileInfos, err
}
defer func() {
_ = dir.Close()
}()
dirListing, err = dir.Readdir(0)
if err != nil {
return fileInfos, err
}
for _, f := range dirListing {
if f.IsDir() {
continue
}
fileInfos = append(fileInfos, f)
}
sort.Sort(fileChanges(fileInfos))
return fileInfos, nil
}
// Read a JSON formatted file from disk; convert to TUFChange struct
func unmarshalFile(dirname string, f os.FileInfo) (*TUFChange, error) {
c := &TUFChange{}
raw, err := ioutil.ReadFile(filepath.Join(dirname, f.Name()))
if err != nil {
return c, err
}
err = json.Unmarshal(raw, c)
if err != nil {
return c, err
}
return c, nil
}
// List returns a list of sorted changes
func (cl FileChangelist) List() []Change {
var changes []Change
fileInfos, err := getFileNames(cl.dir)
if err != nil {
return changes
}
for _, f := range fileInfos {
c, err := unmarshalFile(cl.dir, f)
if err != nil {
logrus.Warn(err.Error())
continue
}
changes = append(changes, c)
}
return changes
}
// Add adds a change to the file change list
func (cl FileChangelist) Add(c Change) error {
cJSON, err := json.Marshal(c)
if err != nil {
return err
}
filename := fmt.Sprintf("%020d_%s.change", time.Now().UnixNano(), uuid.Generate())
return ioutil.WriteFile(filepath.Join(cl.dir, filename), cJSON, 0600)
}
// Remove deletes the changes found at the given indices
func (cl FileChangelist) Remove(idxs []int) error {
fileInfos, err := getFileNames(cl.dir)
if err != nil {
return err
}
remove := make(map[int]struct{})
for _, i := range idxs {
remove[i] = struct{}{}
}
for i, c := range fileInfos {
if _, ok := remove[i]; ok {
file := filepath.Join(cl.dir, c.Name())
if err := os.Remove(file); err != nil {
logrus.Errorf("could not remove change %d: %s", i, err.Error())
}
}
}
return nil
}
// Clear clears the change list
// N.B. archiving not currently implemented
func (cl FileChangelist) Clear(archive string) error {
dir, err := os.Open(cl.dir)
if err != nil {
return err
}
defer func() {
_ = dir.Close()
}()
files, err := dir.Readdir(0)
if err != nil {
return err
}
for _, f := range files {
os.Remove(filepath.Join(cl.dir, f.Name()))
}
return nil
}
// Close is a no-op
func (cl FileChangelist) Close() error {
// Nothing to do here
return nil
}
// Location returns the file path to the changelist
func (cl FileChangelist) Location() string {
return cl.dir
}
// NewIterator creates an iterator from FileChangelist
func (cl FileChangelist) NewIterator() (ChangeIterator, error) {
fileInfos, err := getFileNames(cl.dir)
if err != nil {
return &FileChangeListIterator{}, err
}
return &FileChangeListIterator{dirname: cl.dir, collection: fileInfos}, nil
}
// IteratorBoundsError is an Error type used by Next()
type IteratorBoundsError int
// Error implements the Error interface
func (e IteratorBoundsError) Error() string {
return fmt.Sprintf("Iterator index (%d) out of bounds", e)
}
// FileChangeListIterator is a concrete instance of ChangeIterator
type FileChangeListIterator struct {
index int
dirname string
collection []os.FileInfo
}
// Next returns the next Change in the FileChangeList
func (m *FileChangeListIterator) Next() (item Change, err error) {
if m.index >= len(m.collection) {
return nil, IteratorBoundsError(m.index)
}
f := m.collection[m.index]
m.index++
item, err = unmarshalFile(m.dirname, f)
return
}
// HasNext indicates whether the iterator has further items
func (m *FileChangeListIterator) HasNext() bool {
return m.index < len(m.collection)
}
type fileChanges []os.FileInfo
// Len returns the length of a file change list
func (cs fileChanges) Len() int {
return len(cs)
}
// Less compares the names of two different file changes
func (cs fileChanges) Less(i, j int) bool {
return cs[i].Name() < cs[j].Name()
}
// Swap swaps the position of two file changes
func (cs fileChanges) Swap(i, j int) {
tmp := cs[i]
cs[i] = cs[j]
cs[j] = tmp
}
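
The `%020d_%s.change` naming used by `Add` is what makes the lexicographic sort in `fileChanges.Less` equivalent to chronological order: the zero-padded `UnixNano` prefix compares as a string the same way it compares as a number. A quick standalone sketch of the idea (illustrative only):

```go
package main

import (
	"fmt"
	"sort"
	"time"
)

func main() {
	// Same scheme as FileChangelist.Add: a 20-digit zero-padded
	// UnixNano timestamp, so string order equals time order.
	names := []string{
		fmt.Sprintf("%020d_a.change", time.Now().UnixNano()),
		fmt.Sprintf("%020d_b.change", time.Now().Add(-time.Hour).UnixNano()),
	}
	sort.Strings(names)
	fmt.Println(names) // the older (-1h) entry sorts first
}
```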


@ -1,78 +0,0 @@
package changelist
import "github.com/theupdateframework/notary/tuf/data"
// Changelist is the interface for all TUF change lists
type Changelist interface {
// List returns the ordered list of changes
// currently stored
List() []Change
// Add change appends the provided change to
// the list of changes
Add(Change) error
// Clear empties the current change list.
// Archive may be provided as a directory path
// to save a copy of the changelist in that location
Clear(archive string) error
// Remove deletes the changes corresponding with the indices given
Remove(idxs []int) error
// Close synchronizes any pending writes to the underlying
// storage and closes the file/connection
Close() error
// NewIterator returns an iterator for walking through the list
// of changes currently stored
NewIterator() (ChangeIterator, error)
// Location returns the place the changelist is stored
Location() string
}
const (
// ActionCreate represents a Create action
ActionCreate = "create"
// ActionUpdate represents an Update action
ActionUpdate = "update"
// ActionDelete represents a Delete action
ActionDelete = "delete"
)
// Change is the interface for a TUF Change
type Change interface {
// "create","update", or "delete"
Action() string
// Where the change should be made.
// For TUF this will be the role
Scope() data.RoleName
// The content type being affected.
// For TUF this will be "target", or "delegation".
// If the type is "delegation", the Scope will be
// used to determine if a root role is being updated
// or a target delegation.
Type() string
// Path indicates the entry within a role to be affected by the
// change. For targets, this is simply the target's path,
// for delegations it's the delegated role name.
Path() string
// Serialized content that the interpreter of a changelist
// can use to apply the change.
// For TUF this will be the serialized JSON that needs
// to be inserted or merged. In the case of a "delete"
// action, it will be nil.
Content() []byte
}
// ChangeIterator is the interface for iterating across collections of
// TUF Change items
type ChangeIterator interface {
Next() (Change, error)
HasNext() bool
}
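
Both implementations above are expected to satisfy these interfaces; a compile-time assertion (illustrative only, not present in the original source) is a cheap way to verify that:

```go
package changelist

// Compile-time checks that both implementations satisfy the interfaces.
// Note that memChangelist uses pointer receivers for its mutating methods,
// so only *memChangelist satisfies Changelist.
var (
	_ Changelist     = (*memChangelist)(nil)
	_ Changelist     = (*FileChangelist)(nil)
	_ ChangeIterator = (*MemChangeListIterator)(nil)
	_ ChangeIterator = (*FileChangeListIterator)(nil)
)
```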


@ -1,998 +0,0 @@
// Package client implements everything required for interacting with a Notary repository.
package client
import (
"bytes"
"encoding/json"
"fmt"
"io/ioutil"
"net/http"
"os"
"path/filepath"
"time"
canonicaljson "github.com/docker/go/canonical/json"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/client/changelist"
"github.com/theupdateframework/notary/cryptoservice"
store "github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/trustpinning"
"github.com/theupdateframework/notary/tuf"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
"github.com/theupdateframework/notary/tuf/utils"
)
const (
tufDir = "tuf"
// SignWithAllOldVersions is a sentinel constant for LegacyVersions flag
SignWithAllOldVersions = -1
)
func init() {
data.SetDefaultExpiryTimes(data.NotaryDefaultExpiries)
}
// repository stores all the information needed to operate on a notary repository.
type repository struct {
gun data.GUN
baseURL string
changelist changelist.Changelist
cache store.MetadataStore
remoteStore store.RemoteStore
cryptoService signed.CryptoService
tufRepo *tuf.Repo
invalid *tuf.Repo // known data that was parsable but deemed invalid
roundTrip http.RoundTripper
trustPinning trustpinning.TrustPinConfig
LegacyVersions int // number of versions back to fetch roots to sign with
}
// NewFileCachedRepository is a wrapper for NewRepository that initializes
// a file cache from the provided repository, local config information and a crypto service.
// It also retrieves the remote store associated to the base directory under where all the
// trust files will be stored (this normally defaults to "~/.notary" or "~/.docker/trust"
// when enabling Docker content trust) and the specified GUN.
//
// In case of a nil RoundTripper, a default offline store is used instead.
func NewFileCachedRepository(baseDir string, gun data.GUN, baseURL string, rt http.RoundTripper,
retriever notary.PassRetriever, trustPinning trustpinning.TrustPinConfig) (Repository, error) {
cache, err := store.NewFileStore(
filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()), "metadata"),
"json",
)
if err != nil {
return nil, err
}
keyStores, err := getKeyStores(baseDir, retriever)
if err != nil {
return nil, err
}
cryptoService := cryptoservice.NewCryptoService(keyStores...)
remoteStore, err := getRemoteStore(baseURL, gun, rt)
if err != nil {
// baseURL is syntactically invalid
return nil, err
}
cl, err := changelist.NewFileChangelist(filepath.Join(
filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()), "changelist"),
))
if err != nil {
return nil, err
}
return NewRepository(gun, baseURL, remoteStore, cache, trustPinning, cryptoService, cl)
}
// NewRepository is the base method that returns a new notary repository.
// It expects an initialized cache. In case of a nil remote store, a default
// offline store is used.
func NewRepository(gun data.GUN, baseURL string, remoteStore store.RemoteStore, cache store.MetadataStore,
trustPinning trustpinning.TrustPinConfig, cryptoService signed.CryptoService, cl changelist.Changelist) (Repository, error) {
// Repo's remote store is either a valid remote store or an OfflineStore
if remoteStore == nil {
remoteStore = store.OfflineStore{}
}
if cache == nil {
return nil, fmt.Errorf("got an invalid cache (nil metadata store)")
}
nRepo := &repository{
gun: gun,
baseURL: baseURL,
changelist: cl,
cache: cache,
remoteStore: remoteStore,
cryptoService: cryptoService,
trustPinning: trustPinning,
LegacyVersions: 0, // By default, don't sign with legacy roles
}
return nRepo, nil
}
// GetGUN is a getter for the GUN object from a Repository
func (r *repository) GetGUN() data.GUN {
return r.gun
}
func (r *repository) updateTUF(forWrite bool) error {
repo, invalid, err := LoadTUFRepo(TUFLoadOptions{
GUN: r.gun,
TrustPinning: r.trustPinning,
CryptoService: r.cryptoService,
Cache: r.cache,
RemoteStore: r.remoteStore,
AlwaysCheckInitialized: forWrite,
})
if err != nil {
return err
}
r.tufRepo = repo
r.invalid = invalid
return nil
}
// ListTargets calls update first before listing targets
func (r *repository) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).ListTargets(roles...)
}
// GetTargetByName calls update first before getting target by name
func (r *repository) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).GetTargetByName(name, roles...)
}
// GetAllTargetMetadataByName calls update first before getting targets by name
func (r *repository) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).GetAllTargetMetadataByName(name)
}
// ListRoles calls update first before getting roles
func (r *repository) ListRoles() ([]RoleWithSignatures, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).ListRoles()
}
// GetDelegationRoles calls update first before getting all delegation roles
func (r *repository) GetDelegationRoles() ([]data.Role, error) {
if err := r.updateTUF(false); err != nil {
return nil, err
}
return NewReadOnly(r.tufRepo).GetDelegationRoles()
}
// NewTarget is a helper method that returns a Target
func NewTarget(targetName, targetPath string, targetCustom *canonicaljson.RawMessage) (*Target, error) {
b, err := ioutil.ReadFile(targetPath)
if err != nil {
return nil, err
}
meta, err := data.NewFileMeta(bytes.NewBuffer(b), data.NotaryDefaultHashes...)
if err != nil {
return nil, err
}
return &Target{Name: targetName, Hashes: meta.Hashes, Length: meta.Length, Custom: targetCustom}, nil
}
// rootCertKey generates the corresponding certificate for the private key given the privKey and repo's GUN
func rootCertKey(gun data.GUN, privKey data.PrivateKey) (data.PublicKey, error) {
// Hard-coded policy: the generated certificate expires in 10 years.
startTime := time.Now()
cert, err := cryptoservice.GenerateCertificate(
privKey, gun, startTime, startTime.Add(notary.Year*10))
if err != nil {
return nil, err
}
x509PublicKey := utils.CertToKey(cert)
if x509PublicKey == nil {
return nil, fmt.Errorf("cannot generate public key from private key with id: %v and algorithm: %v", privKey.ID(), privKey.Algorithm())
}
return x509PublicKey, nil
}
// GetCryptoService is the getter for the repository's CryptoService
func (r *repository) GetCryptoService() signed.CryptoService {
return r.cryptoService
}
// initialize initializes the notary repository with a set of rootkeys, root certificates and roles.
func (r *repository) initialize(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error {
// currently we only support server managing timestamps and snapshots, and
// nothing else - timestamps are always managed by the server, and implicit
// (do not have to be passed in as part of `serverManagedRoles`, so that
// the API of Initialize doesn't change).
var serverManagesSnapshot bool
locallyManagedKeys := []data.RoleName{
data.CanonicalTargetsRole,
data.CanonicalSnapshotRole,
// root is also locally managed, but that should have been created
// already
}
remotelyManagedKeys := []data.RoleName{data.CanonicalTimestampRole}
for _, role := range serverManagedRoles {
switch role {
case data.CanonicalTimestampRole:
continue // timestamp is already in the right place
case data.CanonicalSnapshotRole:
// because we put Snapshot last
locallyManagedKeys = []data.RoleName{data.CanonicalTargetsRole}
remotelyManagedKeys = append(
remotelyManagedKeys, data.CanonicalSnapshotRole)
serverManagesSnapshot = true
default:
return ErrInvalidRemoteRole{Role: role}
}
}
// gets valid public keys corresponding to the rootKeyIDs or generate if necessary
var publicKeys []data.PublicKey
var err error
if len(rootCerts) == 0 {
publicKeys, err = r.createNewPublicKeyFromKeyIDs(rootKeyIDs)
} else {
publicKeys, err = r.publicKeysOfKeyIDs(rootKeyIDs, rootCerts)
}
if err != nil {
return err
}
//initialize repo with public keys
rootRole, targetsRole, snapshotRole, timestampRole, err := r.initializeRoles(
publicKeys,
locallyManagedKeys,
remotelyManagedKeys,
)
if err != nil {
return err
}
r.tufRepo = tuf.NewRepo(r.GetCryptoService())
if err := r.tufRepo.InitRoot(
rootRole,
timestampRole,
snapshotRole,
targetsRole,
false,
); err != nil {
logrus.Debug("Error on InitRoot: ", err.Error())
return err
}
if _, err := r.tufRepo.InitTargets(data.CanonicalTargetsRole); err != nil {
logrus.Debug("Error on InitTargets: ", err.Error())
return err
}
if err := r.tufRepo.InitSnapshot(); err != nil {
logrus.Debug("Error on InitSnapshot: ", err.Error())
return err
}
return r.saveMetadata(serverManagesSnapshot)
}
// createNewPublicKeyFromKeyIDs generates a set of public keys corresponding to the given list of
// key IDs existing in the repository's CryptoService.
// the public keys returned are ordered to correspond to the keyIDs
func (r *repository) createNewPublicKeyFromKeyIDs(keyIDs []string) ([]data.PublicKey, error) {
publicKeys := []data.PublicKey{}
privKeys, err := getAllPrivKeys(keyIDs, r.GetCryptoService())
if err != nil {
return nil, err
}
for _, privKey := range privKeys {
rootKey, err := rootCertKey(r.gun, privKey)
if err != nil {
return nil, err
}
publicKeys = append(publicKeys, rootKey)
}
return publicKeys, nil
}
// publicKeysOfKeyIDs confirms that the public keys and private keys (by key IDs) form valid,
// strictly ordered key pairs (e.g. keyIDs[0] must match pubKeys[0], keyIDs[1] must match pubKeys[1], and so on),
// or returns an error when they mismatch.
func (r *repository) publicKeysOfKeyIDs(keyIDs []string, pubKeys []data.PublicKey) ([]data.PublicKey, error) {
if len(keyIDs) != len(pubKeys) {
err := fmt.Errorf("require matching number of keyIDs and public keys but got %d IDs and %d public keys", len(keyIDs), len(pubKeys))
return nil, err
}
if err := matchKeyIdsWithPubKeys(r, keyIDs, pubKeys); err != nil {
return nil, fmt.Errorf("could not obtain public key from IDs: %v", err)
}
return pubKeys, nil
}
// matchKeyIdsWithPubKeys validates that the private keys (represented by their IDs) and the public keys
// form matching key pairs
func matchKeyIdsWithPubKeys(r *repository, ids []string, pubKeys []data.PublicKey) error {
for i := 0; i < len(ids); i++ {
privKey, _, err := r.GetCryptoService().GetPrivateKey(ids[i])
if err != nil {
return fmt.Errorf("could not get the private key matching id %v: %v", ids[i], err)
}
pubKey := pubKeys[i]
err = signed.VerifyPublicKeyMatchesPrivateKey(privKey, pubKey)
if err != nil {
return err
}
}
return nil
}
// Initialize creates a new repository by using rootKey as the root Key for the
// TUF repository. The server must be reachable (and is asked to generate a
// timestamp key and possibly other serverManagedRoles), but the created repository
// result is only stored on local disk, not published to the server. To do that,
// use r.Publish() eventually.
func (r *repository) Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error {
return r.initialize(rootKeyIDs, nil, serverManagedRoles...)
}
type errKeyNotFound struct{}
func (errKeyNotFound) Error() string {
return "cannot find matching private key id"
}
// keyExistsInList returns nil if the given certificate's canonical key ID is
// present in ids, and errKeyNotFound otherwise
func keyExistsInList(cert data.PublicKey, ids map[string]bool) error {
pubKeyID, err := utils.CanonicalKeyID(cert)
if err != nil {
return fmt.Errorf("failed to obtain the public key id from the given certificate: %v", err)
}
if _, ok := ids[pubKeyID]; ok {
return nil
}
return errKeyNotFound{}
}
// InitializeWithCertificate initializes the repository with root keys and their corresponding certificates
func (r *repository) InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey,
serverManagedRoles ...data.RoleName) error {
// If we explicitly pass in certificate(s) but not key, then look keys up using certificate
if len(rootKeyIDs) == 0 && len(rootCerts) != 0 {
rootKeyIDs = []string{}
availableRootKeyIDs := make(map[string]bool)
for _, k := range r.GetCryptoService().ListKeys(data.CanonicalRootRole) {
availableRootKeyIDs[k] = true
}
for _, cert := range rootCerts {
if err := keyExistsInList(cert, availableRootKeyIDs); err != nil {
return fmt.Errorf("error initializing repository with certificate: %v", err)
}
keyID, _ := utils.CanonicalKeyID(cert)
rootKeyIDs = append(rootKeyIDs, keyID)
}
}
return r.initialize(rootKeyIDs, rootCerts, serverManagedRoles...)
}
func (r *repository) initializeRoles(rootKeys []data.PublicKey, localRoles, remoteRoles []data.RoleName) (
root, targets, snapshot, timestamp data.BaseRole, err error) {
root = data.NewBaseRole(
data.CanonicalRootRole,
notary.MinThreshold,
rootKeys...,
)
// we want to create all the local keys first so we don't have to
// make unnecessary network calls
for _, role := range localRoles {
// This is currently hardcoding the keys to ECDSA.
var key data.PublicKey
key, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey)
if err != nil {
return
}
switch role {
case data.CanonicalSnapshotRole:
snapshot = data.NewBaseRole(
role,
notary.MinThreshold,
key,
)
case data.CanonicalTargetsRole:
targets = data.NewBaseRole(
role,
notary.MinThreshold,
key,
)
}
}
remote := r.getRemoteStore()
for _, role := range remoteRoles {
// This key is generated by the remote server.
var key data.PublicKey
key, err = getRemoteKey(role, remote)
if err != nil {
return
}
logrus.Debugf("got remote %s %s key with keyID: %s",
role, key.Algorithm(), key.ID())
switch role {
case data.CanonicalSnapshotRole:
snapshot = data.NewBaseRole(
role,
notary.MinThreshold,
key,
)
case data.CanonicalTimestampRole:
timestamp = data.NewBaseRole(
role,
notary.MinThreshold,
key,
)
}
}
return root, targets, snapshot, timestamp, nil
}
// adds a TUF Change template to the given roles
func addChange(cl changelist.Changelist, c changelist.Change, roles ...data.RoleName) error {
if len(roles) == 0 {
roles = []data.RoleName{data.CanonicalTargetsRole}
}
var changes []changelist.Change
for _, role := range roles {
// Ensure we can only add targets to the CanonicalTargetsRole,
// or a Delegation role (which is <CanonicalTargetsRole>/something else)
if role != data.CanonicalTargetsRole && !data.IsDelegation(role) && !data.IsWildDelegation(role) {
return data.ErrInvalidRole{
Role: role,
Reason: "cannot add targets to this role",
}
}
changes = append(changes, changelist.NewTUFChange(
c.Action(),
role,
c.Type(),
c.Path(),
c.Content(),
))
}
for _, c := range changes {
if err := cl.Add(c); err != nil {
return err
}
}
return nil
}
// AddTarget creates new changelist entries to add a target to the given roles
// in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "targets"
func (r *repository) AddTarget(target *Target, roles ...data.RoleName) error {
if len(target.Hashes) == 0 {
return fmt.Errorf("no hashes specified for target \"%s\"", target.Name)
}
logrus.Debugf("Adding target \"%s\" with sha256 \"%x\" and size %d bytes.\n", target.Name, target.Hashes["sha256"], target.Length)
meta := data.FileMeta{Length: target.Length, Hashes: target.Hashes, Custom: target.Custom}
metaJSON, err := json.Marshal(meta)
if err != nil {
return err
}
template := changelist.NewTUFChange(
changelist.ActionCreate, "", changelist.TypeTargetsTarget,
target.Name, metaJSON)
return addChange(r.changelist, template, roles...)
}
// RemoveTarget creates new changelist entries to remove a target from the given
// roles in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "target".
func (r *repository) RemoveTarget(targetName string, roles ...data.RoleName) error {
logrus.Debugf("Removing target \"%s\"", targetName)
template := changelist.NewTUFChange(changelist.ActionDelete, "",
changelist.TypeTargetsTarget, targetName, nil)
return addChange(r.changelist, template, roles...)
}
// GetChangelist returns the list of the repository's unpublished changes
func (r *repository) GetChangelist() (changelist.Changelist, error) {
return r.changelist, nil
}
// getRemoteStore returns the remoteStore of a repository if valid,
// or an OfflineStore otherwise
func (r *repository) getRemoteStore() store.RemoteStore {
if r.remoteStore != nil {
return r.remoteStore
}
r.remoteStore = &store.OfflineStore{}
return r.remoteStore
}
// Publish pushes the local changes in signed material to the remote notary-server
// Conceptually it performs an operation similar to a `git rebase`
func (r *repository) Publish() error {
if err := r.publish(r.changelist); err != nil {
return err
}
if err := r.changelist.Clear(""); err != nil {
// This is not a critical problem when only a single host is pushing
// but will cause weird behaviour if changelist cleanup is failing
// and there are multiple hosts writing to the repo.
logrus.Warn("Unable to clear changelist. You may want to manually delete the folder ", r.changelist.Location())
}
return nil
}
// publish pushes the changes in the given changelist to the remote notary-server
// Conceptually it performs an operation similar to a `git rebase`
func (r *repository) publish(cl changelist.Changelist) error {
var initialPublish bool
// update first before publishing
if err := r.updateTUF(true); err != nil {
// If the remote is not aware of the repo, then this is being published
// for the first time. Try to initialize the repository before publishing.
if _, ok := err.(ErrRepositoryNotExist); ok {
err := r.bootstrapRepo()
if _, ok := err.(store.ErrMetaNotFound); ok {
logrus.Infof("No TUF data found locally or remotely - initializing repository %s for the first time", r.gun.String())
err = r.Initialize(nil)
}
if err != nil {
logrus.WithError(err).Debugf("Unable to load or initialize repository during first publish: %s", err.Error())
return err
}
// Ensure we will push the initial root and targets file. Either or
// both of the root and targets may not be marked as Dirty, since
// there may not be any changes that update them, so use a
// different boolean.
initialPublish = true
} else {
// We could not update, so we cannot publish.
logrus.Error("Could not publish Repository since we could not update: ", err.Error())
return err
}
}
// apply the changelist to the repo
if err := applyChangelist(r.tufRepo, r.invalid, cl); err != nil {
logrus.Debug("Error applying changelist")
return err
}
// these are the TUF files we will need to update, serialized as JSON before
// we send anything to remote
updatedFiles := make(map[data.RoleName][]byte)
// Fetch old keys to support old clients
legacyKeys, err := r.oldKeysForLegacyClientSupport(r.LegacyVersions, initialPublish)
if err != nil {
return err
}
// check if our root file is nearing expiry or dirty. Resign if it is. If
// root is not dirty but we are publishing for the first time, then just
// publish the existing root we have.
if err := signRootIfNecessary(updatedFiles, r.tufRepo, legacyKeys, initialPublish); err != nil {
return err
}
if err := signTargets(updatedFiles, r.tufRepo, initialPublish); err != nil {
return err
}
// if we initialized the repo while designating the server as the snapshot
// signer, then there won't be a snapshots file. However, we might now
// have a local key (if there was a rotation), so initialize one.
if r.tufRepo.Snapshot == nil {
if err := r.tufRepo.InitSnapshot(); err != nil {
return err
}
}
if snapshotJSON, err := serializeCanonicalRole(
r.tufRepo, data.CanonicalSnapshotRole, nil); err == nil {
// Only update the snapshot if we've successfully signed it.
updatedFiles[data.CanonicalSnapshotRole] = snapshotJSON
} else if signErr, ok := err.(signed.ErrInsufficientSignatures); ok && signErr.FoundKeys == 0 {
// If signing fails due to us not having the snapshot key, then
// assume the server is going to sign, and do not include any snapshot
// data.
logrus.Debugf("Client does not have the key to sign snapshot. " +
"Assuming that server should sign the snapshot.")
} else {
logrus.Debugf("Client was unable to sign the snapshot: %s", err.Error())
return err
}
remote := r.getRemoteStore()
return remote.SetMulti(data.MetadataRoleMapToStringMap(updatedFiles))
}
func signRootIfNecessary(updates map[data.RoleName][]byte, repo *tuf.Repo, extraSigningKeys data.KeyList, initialPublish bool) error {
if len(extraSigningKeys) > 0 {
repo.Root.Dirty = true
}
if nearExpiry(repo.Root.Signed.SignedCommon) || repo.Root.Dirty {
rootJSON, err := serializeCanonicalRole(repo, data.CanonicalRootRole, extraSigningKeys)
if err != nil {
return err
}
updates[data.CanonicalRootRole] = rootJSON
} else if initialPublish {
rootJSON, err := repo.Root.MarshalJSON()
if err != nil {
return err
}
updates[data.CanonicalRootRole] = rootJSON
}
return nil
}
// Fetch back `legacyVersions` versions of root files, collecting the root public keys.
// This includes old `root` roles as well as legacy versioned root roles, e.g. `1.root`
func (r *repository) oldKeysForLegacyClientSupport(legacyVersions int, initialPublish bool) (data.KeyList, error) {
if initialPublish {
return nil, nil
}
var oldestVersion int
prevVersion := r.tufRepo.Root.Signed.Version
if legacyVersions == SignWithAllOldVersions {
oldestVersion = 1
} else {
oldestVersion = r.tufRepo.Root.Signed.Version - legacyVersions
}
if oldestVersion < 1 {
oldestVersion = 1
}
if prevVersion <= 1 || oldestVersion == prevVersion {
return nil, nil
}
oldKeys := make(map[string]data.PublicKey)
c, err := bootstrapClient(TUFLoadOptions{
GUN: r.gun,
TrustPinning: r.trustPinning,
CryptoService: r.cryptoService,
Cache: r.cache,
RemoteStore: r.remoteStore,
AlwaysCheckInitialized: true,
})
// require a server connection to fetch old roots
if err != nil {
return nil, err
}
for v := prevVersion; v >= oldestVersion; v-- {
logrus.Debugf("fetching old keys from version %d", v)
// fetch old root version
versionedRole := fmt.Sprintf("%d.%s", v, data.CanonicalRootRole.String())
raw, err := c.remote.GetSized(versionedRole, -1)
if err != nil {
logrus.Debugf("error downloading %s: %s", versionedRole, err)
continue
}
signedOldRoot := &data.Signed{}
if err := json.Unmarshal(raw, signedOldRoot); err != nil {
return nil, err
}
oldRootVersion, err := data.RootFromSigned(signedOldRoot)
if err != nil {
return nil, err
}
// extract legacy versioned root keys
oldRootVersionKeys := getOldRootPublicKeys(oldRootVersion)
for _, oldKey := range oldRootVersionKeys {
oldKeys[oldKey.ID()] = oldKey
}
}
oldKeyList := make(data.KeyList, 0, len(oldKeys))
for _, key := range oldKeys {
oldKeyList = append(oldKeyList, key)
}
return oldKeyList, nil
}
// get all the saved previous roles keys < the current root version
func getOldRootPublicKeys(root *data.SignedRoot) data.KeyList {
rootRole, err := root.BuildBaseRole(data.CanonicalRootRole)
if err != nil {
return nil
}
return rootRole.ListKeys()
}
func signTargets(updates map[data.RoleName][]byte, repo *tuf.Repo, initialPublish bool) error {
// iterate through all the targets files - if they are dirty, sign and update
for roleName, roleObj := range repo.Targets {
if roleObj.Dirty || (roleName == data.CanonicalTargetsRole && initialPublish) {
targetsJSON, err := serializeCanonicalRole(repo, roleName, nil)
if err != nil {
return err
}
updates[roleName] = targetsJSON
}
}
return nil
}
// bootstrapRepo loads the repository from the local file system (i.e.
// a not yet published repo or a possibly obsolete local copy) into
// r.tufRepo. This attempts to load metadata for all roles. Since server
// snapshots are supported, if the snapshot metadata fails to load, that's ok.
// This assumes that bootstrapRepo is only used by Publish() or RotateKey()
func (r *repository) bootstrapRepo() error {
b := tuf.NewRepoBuilder(r.gun, r.GetCryptoService(), r.trustPinning)
logrus.Debugf("Loading trusted collection.")
for _, role := range data.BaseRoles {
jsonBytes, err := r.cache.GetSized(role.String(), store.NoSizeLimit)
if err != nil {
if _, ok := err.(store.ErrMetaNotFound); ok &&
// server snapshots are supported, and server timestamp management
// is required, so if either of these fail to load that's ok - especially
// if the repo is new
role == data.CanonicalSnapshotRole || role == data.CanonicalTimestampRole {
continue
}
return err
}
if err := b.Load(role, jsonBytes, 1, true); err != nil {
return err
}
}
tufRepo, _, err := b.Finish()
if err == nil {
r.tufRepo = tufRepo
}
return nil
}
// saveMetadata saves contents of r.tufRepo onto the local disk, creating
// signatures as necessary, possibly prompting for passphrases.
func (r *repository) saveMetadata(ignoreSnapshot bool) error {
logrus.Debugf("Saving changes to Trusted Collection.")
rootJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalRootRole, nil)
if err != nil {
return err
}
err = r.cache.Set(data.CanonicalRootRole.String(), rootJSON)
if err != nil {
return err
}
targetsToSave := make(map[data.RoleName][]byte)
for t := range r.tufRepo.Targets {
signedTargets, err := r.tufRepo.SignTargets(t, data.DefaultExpires(data.CanonicalTargetsRole))
if err != nil {
return err
}
targetsJSON, err := json.Marshal(signedTargets)
if err != nil {
return err
}
targetsToSave[t] = targetsJSON
}
for role, blob := range targetsToSave {
// If the parent directory does not exist, the cache.Set will create it
r.cache.Set(role.String(), blob)
}
if ignoreSnapshot {
return nil
}
snapshotJSON, err := serializeCanonicalRole(r.tufRepo, data.CanonicalSnapshotRole, nil)
if err != nil {
return err
}
return r.cache.Set(data.CanonicalSnapshotRole.String(), snapshotJSON)
}
// RotateKey removes all existing keys associated with the role. If no keys are
// specified in keyList, then this creates and adds one new key or delegates
// managing the key to the server. If key(s) are specified by keyList, then they are
// used for signing the role.
// These changes are staged in a changelist until publish is called.
func (r *repository) RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error {
if err := checkRotationInput(role, serverManagesKey); err != nil {
return err
}
pubKeyList, err := r.pubKeyListForRotation(role, serverManagesKey, keyList)
if err != nil {
return err
}
cl := changelist.NewMemChangelist()
if err := r.rootFileKeyChange(cl, role, changelist.ActionCreate, pubKeyList); err != nil {
return err
}
return r.publish(cl)
}
// Given a set of new keys to rotate to and a set of keys to drop, returns the list of current keys to use
func (r *repository) pubKeyListForRotation(role data.RoleName, serverManaged bool, newKeys []string) (pubKeyList data.KeyList, err error) {
var pubKey data.PublicKey
// If server manages the key being rotated, request a rotation and return the new key
if serverManaged {
remote := r.getRemoteStore()
pubKey, err = rotateRemoteKey(role, remote)
pubKeyList = make(data.KeyList, 0, 1)
pubKeyList = append(pubKeyList, pubKey)
if err != nil {
return nil, fmt.Errorf("unable to rotate remote key: %s", err)
}
return pubKeyList, nil
}
// If no new keys are passed in, we generate one
if len(newKeys) == 0 {
pubKeyList = make(data.KeyList, 0, 1)
pubKey, err = r.GetCryptoService().Create(role, r.gun, data.ECDSAKey)
pubKeyList = append(pubKeyList, pubKey)
}
if err != nil {
return nil, fmt.Errorf("unable to generate key: %s", err)
}
// If a list of keys to rotate to are provided, we add those
if len(newKeys) > 0 {
pubKeyList = make(data.KeyList, 0, len(newKeys))
for _, keyID := range newKeys {
pubKey = r.GetCryptoService().GetKey(keyID)
if pubKey == nil {
return nil, fmt.Errorf("unable to find key: %s", keyID)
}
pubKeyList = append(pubKeyList, pubKey)
}
}
// Convert to certs (for root keys)
if pubKeyList, err = r.pubKeysToCerts(role, pubKeyList); err != nil {
return nil, err
}
return pubKeyList, nil
}
func (r *repository) pubKeysToCerts(role data.RoleName, pubKeyList data.KeyList) (data.KeyList, error) {
// only generate certs for root keys
if role != data.CanonicalRootRole {
return pubKeyList, nil
}
for i, pubKey := range pubKeyList {
privKey, loadedRole, err := r.GetCryptoService().GetPrivateKey(pubKey.ID())
if err != nil {
return nil, err
}
if loadedRole != role {
return nil, fmt.Errorf("attempted to load root key but given %s key instead", loadedRole)
}
pubKey, err = rootCertKey(r.gun, privKey)
if err != nil {
return nil, err
}
pubKeyList[i] = pubKey
}
return pubKeyList, nil
}
func checkRotationInput(role data.RoleName, serverManaged bool) error {
// We currently support remotely managing timestamp and snapshot keys
canBeRemoteKey := role == data.CanonicalTimestampRole || role == data.CanonicalSnapshotRole
// And locally managing root, targets, and snapshot keys
canBeLocalKey := role == data.CanonicalSnapshotRole || role == data.CanonicalTargetsRole ||
role == data.CanonicalRootRole
switch {
case !data.ValidRole(role) || data.IsDelegation(role):
return fmt.Errorf("notary does not currently permit rotating the %s key", role)
case serverManaged && !canBeRemoteKey:
return ErrInvalidRemoteRole{Role: role}
case !serverManaged && !canBeLocalKey:
return ErrInvalidLocalRole{Role: role}
}
return nil
}
func (r *repository) rootFileKeyChange(cl changelist.Changelist, role data.RoleName, action string, keyList []data.PublicKey) error {
meta := changelist.TUFRootData{
RoleName: role,
Keys: keyList,
}
metaJSON, err := json.Marshal(meta)
if err != nil {
return err
}
c := changelist.NewTUFChange(
action,
changelist.ScopeRoot,
changelist.TypeBaseRole,
role.String(),
metaJSON,
)
return cl.Add(c)
}
// DeleteTrustData removes the trust data stored for this repo in the TUF cache on the client side
// Note that we will not delete any private key material from local storage
func DeleteTrustData(baseDir string, gun data.GUN, URL string, rt http.RoundTripper, deleteRemote bool) error {
localRepo := filepath.Join(baseDir, tufDir, filepath.FromSlash(gun.String()))
// Remove the tufRepoPath directory, which includes local TUF metadata files and changelist information
if err := os.RemoveAll(localRepo); err != nil {
return fmt.Errorf("error clearing TUF repo data: %v", err)
}
// Note that this will require admin permission for the gun in the roundtripper
if deleteRemote {
remote, err := getRemoteStore(URL, gun, rt)
if err != nil {
logrus.Errorf("unable to instantiate a remote store: %v", err)
return err
}
if err := remote.RemoveAll(); err != nil {
return err
}
}
return nil
}
// SetLegacyVersions configures how many legacy versions of the root will be
// inspected for old signing keys.
func (r *repository) SetLegacyVersions(n int) {
r.LegacyVersions = n
}
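
Tying the pieces together, a hypothetical end-to-end sketch of the intended call sequence: construct a file-cached repository, stage a target, and publish. The base directory, GUN, server URL, file path, and passphrase retriever below are all placeholders, not values from the original source.

```go
package main

import (
	"net/http"

	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/trustpinning"
	"github.com/theupdateframework/notary/tuf/data"
)

func main() {
	// Placeholder passphrase retriever; a real caller would prompt the user.
	retriever := func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
		return "example-passphrase", false, nil
	}
	// Hypothetical base directory, GUN, and server URL.
	repo, err := client.NewFileCachedRepository(
		"/tmp/notary-example",
		data.GUN("docker.io/library/demo"),
		"https://notary.example.com",
		http.DefaultTransport,
		retriever,
		trustpinning.TrustPinConfig{},
	)
	if err != nil {
		panic(err)
	}
	// Stage a target computed from a local file, then publish the changelist.
	target, err := client.NewTarget("v1", "/tmp/file-to-sign", nil)
	if err != nil {
		panic(err)
	}
	if err := repo.AddTarget(target); err != nil { // defaults to the "targets" role
		panic(err)
	}
	if err := repo.Publish(); err != nil {
		panic(err)
	}
}
```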


@ -1,226 +0,0 @@
package client
import (
"encoding/json"
"fmt"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/client/changelist"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
// AddDelegation creates changelist entries to add provided delegation public keys and paths.
// This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist entry if called).
func (r *repository) AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error {
if len(delegationKeys) > 0 {
err := r.AddDelegationRoleAndKeys(name, delegationKeys)
if err != nil {
return err
}
}
if len(paths) > 0 {
err := r.AddDelegationPaths(name, paths)
if err != nil {
return err
}
}
return nil
}
// AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys.
// This method is the simplest way to create a new delegation, because the delegation must have at least
// one key upon creation to be valid since we will reject the changelist while validating the threshold.
func (r *repository) AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
logrus.Debugf(`Adding delegation "%s" with threshold %d, and %d keys`,
name, notary.MinThreshold, len(delegationKeys))
// Defaulting to threshold of 1, since we don't allow for larger thresholds at the moment.
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
NewThreshold: notary.MinThreshold,
AddKeys: data.KeyList(delegationKeys),
})
if err != nil {
return err
}
template := newCreateDelegationChange(name, tdJSON)
return addChange(r.changelist, template, name)
}
// AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation.
// This method cannot create a new delegation itself because the role must meet the key threshold upon creation.
func (r *repository) AddDelegationPaths(name data.RoleName, paths []string) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
logrus.Debugf(`Adding %s paths to delegation %s`, paths, name)
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
AddPaths: paths,
})
if err != nil {
return err
}
template := newCreateDelegationChange(name, tdJSON)
return addChange(r.changelist, template, name)
}
// RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and paths.
// This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one changelist entry if called).
func (r *repository) RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error {
if len(paths) > 0 {
err := r.RemoveDelegationPaths(name, paths)
if err != nil {
return err
}
}
if len(keyIDs) > 0 {
err := r.RemoveDelegationKeys(name, keyIDs)
if err != nil {
return err
}
}
return nil
}
// RemoveDelegationRole creates a changelist to remove all paths and keys from a role, and delete the role in its entirety.
func (r *repository) RemoveDelegationRole(name data.RoleName) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
logrus.Debugf(`Removing delegation "%s"`, name)
template := newDeleteDelegationChange(name, nil)
return addChange(r.changelist, template, name)
}
// RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation.
func (r *repository) RemoveDelegationPaths(name data.RoleName, paths []string) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
logrus.Debugf(`Removing %s paths from delegation "%s"`, paths, name)
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
RemovePaths: paths,
})
if err != nil {
return err
}
template := newUpdateDelegationChange(name, tdJSON)
return addChange(r.changelist, template, name)
}
// RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation.
// When this changelist is applied, if the specified keys are the only keys left in the role,
// the role itself will be deleted in its entirety.
// It can also delete a key from all delegations under a parent using a name
// with a wildcard at the end.
func (r *repository) RemoveDelegationKeys(name data.RoleName, keyIDs []string) error {
if !data.IsDelegation(name) && !data.IsWildDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
logrus.Debugf(`Removing %s keys from delegation "%s"`, keyIDs, name)
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
RemoveKeys: keyIDs,
})
if err != nil {
return err
}
template := newUpdateDelegationChange(name, tdJSON)
return addChange(r.changelist, template, name)
}
// ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation.
func (r *repository) ClearDelegationPaths(name data.RoleName) error {
if !data.IsDelegation(name) {
return data.ErrInvalidRole{Role: name, Reason: "invalid delegation role name"}
}
logrus.Debugf(`Removing all paths from delegation "%s"`, name)
tdJSON, err := json.Marshal(&changelist.TUFDelegation{
ClearAllPaths: true,
})
if err != nil {
return err
}
template := newUpdateDelegationChange(name, tdJSON)
return addChange(r.changelist, template, name)
}
func newUpdateDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange {
return changelist.NewTUFChange(
changelist.ActionUpdate,
name,
changelist.TypeTargetsDelegation,
"", // no path for delegations
content,
)
}
func newCreateDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange {
return changelist.NewTUFChange(
changelist.ActionCreate,
name,
changelist.TypeTargetsDelegation,
"", // no path for delegations
content,
)
}
func newDeleteDelegationChange(name data.RoleName, content []byte) *changelist.TUFChange {
return changelist.NewTUFChange(
changelist.ActionDelete,
name,
changelist.TypeTargetsDelegation,
"", // no path for delegations
content,
)
}
func translateDelegationsToCanonicalIDs(delegationInfo data.Delegations) ([]data.Role, error) {
canonicalDelegations := make([]data.Role, len(delegationInfo.Roles))
// Do a copy by value to ensure local delegation metadata is untouched
for idx, origRole := range delegationInfo.Roles {
canonicalDelegations[idx] = *origRole
}
delegationKeys := delegationInfo.Keys
for i, delegation := range canonicalDelegations {
canonicalKeyIDs := []string{}
for _, keyID := range delegation.KeyIDs {
pubKey, ok := delegationKeys[keyID]
if !ok {
return []data.Role{}, fmt.Errorf("Could not translate canonical key IDs for %s", delegation.Name)
}
canonicalKeyID, err := utils.CanonicalKeyID(pubKey)
if err != nil {
return []data.Role{}, fmt.Errorf("Could not translate canonical key IDs for %s: %v", delegation.Name, err)
}
canonicalKeyIDs = append(canonicalKeyIDs, canonicalKeyID)
}
canonicalDelegations[i].KeyIDs = canonicalKeyIDs
}
return canonicalDelegations, nil
}
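
A sketch of staging and publishing a delegation change (illustrative only; it assumes a repository constructed as in the earlier sketch, a delegate's public key already loaded, and the hypothetical role name `targets/releases`):

```go
package example

import (
	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
)

// grantReleases stages a delegation for delegateKey over all paths (the
// empty-string prefix) under targets/releases, then publishes.
func grantReleases(repo client.Repository, delegateKey data.PublicKey) error {
	name := data.RoleName("targets/releases")
	if err := repo.AddDelegation(name, []data.PublicKey{delegateKey}, []string{""}); err != nil {
		return err
	}
	// Changes are only staged locally until Publish pushes them.
	return repo.Publish()
}
```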


@ -1,48 +0,0 @@
package client
import (
"fmt"
"github.com/theupdateframework/notary/tuf/data"
)
// ErrRepoNotInitialized is returned when trying to publish an uninitialized
// notary repository
type ErrRepoNotInitialized struct{}
func (err ErrRepoNotInitialized) Error() string {
return "repository has not been initialized"
}
// ErrInvalidRemoteRole is returned when the server is requested to manage
// a key type that is not permitted
type ErrInvalidRemoteRole struct {
Role data.RoleName
}
func (err ErrInvalidRemoteRole) Error() string {
return fmt.Sprintf(
"notary does not permit the server managing the %s key", err.Role.String())
}
// ErrInvalidLocalRole is returned when the client wants to manage
// a key type that is not permitted
type ErrInvalidLocalRole struct {
Role data.RoleName
}
func (err ErrInvalidLocalRole) Error() string {
return fmt.Sprintf(
"notary does not permit the client managing the %s key", err.Role)
}
// ErrRepositoryNotExist is returned when an action is taken on a remote
// repository that doesn't exist
type ErrRepositoryNotExist struct {
remote string
gun data.GUN
}
func (err ErrRepositoryNotExist) Error() string {
return fmt.Sprintf("%s does not have trust data for %s", err.remote, err.gun.String())
}
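
These error types are typically consumed with a type assertion, as `publish` does with `ErrRepositoryNotExist` above; an illustrative sketch (the function name and policy are assumptions, not part of the original source):

```go
package example

import (
	"github.com/theupdateframework/notary/client"
	"github.com/theupdateframework/notary/tuf/data"
)

// listOrEmpty treats "repository does not exist on the server yet" as an
// empty listing rather than a failure; any other error is propagated.
func listOrEmpty(repo client.Repository) ([]*client.TargetWithRole, error) {
	targets, err := repo.ListTargets(data.CanonicalTargetsRole)
	if _, ok := err.(client.ErrRepositoryNotExist); ok {
		return nil, nil
	}
	return targets, err
}
```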


@ -1,306 +0,0 @@
package client
import (
"encoding/json"
"fmt"
"net/http"
"time"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary/client/changelist"
store "github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/tuf"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
"github.com/theupdateframework/notary/tuf/utils"
)
// Use this to initialize remote HTTPStores from the config settings
func getRemoteStore(baseURL string, gun data.GUN, rt http.RoundTripper) (store.RemoteStore, error) {
s, err := store.NewHTTPStore(
baseURL+"/v2/"+gun.String()+"/_trust/tuf/",
"",
"json",
"key",
rt,
)
if err != nil {
return store.OfflineStore{}, err
}
return s, nil
}
func applyChangelist(repo *tuf.Repo, invalid *tuf.Repo, cl changelist.Changelist) error {
it, err := cl.NewIterator()
if err != nil {
return err
}
index := 0
for it.HasNext() {
c, err := it.Next()
if err != nil {
return err
}
isDel := data.IsDelegation(c.Scope()) || data.IsWildDelegation(c.Scope())
switch {
case c.Scope() == changelist.ScopeTargets || isDel:
err = applyTargetsChange(repo, invalid, c)
case c.Scope() == changelist.ScopeRoot:
err = applyRootChange(repo, c)
default:
return fmt.Errorf("scope not supported: %s", c.Scope().String())
}
if err != nil {
logrus.Debugf("error attempting to apply change #%d: %s, on scope: %s path: %s type: %s", index, c.Action(), c.Scope(), c.Path(), c.Type())
return err
}
index++
}
logrus.Debugf("applied %d change(s)", index)
return nil
}
func applyTargetsChange(repo *tuf.Repo, invalid *tuf.Repo, c changelist.Change) error {
switch c.Type() {
case changelist.TypeTargetsTarget:
return changeTargetMeta(repo, c)
case changelist.TypeTargetsDelegation:
return changeTargetsDelegation(repo, c)
case changelist.TypeWitness:
return witnessTargets(repo, invalid, c.Scope())
default:
return fmt.Errorf("only target meta and delegations changes supported")
}
}
func changeTargetsDelegation(repo *tuf.Repo, c changelist.Change) error {
switch c.Action() {
case changelist.ActionCreate:
td := changelist.TUFDelegation{}
err := json.Unmarshal(c.Content(), &td)
if err != nil {
return err
}
// Try to create brand new role or update one
// First add the keys, then the paths. We can only add keys and paths in this scenario
err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, []string{}, td.NewThreshold)
if err != nil {
return err
}
return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, []string{}, false)
case changelist.ActionUpdate:
td := changelist.TUFDelegation{}
err := json.Unmarshal(c.Content(), &td)
if err != nil {
return err
}
if data.IsWildDelegation(c.Scope()) {
return repo.PurgeDelegationKeys(c.Scope(), td.RemoveKeys)
}
delgRole, err := repo.GetDelegationRole(c.Scope())
if err != nil {
return err
}
// We need to translate the keys from canonical ID to TUF ID for compatibility
canonicalToTUFID := make(map[string]string)
for tufID, pubKey := range delgRole.Keys {
canonicalID, err := utils.CanonicalKeyID(pubKey)
if err != nil {
return err
}
canonicalToTUFID[canonicalID] = tufID
}
removeTUFKeyIDs := []string{}
for _, canonID := range td.RemoveKeys {
removeTUFKeyIDs = append(removeTUFKeyIDs, canonicalToTUFID[canonID])
}
err = repo.UpdateDelegationKeys(c.Scope(), td.AddKeys, removeTUFKeyIDs, td.NewThreshold)
if err != nil {
return err
}
return repo.UpdateDelegationPaths(c.Scope(), td.AddPaths, td.RemovePaths, td.ClearAllPaths)
case changelist.ActionDelete:
return repo.DeleteDelegation(c.Scope())
default:
return fmt.Errorf("unsupported action against delegations: %s", c.Action())
}
}
func changeTargetMeta(repo *tuf.Repo, c changelist.Change) error {
var err error
switch c.Action() {
case changelist.ActionCreate:
logrus.Debug("changelist add: ", c.Path())
meta := &data.FileMeta{}
err = json.Unmarshal(c.Content(), meta)
if err != nil {
return err
}
files := data.Files{c.Path(): *meta}
// Attempt to add the target to this role
if _, err = repo.AddTargets(c.Scope(), files); err != nil {
logrus.Errorf("couldn't add target to %s: %s", c.Scope(), err.Error())
}
case changelist.ActionDelete:
logrus.Debug("changelist remove: ", c.Path())
// Attempt to remove the target from this role
if err = repo.RemoveTargets(c.Scope(), c.Path()); err != nil {
logrus.Errorf("couldn't remove target from %s: %s", c.Scope(), err.Error())
}
default:
err = fmt.Errorf("action not yet supported: %s", c.Action())
}
return err
}
func applyRootChange(repo *tuf.Repo, c changelist.Change) error {
var err error
switch c.Type() {
case changelist.TypeBaseRole:
err = applyRootRoleChange(repo, c)
default:
err = fmt.Errorf("type of root change not yet supported: %s", c.Type())
}
return err // might be nil
}
func applyRootRoleChange(repo *tuf.Repo, c changelist.Change) error {
switch c.Action() {
case changelist.ActionCreate:
// replaces all keys for a role
d := &changelist.TUFRootData{}
err := json.Unmarshal(c.Content(), d)
if err != nil {
return err
}
err = repo.ReplaceBaseKeys(d.RoleName, d.Keys...)
if err != nil {
return err
}
default:
return fmt.Errorf("action not yet supported for root: %s", c.Action())
}
return nil
}
func nearExpiry(r data.SignedCommon) bool {
plus6mo := time.Now().AddDate(0, 6, 0)
return r.Expires.Before(plus6mo)
}
func warnRolesNearExpiry(r *tuf.Repo) {
//get every role and its respective signed common and call nearExpiry on it
//Root check
if nearExpiry(r.Root.Signed.SignedCommon) {
logrus.Warn("root is nearing expiry, you should re-sign the role metadata")
}
//Targets and delegations check
for role, signedTOrD := range r.Targets {
//signedTOrD is of type *data.SignedTargets
if nearExpiry(signedTOrD.Signed.SignedCommon) {
logrus.Warn(role, " metadata is nearing expiry, you should re-sign the role metadata")
}
}
//Snapshot check
if nearExpiry(r.Snapshot.Signed.SignedCommon) {
logrus.Warn("snapshot is nearing expiry, you should re-sign the role metadata")
}
//do not need to worry about Timestamp, notary signer will re-sign with the timestamp key
}
// Fetches a public key from a remote store, given a gun and role
func getRemoteKey(role data.RoleName, remote store.RemoteStore) (data.PublicKey, error) {
rawPubKey, err := remote.GetKey(role)
if err != nil {
return nil, err
}
pubKey, err := data.UnmarshalPublicKey(rawPubKey)
if err != nil {
return nil, err
}
return pubKey, nil
}
// Rotates a private key in a remote store and returns the public key component
func rotateRemoteKey(role data.RoleName, remote store.RemoteStore) (data.PublicKey, error) {
rawPubKey, err := remote.RotateKey(role)
if err != nil {
return nil, err
}
pubKey, err := data.UnmarshalPublicKey(rawPubKey)
if err != nil {
return nil, err
}
return pubKey, nil
}
// signs and serializes the metadata for a canonical role in a TUF repo to JSON
func serializeCanonicalRole(tufRepo *tuf.Repo, role data.RoleName, extraSigningKeys data.KeyList) (out []byte, err error) {
var s *data.Signed
switch {
case role == data.CanonicalRootRole:
s, err = tufRepo.SignRoot(data.DefaultExpires(role), extraSigningKeys)
case role == data.CanonicalSnapshotRole:
s, err = tufRepo.SignSnapshot(data.DefaultExpires(role))
case tufRepo.Targets[role] != nil:
s, err = tufRepo.SignTargets(
role, data.DefaultExpires(data.CanonicalTargetsRole))
default:
err = fmt.Errorf("%s not supported role to sign on the client", role)
}
if err != nil {
return
}
return json.Marshal(s)
}
func getAllPrivKeys(rootKeyIDs []string, cryptoService signed.CryptoService) ([]data.PrivateKey, error) {
if cryptoService == nil {
return nil, fmt.Errorf("no crypto service available to get private keys from")
}
privKeys := make([]data.PrivateKey, 0, len(rootKeyIDs))
for _, keyID := range rootKeyIDs {
privKey, _, err := cryptoService.GetPrivateKey(keyID)
if err != nil {
return nil, err
}
privKeys = append(privKeys, privKey)
}
if len(privKeys) == 0 {
var rootKeyID string
rootKeyList := cryptoService.ListKeys(data.CanonicalRootRole)
if len(rootKeyList) == 0 {
rootPublicKey, err := cryptoService.Create(data.CanonicalRootRole, "", data.ECDSAKey)
if err != nil {
return nil, err
}
rootKeyID = rootPublicKey.ID()
} else {
rootKeyID = rootKeyList[0]
}
privKey, _, err := cryptoService.GetPrivateKey(rootKeyID)
if err != nil {
return nil, err
}
privKeys = append(privKeys, privKey)
}
return privKeys, nil
}
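
As an aside on the expiry check above: `nearExpiry` is a plain comparison against a six-month window. A minimal standalone sketch of the same logic (the `expires` value here is a hypothetical stand-in for a role's metadata expiry):

    package main

    import (
        "fmt"
        "time"
    )

    // nearExpiry mirrors the helper above: a role counts as "near expiry"
    // when its expiry time falls within the next six months.
    func nearExpiry(expires time.Time) bool {
        plus6mo := time.Now().AddDate(0, 6, 0) // six months from now
        return expires.Before(plus6mo)
    }

    func main() {
        expires := time.Now().AddDate(0, 3, 0) // hypothetical: expires in three months
        fmt.Println(nearExpiry(expires))       // prints true: inside the six-month window
    }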

View File

@ -1,150 +0,0 @@
package client
import (
"github.com/theupdateframework/notary/client/changelist"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
)
// ReadOnly represents the set of operations that must be supported over a TUF repo
// for reading
type ReadOnly interface {
// ListTargets lists all targets for the current repository. The list of
// roles should be passed in order from highest to lowest priority.
//
// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x",
// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
// its entries will be strictly shadowed by those in other parts of the "targets/a"
// subtree and also the "targets/x" subtree, as we will defer parsing it until
// we explicitly reach it in our iteration of the provided list of roles.
ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error)
// GetTargetByName returns a target by the given name. If no roles are passed
// it uses the targets role and does a search of the entire delegation
// graph, finding the first entry in a breadth first search of the delegations.
// If roles are passed, they should be passed in descending priority and
// the target entry found in the subtree of the highest priority role
// will be returned.
// See the IMPORTANT section on ListTargets above. Those roles also apply here.
GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error)
// GetAllTargetMetadataByName searches the entire delegation role tree to find
// the specified target by name for all roles, and returns a list of
// TargetSignedStructs for each time it finds the specified target.
// If given an empty string for a target name, it will return back all targets
// signed into the repository in every role
GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error)
// ListRoles returns a list of RoleWithSignatures objects for this repo
// This represents the latest metadata for each role in this repo
ListRoles() ([]RoleWithSignatures, error)
// GetDelegationRoles returns the keys and roles of the repository's delegations
// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
GetDelegationRoles() ([]data.Role, error)
}
// Repository represents the set of operations that must be supported over a TUF repo
// for both reading and writing.
type Repository interface {
ReadOnly
// ------------------- Publishing operations -------------------
// GetGUN returns the GUN associated with the repository
GetGUN() data.GUN
// SetLegacyVersions sets the number of versions back to fetch roots to sign with
SetLegacyVersions(int)
// ----- General management operations -----
// Initialize creates a new repository by using rootKey as the root Key for the
// TUF repository. The remote store/server must be reachable (and is asked to
// generate a timestamp key and possibly other serverManagedRoles), but the
// created repository result is only stored on local cache, not published to
// the remote store. To do that, use r.Publish() eventually.
Initialize(rootKeyIDs []string, serverManagedRoles ...data.RoleName) error
// InitializeWithCertificate initializes the repository with root keys and their
// corresponding certificates
InitializeWithCertificate(rootKeyIDs []string, rootCerts []data.PublicKey, serverManagedRoles ...data.RoleName) error
// Publish pushes the local changes in signed material to the remote notary-server
// Conceptually it performs an operation similar to a `git rebase`
Publish() error
// ----- Target Operations -----
// AddTarget creates new changelist entries to add a target to the given roles
// in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "targets"
AddTarget(target *Target, roles ...data.RoleName) error
// RemoveTarget creates new changelist entries to remove a target from the given
// roles in the repository when the changelist gets applied at publish time.
// If roles are unspecified, the default role is "target".
RemoveTarget(targetName string, roles ...data.RoleName) error
// ----- Changelist operations -----
// GetChangelist returns the list of the repository's unpublished changes
GetChangelist() (changelist.Changelist, error)
// ----- Role operations -----
// AddDelegation creates changelist entries to add provided delegation public keys and paths.
// This method composes AddDelegationRoleAndKeys and AddDelegationPaths (each creates one changelist if called).
AddDelegation(name data.RoleName, delegationKeys []data.PublicKey, paths []string) error
// AddDelegationRoleAndKeys creates a changelist entry to add provided delegation public keys.
// This method is the simplest way to create a new delegation, because the delegation must have at least
// one key upon creation to be valid since we will reject the changelist while validating the threshold.
AddDelegationRoleAndKeys(name data.RoleName, delegationKeys []data.PublicKey) error
// AddDelegationPaths creates a changelist entry to add provided paths to an existing delegation.
// This method cannot create a new delegation itself because the role must meet the key threshold upon
// creation.
AddDelegationPaths(name data.RoleName, paths []string) error
// RemoveDelegationKeysAndPaths creates changelist entries to remove provided delegation key IDs and
// paths. This method composes RemoveDelegationPaths and RemoveDelegationKeys (each creates one
// changelist entry if called).
RemoveDelegationKeysAndPaths(name data.RoleName, keyIDs, paths []string) error
// RemoveDelegationRole creates a changelist to remove all paths and keys from a role, and delete the
// role in its entirety.
RemoveDelegationRole(name data.RoleName) error
// RemoveDelegationPaths creates a changelist entry to remove provided paths from an existing delegation.
RemoveDelegationPaths(name data.RoleName, paths []string) error
// RemoveDelegationKeys creates a changelist entry to remove provided keys from an existing delegation.
// When this changelist is applied, if the specified keys are the only keys left in the role,
// the role itself will be deleted in its entirety.
// It can also delete a key from all delegations under a parent using a name
// with a wildcard at the end.
RemoveDelegationKeys(name data.RoleName, keyIDs []string) error
// ClearDelegationPaths creates a changelist entry to remove all paths from an existing delegation.
ClearDelegationPaths(name data.RoleName) error
// ----- Witness and other re-signing operations -----
// Witness creates change objects to witness (i.e. re-sign) the given
// roles on the next publish. One change is created per role
Witness(roles ...data.RoleName) ([]data.RoleName, error)
// ----- Key Operations -----
// RotateKey removes all existing keys associated with the role. If no keys are
// specified in keyList, then this creates and adds one new key or delegates
// managing the key to the server. If key(s) are specified by keyList, then they are
// used for signing the role.
// These changes are staged in a changelist until publish is called.
RotateKey(role data.RoleName, serverManagesKey bool, keyList []string) error
// GetCryptoService is the getter for the repository's CryptoService, which is used
// to sign all updates.
GetCryptoService() signed.CryptoService
}
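
A minimal usage sketch of the interface above, assuming `repo` is an already-initialized `Repository` implementation obtained elsewhere (the constructors are not part of this file):

    package example

    import client "github.com/theupdateframework/notary/client"

    // addAndPublish stages a target addition on the default "targets" role
    // and then publishes the staged changelist to the notary server.
    func addAndPublish(repo client.Repository, target *client.Target) error {
        if err := repo.AddTarget(target); err != nil { // staged only, not yet published
            return err
        }
        return repo.Publish() // applies the changelist, similar to a `git rebase`
    }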

View File

@ -1,257 +0,0 @@
package client
import (
"fmt"
canonicaljson "github.com/docker/go/canonical/json"
store "github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/tuf"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
// Target represents a simplified version of the data TUF operates on, so external
// applications don't have to depend on TUF data types.
type Target struct {
Name string // the name of the target
Hashes data.Hashes // the hashes of the target
Length int64 // the size in bytes of the target
Custom *canonicaljson.RawMessage // the custom data provided to describe the file at TARGETPATH
}
// TargetWithRole represents a Target that exists in a particular role - this is
// produced by ListTargets and GetTargetByName
type TargetWithRole struct {
Target
Role data.RoleName
}
// TargetSignedStruct is a struct that contains a Target, the role it was found in, and the list of signatures for that role
type TargetSignedStruct struct {
Role data.DelegationRole
Target Target
Signatures []data.Signature
}
// ErrNoSuchTarget is returned when no valid trust data is found.
type ErrNoSuchTarget string
func (f ErrNoSuchTarget) Error() string {
return fmt.Sprintf("No valid trust data for %s", string(f))
}
// RoleWithSignatures is a Role with its associated signatures
type RoleWithSignatures struct {
Signatures []data.Signature
data.Role
}
// NewReadOnly returns a new read-only notary repository for reading, backed
// by the given TUF repo.
func NewReadOnly(repo *tuf.Repo) ReadOnly {
return &reader{tufRepo: repo}
}
type reader struct {
tufRepo *tuf.Repo
}
// ListTargets lists all targets for the current repository. The list of
// roles should be passed in order from highest to lowest priority.
//
// IMPORTANT: if you pass a set of roles such as [ "targets/a", "targets/x",
// "targets/a/b" ], even though "targets/a/b" is part of the "targets/a" subtree
// its entries will be strictly shadowed by those in other parts of the "targets/a"
// subtree and also the "targets/x" subtree, as we will defer parsing it until
// we explicitly reach it in our iteration of the provided list of roles.
func (r *reader) ListTargets(roles ...data.RoleName) ([]*TargetWithRole, error) {
if len(roles) == 0 {
roles = []data.RoleName{data.CanonicalTargetsRole}
}
targets := make(map[string]*TargetWithRole)
for _, role := range roles {
// Define an array of roles to skip for this walk (see IMPORTANT comment above)
skipRoles := utils.RoleNameSliceRemove(roles, role)
// Define a visitor function to populate the targets map in priority order
listVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
// We found targets so we should try to add them to our targets map
for targetName, targetMeta := range tgt.Signed.Targets {
// Follow the priority by not overriding previously set targets
// and check that this path is valid with this role
if _, ok := targets[targetName]; ok || !validRole.CheckPaths(targetName) {
continue
}
targets[targetName] = &TargetWithRole{
Target: Target{
Name: targetName,
Hashes: targetMeta.Hashes,
Length: targetMeta.Length,
Custom: targetMeta.Custom,
},
Role: validRole.Name,
}
}
return nil
}
r.tufRepo.WalkTargets("", role, listVisitorFunc, skipRoles...)
}
var targetList []*TargetWithRole
for _, v := range targets {
targetList = append(targetList, v)
}
return targetList, nil
}
// GetTargetByName returns a target by the given name. If no roles are passed
// it uses the targets role and does a search of the entire delegation
// graph, finding the first entry in a breadth first search of the delegations.
// If roles are passed, they should be passed in descending priority and
// the target entry found in the subtree of the highest priority role
// will be returned.
// See the IMPORTANT section on ListTargets above. Those roles also apply here.
func (r *reader) GetTargetByName(name string, roles ...data.RoleName) (*TargetWithRole, error) {
if len(roles) == 0 {
roles = append(roles, data.CanonicalTargetsRole)
}
var resultMeta data.FileMeta
var resultRoleName data.RoleName
var foundTarget bool
for _, role := range roles {
// Define an array of roles to skip for this walk (see IMPORTANT comment above)
skipRoles := utils.RoleNameSliceRemove(roles, role)
// Define a visitor function to find the specified target
getTargetVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
if tgt == nil {
return nil
}
// We found the target and validated path compatibility in our walk,
// so we should stop our walk and set the resultMeta and resultRoleName variables
if resultMeta, foundTarget = tgt.Signed.Targets[name]; foundTarget {
resultRoleName = validRole.Name
return tuf.StopWalk{}
}
return nil
}
// Check that we didn't error, and that we assigned to our target
if err := r.tufRepo.WalkTargets(name, role, getTargetVisitorFunc, skipRoles...); err == nil && foundTarget {
return &TargetWithRole{Target: Target{Name: name, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom}, Role: resultRoleName}, nil
}
}
return nil, ErrNoSuchTarget(name)
}
// GetAllTargetMetadataByName searches the entire delegation role tree to find the specified target by name for all
// roles, and returns a list of TargetSignedStructs for each time it finds the specified target.
// If given an empty string for a target name, it will return back all targets signed into the repository in every role
func (r *reader) GetAllTargetMetadataByName(name string) ([]TargetSignedStruct, error) {
var targetInfoList []TargetSignedStruct
// Define a visitor function to find the specified target
getAllTargetInfoByNameVisitorFunc := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
if tgt == nil {
return nil
}
// We found a target and validated path compatibility in our walk,
// so add it to our list if we have a match
// if we have an empty name, add all targets, else check if we have it
var targetMetaToAdd data.Files
if name == "" {
targetMetaToAdd = tgt.Signed.Targets
} else {
if meta, ok := tgt.Signed.Targets[name]; ok {
targetMetaToAdd = data.Files{name: meta}
}
}
for targetName, resultMeta := range targetMetaToAdd {
targetInfo := TargetSignedStruct{
Role: validRole,
Target: Target{Name: targetName, Hashes: resultMeta.Hashes, Length: resultMeta.Length, Custom: resultMeta.Custom},
Signatures: tgt.Signatures,
}
targetInfoList = append(targetInfoList, targetInfo)
}
// continue walking to all child roles
return nil
}
// Check that we didn't error, and that we found the target at least once
if err := r.tufRepo.WalkTargets(name, "", getAllTargetInfoByNameVisitorFunc); err != nil {
return nil, err
}
if len(targetInfoList) == 0 {
return nil, ErrNoSuchTarget(name)
}
return targetInfoList, nil
}
// ListRoles returns a list of RoleWithSignatures objects for this repo
// This represents the latest metadata for each role in this repo
func (r *reader) ListRoles() ([]RoleWithSignatures, error) {
// Get all role info from our updated keysDB; it can be empty
roles := r.tufRepo.GetAllLoadedRoles()
var roleWithSigs []RoleWithSignatures
// Populate RoleWithSignatures with Role from keysDB and signatures from TUF metadata
for _, role := range roles {
roleWithSig := RoleWithSignatures{Role: *role, Signatures: nil}
switch role.Name {
case data.CanonicalRootRole:
roleWithSig.Signatures = r.tufRepo.Root.Signatures
case data.CanonicalTargetsRole:
roleWithSig.Signatures = r.tufRepo.Targets[data.CanonicalTargetsRole].Signatures
case data.CanonicalSnapshotRole:
roleWithSig.Signatures = r.tufRepo.Snapshot.Signatures
case data.CanonicalTimestampRole:
roleWithSig.Signatures = r.tufRepo.Timestamp.Signatures
default:
if !data.IsDelegation(role.Name) {
continue
}
if _, ok := r.tufRepo.Targets[role.Name]; ok {
// We'll only find a signature if we've published any targets with this delegation
roleWithSig.Signatures = r.tufRepo.Targets[role.Name].Signatures
}
}
roleWithSigs = append(roleWithSigs, roleWithSig)
}
return roleWithSigs, nil
}
// GetDelegationRoles returns the keys and roles of the repository's delegations
// Also converts key IDs to canonical key IDs to keep consistent with signing prompts
func (r *reader) GetDelegationRoles() ([]data.Role, error) {
// All top level delegations (ex: targets/level1) are stored exclusively in targets.json
_, ok := r.tufRepo.Targets[data.CanonicalTargetsRole]
if !ok {
return nil, store.ErrMetaNotFound{Resource: data.CanonicalTargetsRole.String()}
}
// make a copy for traversing nested delegations
allDelegations := []data.Role{}
// Define a visitor function to populate the delegations list and translate their key IDs to canonical IDs
delegationCanonicalListVisitor := func(tgt *data.SignedTargets, validRole data.DelegationRole) interface{} {
// For the return list, update with a copy that includes canonicalKeyIDs
// These aren't validated by the validRole
canonicalDelegations, err := translateDelegationsToCanonicalIDs(tgt.Signed.Delegations)
if err != nil {
return err
}
allDelegations = append(allDelegations, canonicalDelegations...)
return nil
}
err := r.tufRepo.WalkTargets("", "", delegationCanonicalListVisitor)
if err != nil {
return nil, err
}
return allDelegations, nil
}
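
To make the priority semantics above concrete: roles passed earlier shadow same-named targets from roles passed later. A hedged sketch, where `targets/releases` is a hypothetical delegation name:

    package example

    import (
        client "github.com/theupdateframework/notary/client"
        "github.com/theupdateframework/notary/tuf"
        "github.com/theupdateframework/notary/tuf/data"
    )

    // listByPriority lists targets such that entries signed into
    // "targets/releases" win over same-named entries in the base role.
    func listByPriority(repo *tuf.Repo) ([]*client.TargetWithRole, error) {
        r := client.NewReadOnly(repo)
        return r.ListTargets(
            data.RoleName("targets/releases"), // highest priority
            data.CanonicalTargetsRole,         // lowest priority
        )
    }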

View File

@ -1,18 +0,0 @@
// +build !pkcs11
package client
import (
"fmt"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/trustmanager"
)
func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) {
fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
if err != nil {
return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
}
return []trustmanager.KeyStore{fileKeyStore}, nil
}

View File

@ -1,25 +0,0 @@
// +build pkcs11
package client
import (
"fmt"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/trustmanager"
"github.com/theupdateframework/notary/trustmanager/yubikey"
)
func getKeyStores(baseDir string, retriever notary.PassRetriever) ([]trustmanager.KeyStore, error) {
fileKeyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
if err != nil {
return nil, fmt.Errorf("failed to create private key store in directory: %s", baseDir)
}
keyStores := []trustmanager.KeyStore{fileKeyStore}
yubiKeyStore, _ := yubikey.NewYubiStore(fileKeyStore, retriever)
if yubiKeyStore != nil {
keyStores = []trustmanager.KeyStore{yubiKeyStore, fileKeyStore}
}
return keyStores, nil
}
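
The two `getKeyStores` variants above are selected at compile time by build tags: building with `-tags pkcs11` compiles the Yubikey-aware file, otherwise the plain file-store variant is used. A generic illustration of the mechanism (not part of this diff):

    // +build pkcs11

    // Package example: this file only compiles when building with
    // `go build -tags pkcs11`; a sibling file tagged "// +build !pkcs11"
    // would provide the fallback implementation.
    package example

    const pkcs11Enabled = true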

View File

@ -1,463 +0,0 @@
package client
import (
"encoding/json"
"fmt"
"regexp"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/cryptoservice"
store "github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/trustpinning"
"github.com/theupdateframework/notary/tuf"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
)
// tufClient is a usability wrapper around a raw TUF repo
type tufClient struct {
remote store.RemoteStore
cache store.MetadataStore
oldBuilder tuf.RepoBuilder
newBuilder tuf.RepoBuilder
}
// Update performs an update to the TUF repo as defined by the TUF spec
func (c *tufClient) Update() (*tuf.Repo, *tuf.Repo, error) {
// 1. Get timestamp
// a. If timestamp error (verification, expired, etc...) download new root and return to 1.
// 2. Check if local snapshot is up to date
// a. If out of date, get updated snapshot
// i. If snapshot error, download new root and return to 1.
// 3. Check if root correct against snapshot
// a. If incorrect, download new root and return to 1.
// 4. Iteratively download and search targets and delegations to find target meta
logrus.Debug("updating TUF client")
err := c.update()
if err != nil {
logrus.Debug("Error occurred. Root will be downloaded and another update attempted")
logrus.Debug("Resetting the TUF builder...")
c.newBuilder = c.newBuilder.BootstrapNewBuilder()
if err := c.updateRoot(); err != nil {
logrus.Debug("Client Update (Root): ", err)
return nil, nil, err
}
// If we error again, we now have the latest root and just want to fail
// out as there's no expectation the problem can be resolved automatically
logrus.Debug("retrying TUF client update")
if err := c.update(); err != nil {
return nil, nil, err
}
}
return c.newBuilder.Finish()
}
func (c *tufClient) update() error {
if err := c.downloadTimestamp(); err != nil {
logrus.Debugf("Client Update (Timestamp): %s", err.Error())
return err
}
if err := c.downloadSnapshot(); err != nil {
logrus.Debugf("Client Update (Snapshot): %s", err.Error())
return err
}
// will always need top level targets at a minimum
if err := c.downloadTargets(); err != nil {
logrus.Debugf("Client Update (Targets): %s", err.Error())
return err
}
return nil
}
// updateRoot checks if there is a newer version of the root available, and if so
// downloads all intermediate root files to allow proper key rotation.
func (c *tufClient) updateRoot() error {
// Get current root version
currentRootConsistentInfo := c.oldBuilder.GetConsistentInfo(data.CanonicalRootRole)
currentVersion := c.oldBuilder.GetLoadedVersion(currentRootConsistentInfo.RoleName)
// Get new root version
raw, err := c.downloadRoot()
switch err.(type) {
case *trustpinning.ErrRootRotationFail:
// Rotation errors are okay since we haven't yet downloaded
// all intermediate root files
break
case nil:
// No error updating root - we were at most 1 version behind
return nil
default:
// Return any non-rotation error.
return err
}
// Load current version into newBuilder
currentRaw, err := c.cache.GetSized(data.CanonicalRootRole.String(), -1)
if err != nil {
logrus.Debugf("error loading %d.%s: %s", currentVersion, data.CanonicalRootRole, err)
return err
}
if err := c.newBuilder.LoadRootForUpdate(currentRaw, currentVersion, false); err != nil {
logrus.Debugf("%d.%s is invalid: %s", currentVersion, data.CanonicalRootRole, err)
return err
}
// Extract newest version number
signedRoot := &data.Signed{}
if err := json.Unmarshal(raw, signedRoot); err != nil {
return err
}
newestRoot, err := data.RootFromSigned(signedRoot)
if err != nil {
return err
}
newestVersion := newestRoot.Signed.SignedCommon.Version
// Update from current + 1 (current already loaded) to newest - 1 (newest loaded below)
if err := c.updateRootVersions(currentVersion+1, newestVersion-1); err != nil {
return err
}
// Already downloaded newest, verify it against newest - 1
if err := c.newBuilder.LoadRootForUpdate(raw, newestVersion, true); err != nil {
logrus.Debugf("downloaded %d.%s is invalid: %s", newestVersion, data.CanonicalRootRole, err)
return err
}
logrus.Debugf("successfully verified downloaded %d.%s", newestVersion, data.CanonicalRootRole)
// Write newest to cache
if err := c.cache.Set(data.CanonicalRootRole.String(), raw); err != nil {
logrus.Debugf("unable to write %d.%s to cache: %s", newestVersion, data.CanonicalRootRole, err)
}
logrus.Debugf("finished updating root files")
return nil
}
// updateRootVersions updates the root from its current version to a target version, rotating keys
// as they are found
func (c *tufClient) updateRootVersions(fromVersion, toVersion int) error {
for v := fromVersion; v <= toVersion; v++ {
logrus.Debugf("updating root from version %d to version %d, currently fetching %d", fromVersion, toVersion, v)
versionedRole := fmt.Sprintf("%d.%s", v, data.CanonicalRootRole)
raw, err := c.remote.GetSized(versionedRole, -1)
if err != nil {
logrus.Debugf("error downloading %s: %s", versionedRole, err)
return err
}
if err := c.newBuilder.LoadRootForUpdate(raw, v, false); err != nil {
logrus.Debugf("downloaded %s is invalid: %s", versionedRole, err)
return err
}
logrus.Debugf("successfully verified downloaded %s", versionedRole)
}
return nil
}
// downloadTimestamp is responsible for downloading the timestamp.json
// Timestamps are special in that we ALWAYS attempt to download and only
// use cache if the download fails (and the cache is still valid).
func (c *tufClient) downloadTimestamp() error {
logrus.Debug("Loading timestamp...")
role := data.CanonicalTimestampRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
// always get the remote timestamp, since it supersedes the local one
cachedTS, cachedErr := c.cache.GetSized(role.String(), notary.MaxTimestampSize)
_, remoteErr := c.tryLoadRemote(consistentInfo, cachedTS)
// Check that there was no remote error, or that it was only a network problem.
// If there was a validation error, we should error out so we can download a new root or fail the update
switch remoteErr.(type) {
case nil:
return nil
case store.ErrMetaNotFound, store.ErrServerUnavailable, store.ErrOffline, store.NetworkError:
break
default:
return remoteErr
}
// since it was a network error: get the cached timestamp, if it exists
if cachedErr != nil {
logrus.Debug("no cached or remote timestamp available")
return remoteErr
}
logrus.Warn("Error while downloading remote metadata, using cached timestamp - this might not be the latest version available remotely")
err := c.newBuilder.Load(role, cachedTS, 1, false)
if err == nil {
logrus.Debug("successfully verified cached timestamp")
}
return err
}
// downloadSnapshot is responsible for downloading the snapshot.json
func (c *tufClient) downloadSnapshot() error {
logrus.Debug("Loading snapshot...")
role := data.CanonicalSnapshotRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
_, err := c.tryLoadCacheThenRemote(consistentInfo)
return err
}
// downloadTargets downloads all targets and delegated targets for the repository.
// It uses a pre-order tree traversal as it's necessary to download parents first
// to obtain the keys to validate children.
func (c *tufClient) downloadTargets() error {
toDownload := []data.DelegationRole{{
BaseRole: data.BaseRole{Name: data.CanonicalTargetsRole},
Paths: []string{""},
}}
for len(toDownload) > 0 {
role := toDownload[0]
toDownload = toDownload[1:]
consistentInfo := c.newBuilder.GetConsistentInfo(role.Name)
if !consistentInfo.ChecksumKnown() {
logrus.Debugf("skipping %s because there is no checksum for it", role.Name)
continue
}
children, err := c.getTargetsFile(role, consistentInfo)
switch err.(type) {
case signed.ErrExpired, signed.ErrRoleThreshold:
if role.Name == data.CanonicalTargetsRole {
return err
}
logrus.Warnf("Error getting %s: %s", role.Name, err)
break
case nil:
toDownload = append(children, toDownload...)
default:
return err
}
}
return nil
}
func (c tufClient) getTargetsFile(role data.DelegationRole, ci tuf.ConsistentInfo) ([]data.DelegationRole, error) {
logrus.Debugf("Loading %s...", role.Name)
tgs := &data.SignedTargets{}
raw, err := c.tryLoadCacheThenRemote(ci)
if err != nil {
return nil, err
}
// we know it unmarshals because if `tryLoadCacheThenRemote` didn't fail, then
// the raw bytes have already been loaded into the builder
json.Unmarshal(raw, tgs)
return tgs.GetValidDelegations(role), nil
}
// downloadRoot is responsible for downloading the root.json
func (c *tufClient) downloadRoot() ([]byte, error) {
role := data.CanonicalRootRole
consistentInfo := c.newBuilder.GetConsistentInfo(role)
// We can't read an exact size for the root metadata without risking getting stuck in the TUF update cycle
// since it's possible that downloading timestamp/snapshot metadata may fail due to a signature mismatch
if !consistentInfo.ChecksumKnown() {
logrus.Debugf("Loading root with no expected checksum")
// get the cached root, if it exists, just for version checking
cachedRoot, _ := c.cache.GetSized(role.String(), -1)
// prefer to download a new root
return c.tryLoadRemote(consistentInfo, cachedRoot)
}
return c.tryLoadCacheThenRemote(consistentInfo)
}
func (c *tufClient) tryLoadCacheThenRemote(consistentInfo tuf.ConsistentInfo) ([]byte, error) {
cachedTS, err := c.cache.GetSized(consistentInfo.RoleName.String(), consistentInfo.Length())
if err != nil {
logrus.Debugf("no %s in cache, must download", consistentInfo.RoleName)
return c.tryLoadRemote(consistentInfo, nil)
}
if err = c.newBuilder.Load(consistentInfo.RoleName, cachedTS, 1, false); err == nil {
logrus.Debugf("successfully verified cached %s", consistentInfo.RoleName)
return cachedTS, nil
}
logrus.Debugf("cached %s is invalid (must download): %s", consistentInfo.RoleName, err)
return c.tryLoadRemote(consistentInfo, cachedTS)
}
func (c *tufClient) tryLoadRemote(consistentInfo tuf.ConsistentInfo, old []byte) ([]byte, error) {
consistentName := consistentInfo.ConsistentName()
raw, err := c.remote.GetSized(consistentName, consistentInfo.Length())
if err != nil {
logrus.Debugf("error downloading %s: %s", consistentName, err)
return old, err
}
// try to load the old data into the old builder - only use it to validate
// versions if it loads successfully. If it errors, then the loaded version
// will be 1
c.oldBuilder.Load(consistentInfo.RoleName, old, 1, true)
minVersion := c.oldBuilder.GetLoadedVersion(consistentInfo.RoleName)
if err := c.newBuilder.Load(consistentInfo.RoleName, raw, minVersion, false); err != nil {
logrus.Debugf("downloaded %s is invalid: %s", consistentName, err)
return raw, err
}
logrus.Debugf("successfully verified downloaded %s", consistentName)
if err := c.cache.Set(consistentInfo.RoleName.String(), raw); err != nil {
logrus.Debugf("Unable to write %s to cache: %s", consistentInfo.RoleName, err)
}
return raw, nil
}
// TUFLoadOptions are provided to LoadTUFRepo, which loads a TUF repo from cache,
// from a remote store, or both
type TUFLoadOptions struct {
GUN data.GUN
TrustPinning trustpinning.TrustPinConfig
CryptoService signed.CryptoService
Cache store.MetadataStore
RemoteStore store.RemoteStore
AlwaysCheckInitialized bool
}
// bootstrapClient attempts to bootstrap a root.json to be used as the trust
// anchor for a repository. The AlwaysCheckInitialized option indicates whether
// we should always attempt to contact the server to determine if the repository
// is initialized or not. If set to true, we will always attempt to download
// root metadata and return an error if the remote repository errors.
//
// Populates a tuf.RepoBuilder with this root metadata. If the root metadata
// downloaded is a newer version than what is on disk, then intermediate
// versions will be downloaded and verified in order to rotate trusted keys
// properly. Newer root metadata must always be signed with the previous
// threshold and keys.
//
// Fails if the remote server is reachable and does not know the repo
// (i.e. before any metadata has been published), in which case the error is
// store.ErrMetaNotFound, or if the root metadata (from whichever source is used)
// is not trusted.
//
// Returns a TUFClient for the remote server, which may not actually be
// operational (if the URL is invalid but a root.json is cached).
func bootstrapClient(l TUFLoadOptions) (*tufClient, error) {
minVersion := 1
// the old root on disk should not be validated against any trust pinning configuration
// because if we have an old root, it itself is the thing that pins trust
oldBuilder := tuf.NewRepoBuilder(l.GUN, l.CryptoService, trustpinning.TrustPinConfig{})
// by default, we want to use the trust pinning configuration on any new root that we download
newBuilder := tuf.NewRepoBuilder(l.GUN, l.CryptoService, l.TrustPinning)
// Try to read root from cache first. We will trust this root until we detect a problem
// during update which will cause us to download a new root and perform a rotation.
// If we have an old root, and it's valid, then we overwrite the newBuilder to be one
// preloaded with the old root or one which uses the old root for trust bootstrapping.
if rootJSON, err := l.Cache.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit); err == nil {
// if we can't load the cached root, fail hard because that is how we pin trust
if err := oldBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, true); err != nil {
return nil, err
}
// again, the root on disk is the source of trust pinning, so use an empty trust
// pinning configuration
newBuilder = tuf.NewRepoBuilder(l.GUN, l.CryptoService, trustpinning.TrustPinConfig{})
if err := newBuilder.Load(data.CanonicalRootRole, rootJSON, minVersion, false); err != nil {
// Ok, the old root is expired - we want to download a new one. But we want to use the
// old root to verify the new root, so bootstrap a new builder with the old builder
// but use the trustpinning to validate the new root
minVersion = oldBuilder.GetLoadedVersion(data.CanonicalRootRole)
newBuilder = oldBuilder.BootstrapNewBuilderWithNewTrustpin(l.TrustPinning)
}
}
if !newBuilder.IsLoaded(data.CanonicalRootRole) || l.AlwaysCheckInitialized {
// remoteErr was nil and we were not able to load a root from cache or
// are specifically checking for initialization of the repo.
// if remote store successfully set up, try and get root from remote
// We don't have any local data to determine the size of root, so try the maximum (though it is restricted to 100MB)
tmpJSON, err := l.RemoteStore.GetSized(data.CanonicalRootRole.String(), store.NoSizeLimit)
if err != nil {
// we didn't have a root in cache and were unable to load one from
// the server. Nothing we can do but error.
return nil, err
}
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
// we always want to use the downloaded root if we couldn't load from cache
if err := newBuilder.Load(data.CanonicalRootRole, tmpJSON, minVersion, false); err != nil {
return nil, err
}
err = l.Cache.Set(data.CanonicalRootRole.String(), tmpJSON)
if err != nil {
// if we can't write cache we should still continue, just log error
logrus.Errorf("could not save root to cache: %s", err.Error())
}
}
}
// If we still don't have a root at this point, neither the cache nor the
// remote provided usable root metadata, so the repo is uninitialized
if !newBuilder.IsLoaded(data.CanonicalRootRole) {
return nil, ErrRepoNotInitialized{}
}
return &tufClient{
oldBuilder: oldBuilder,
newBuilder: newBuilder,
remote: l.RemoteStore,
cache: l.Cache,
}, nil
}
// LoadTUFRepo bootstraps a trust anchor (root.json) from cache (if provided) before updating
// all the metadata for the repo from the remote (if provided). It loads a TUF repo from cache,
// from a remote store, or both.
func LoadTUFRepo(options TUFLoadOptions) (*tuf.Repo, *tuf.Repo, error) {
// set some sane defaults so that not everything has to be provided
if options.RemoteStore == nil {
options.RemoteStore = store.OfflineStore{}
}
if options.Cache == nil {
options.Cache = store.NewMemoryStore(nil)
}
if options.CryptoService == nil {
options.CryptoService = cryptoservice.EmptyService
}
c, err := bootstrapClient(options)
if err != nil {
if _, ok := err.(store.ErrMetaNotFound); ok {
return nil, nil, ErrRepositoryNotExist{
remote: options.RemoteStore.Location(),
gun: options.GUN,
}
}
return nil, nil, err
}
repo, invalid, err := c.Update()
if err != nil {
// notFound.Resource may include a version or checksum so when the role is root,
// it will be root, <version>.root or root.<checksum>.
notFound, ok := err.(store.ErrMetaNotFound)
isRoot, _ := regexp.MatchString(`\.?`+data.CanonicalRootRole.String()+`\.?`, notFound.Resource)
if ok && isRoot {
return nil, nil, ErrRepositoryNotExist{
remote: options.RemoteStore.Location(),
gun: options.GUN,
}
}
return nil, nil, err
}
warnRolesNearExpiry(repo)
return repo, invalid, nil
}
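
A hedged sketch of driving `LoadTUFRepo` with the options above; the cache and remote store are assumed to be constructed elsewhere, and omitted fields fall back to the defaults shown in the function:

    package example

    import (
        client "github.com/theupdateframework/notary/client"
        store "github.com/theupdateframework/notary/storage"
        "github.com/theupdateframework/notary/tuf"
        "github.com/theupdateframework/notary/tuf/data"
    )

    // load runs a full TUF update for gun and returns the valid repo.
    // The second return value of LoadTUFRepo (invalid roles kept around
    // for witnessing) is discarded here.
    func load(gun data.GUN, cache store.MetadataStore, remote store.RemoteStore) (*tuf.Repo, error) {
        valid, _, err := client.LoadTUFRepo(client.TUFLoadOptions{
            GUN:         gun,
            Cache:       cache,  // nil would default to an in-memory store
            RemoteStore: remote, // nil would default to an offline store
        })
        return valid, err
    }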

View File

@ -1,62 +0,0 @@
package client
import (
"github.com/theupdateframework/notary/client/changelist"
"github.com/theupdateframework/notary/tuf"
"github.com/theupdateframework/notary/tuf/data"
)
// Witness creates change objects to witness (i.e. re-sign) the given
// roles on the next publish. One change is created per role
func (r *repository) Witness(roles ...data.RoleName) ([]data.RoleName, error) {
var err error
successful := make([]data.RoleName, 0, len(roles))
for _, role := range roles {
// scope is role
c := changelist.NewTUFChange(
changelist.ActionUpdate,
role,
changelist.TypeWitness,
"",
nil,
)
err = r.changelist.Add(c)
if err != nil {
break
}
successful = append(successful, role)
}
return successful, err
}
func witnessTargets(repo *tuf.Repo, invalid *tuf.Repo, role data.RoleName) error {
if r, ok := repo.Targets[role]; ok {
// role is already valid, mark for re-signing/updating
r.Dirty = true
return nil
}
if roleObj, err := repo.GetDelegationRole(role); err == nil && invalid != nil {
// A role with a threshold > len(keys) is technically invalid, but we let it build in the builder because
// we want to be able to download the role (which may still have targets on it), add more keys, and then
// witness the role, thus bringing it back to valid. However, if no keys have been added before witnessing,
// then it is still an invalid role, and can't be witnessed because nothing can bring it back to valid.
if roleObj.Threshold > len(roleObj.Keys) {
return data.ErrInvalidRole{
Role: role,
Reason: "role does not specify enough valid signing keys to meet its required threshold",
}
}
if r, ok := invalid.Targets[role]; ok {
// role is recognized but invalid, move to valid data and mark for re-signing
repo.Targets[role] = r
r.Dirty = true
return nil
}
}
// role isn't recognized, even as invalid
return data.ErrInvalidRole{
Role: role,
Reason: "this role is not known",
}
}
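
Usage-wise, `Witness` only stages changes; publishing applies them. A brief sketch against the `Repository` interface from earlier in this diff:

    package example

    import (
        client "github.com/theupdateframework/notary/client"
        "github.com/theupdateframework/notary/tuf/data"
    )

    // witnessAndPublish marks the given roles for re-signing on the next
    // publish, then publishes immediately.
    func witnessAndPublish(repo client.Repository, roles ...data.RoleName) error {
        if _, err := repo.Witness(roles...); err != nil {
            return err
        }
        return repo.Publish()
    }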

View File

@ -1,25 +0,0 @@
codecov:
notify:
# 2 builds on circleci, 1 jenkins build
after_n_builds: 3
coverage:
range: "50...100"
status:
# project will give us the diff in the total code coverage between a commit
# and its parent
project:
default:
target: auto
threshold: "0.05%"
# patch would give us the code coverage of the diff only
patch: false
# changes tells us if there are unexpected code coverage changes in other files
# which were not changed by the diff
changes: false
ignore: # ignore testutils for coverage
- "tuf/testutils/*"
- "vendor/*"
- "proto/*.pb.go"
- "trustmanager/remoteks/*.pb.go"
comment: off

View File

@ -1,95 +0,0 @@
package notary
import (
"time"
)
// application wide constants
const (
// MaxDownloadSize is the maximum size we'll download for metadata if no limit is given - 100MiB
MaxDownloadSize int64 = 100 << 20
// MaxTimestampSize is the maximum size of timestamp metadata - 1MiB.
MaxTimestampSize int64 = 1 << 20
// MinRSABitSize is the minimum bit size for RSA keys allowed in notary
MinRSABitSize = 2048
// MinThreshold is the minimum signature threshold required for roles; currently we do not support a higher threshold
MinThreshold = 1
// SHA256HexSize is the length, in characters, of a hex-encoded SHA256 digest
SHA256HexSize = 64
// SHA512HexSize is the length, in characters, of a hex-encoded SHA512 digest
SHA512HexSize = 128
// SHA256 is the name of SHA256 hash algorithm
SHA256 = "sha256"
// SHA512 is the name of SHA512 hash algorithm
SHA512 = "sha512"
// TrustedCertsDir is the directory, under the notary repo base directory, where trusted certs are stored
TrustedCertsDir = "trusted_certificates"
// PrivDir is the directory, under the notary repo base directory, where private keys are stored
PrivDir = "private"
// RootKeysSubdir is the subdirectory under PrivDir where root private keys are stored
// DEPRECATED: The only reason we need this constant is compatibility with older versions
RootKeysSubdir = "root_keys"
// NonRootKeysSubdir is the subdirectory under PrivDir where non-root private keys are stored
// DEPRECATED: The only reason we need this constant is compatibility with older versions
NonRootKeysSubdir = "tuf_keys"
// KeyExtension is the file extension to use for private key files
KeyExtension = "key"
// Day is a duration of one day
Day = 24 * time.Hour
Year = 365 * Day
// NotaryRootExpiry is the duration representing the expiry time of the Root role
NotaryRootExpiry = 10 * Year
NotaryTargetsExpiry = 3 * Year
NotarySnapshotExpiry = 3 * Year
NotaryTimestampExpiry = 14 * Day
ConsistentMetadataCacheMaxAge = 30 * Day
CurrentMetadataCacheMaxAge = 5 * time.Minute
// CacheMaxAgeLimit is the generally recommended maximum age for Cache-Control headers
// (one year, in seconds, since one year is forever in terms of internet
// content)
CacheMaxAgeLimit = 1 * Year
MySQLBackend = "mysql"
MemoryBackend = "memory"
PostgresBackend = "postgres"
SQLiteBackend = "sqlite3"
RethinkDBBackend = "rethinkdb"
FileBackend = "file"
DefaultImportRole = "delegation"
// HealthCheckKeyManagement and HealthCheckSigner are the grpc service names
// for "KeyManagement" and "Signer" respectively, which are used for health checks.
// "Overall" indicates a query for the overall status of the server.
HealthCheckKeyManagement = "grpc.health.v1.Health.KeyManagement"
HealthCheckSigner = "grpc.health.v1.Health.Signer"
HealthCheckOverall = "grpc.health.v1.Health.Overall"
// PrivExecPerms is the permission mode for private directories,
// and PrivNoExecPerms for private files.
PrivExecPerms = 0700
PrivNoExecPerms = 0600
// DefaultPageSize is the default number of records to return from the changefeed
DefaultPageSize = 100
)
// enum to use for setting and retrieving values from contexts
const (
CtxKeyMetaStore CtxKey = iota
CtxKeyKeyAlgo
CtxKeyCryptoSvc
CtxKeyRepo
)
// NotarySupportedBackends contains the backends we would like to support at present
var NotarySupportedBackends = []string{
MemoryBackend,
MySQLBackend,
SQLiteBackend,
RethinkDBBackend,
PostgresBackend,
}
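
For reference, the shifted size constants above decode as powers of two: `1 << 20` is 1,048,576 bytes (1 MiB), so `MaxTimestampSize` is 1 MiB and `MaxDownloadSize` is `100 << 20` = 104,857,600 bytes (100 MiB). A one-liner to check the arithmetic:

    package main

    import "fmt"

    func main() {
        const maxDownloadSize int64 = 100 << 20 // same expression as notary.MaxDownloadSize
        fmt.Println(maxDownloadSize)            // 104857600 bytes, i.e. 100 MiB
    }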

View File

@ -1,16 +0,0 @@
// +build !windows
package notary
import (
"os"
"syscall"
)
// NotarySupportedSignals contains the signals we would like to capture:
// - SIGUSR1, indicates an increment of the log level.
// - SIGUSR2, indicates a decrement of the log level.
var NotarySupportedSignals = []os.Signal{
syscall.SIGUSR1,
syscall.SIGUSR2,
}

View File

@ -1,8 +0,0 @@
// +build windows
package notary
import "os"
// NotarySupportedSignals does not contain any signals, because SIGUSR1/2 are not supported on windows
var NotarySupportedSignals = []os.Signal{}

View File

@ -1,29 +0,0 @@
FROM dockercore/golang-cross:1.12.15
RUN apt-get update && apt-get install -y \
curl \
clang \
file \
libsqlite3-dev \
patch \
tar \
xz-utils \
python \
python-pip \
--no-install-recommends \
&& rm -rf /var/lib/apt/lists/*
RUN useradd -ms /bin/bash notary \
&& pip install codecov \
&& go get golang.org/x/lint/golint github.com/fzipp/gocyclo github.com/client9/misspell/cmd/misspell github.com/gordonklaus/ineffassign github.com/securego/gosec/cmd/gosec/...
ENV NOTARYDIR /go/src/github.com/theupdateframework/notary
ENV GO111MODULE=on
ENV GOFLAGS=-mod=vendor
COPY . ${NOTARYDIR}
RUN chmod -R a+rw /go
WORKDIR ${NOTARYDIR}
# Note this cannot use alpine because of the MacOSX Cross SDK: the cctools there uses sys/cdefs.h and that cannot be used in alpine: http://wiki.musl-libc.org/wiki/FAQ#Q:_I.27m_trying_to_compile_something_against_musl_and_I_get_error_messages_about_sys.2Fcdefs.h

View File

@ -1,41 +0,0 @@
package cryptoservice
import (
"crypto"
"crypto/rand"
"crypto/x509"
"fmt"
"time"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
// GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval
func GenerateCertificate(rootKey data.PrivateKey, gun data.GUN, startTime, endTime time.Time) (*x509.Certificate, error) {
signer := rootKey.CryptoSigner()
if signer == nil {
return nil, fmt.Errorf("key type not supported for Certificate generation: %s", rootKey.Algorithm())
}
return generateCertificate(signer, gun, startTime, endTime)
}
func generateCertificate(signer crypto.Signer, gun data.GUN, startTime, endTime time.Time) (*x509.Certificate, error) {
template, err := utils.NewCertificate(gun.String(), startTime, endTime)
if err != nil {
return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err)
}
derBytes, err := x509.CreateCertificate(rand.Reader, template, template, signer.Public(), signer)
if err != nil {
return nil, fmt.Errorf("failed to create the certificate for: %s (%v)", gun, err)
}
cert, err := x509.ParseCertificate(derBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse the certificate for key: %s (%v)", gun, err)
}
return cert, nil
}
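
A hedged sketch of calling `GenerateCertificate` above, using `utils.GenerateKey` (seen elsewhere in this diff) to produce the signing key; the ten-year validity is an arbitrary choice for illustration:

    package example

    import (
        "crypto/x509"
        "time"

        "github.com/theupdateframework/notary/cryptoservice"
        "github.com/theupdateframework/notary/tuf/data"
        "github.com/theupdateframework/notary/tuf/utils"
    )

    // selfSignedForGUN generates a fresh ECDSA key and a self-signed
    // certificate for the given GUN, valid for ten years from now.
    func selfSignedForGUN(gun data.GUN) (*x509.Certificate, error) {
        key, err := utils.GenerateKey(data.ECDSAKey)
        if err != nil {
            return nil, err
        }
        start := time.Now()
        return cryptoservice.GenerateCertificate(key, gun, start, start.AddDate(10, 0, 0))
    }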

View File

@ -1,162 +0,0 @@
package cryptoservice
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/trustmanager"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
var (
// ErrNoValidPrivateKey is returned if a key being imported doesn't
// look like a private key
ErrNoValidPrivateKey = errors.New("no valid private key found")
// ErrRootKeyNotEncrypted is returned if a root key being imported is
// unencrypted
ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")
// EmptyService is an empty crypto service
EmptyService = NewCryptoService()
)
// CryptoService implements Sign and Create, holding a specific GUN and keystore to
// operate on
type CryptoService struct {
keyStores []trustmanager.KeyStore
}
// NewCryptoService returns an instance of CryptoService
func NewCryptoService(keyStores ...trustmanager.KeyStore) *CryptoService {
return &CryptoService{keyStores: keyStores}
}
// Create is used to generate keys for targets, snapshots and timestamps
func (cs *CryptoService) Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error) {
if algorithm == data.RSAKey {
return nil, fmt.Errorf("%s keys can only be imported", data.RSAKey)
}
privKey, err := utils.GenerateKey(algorithm)
if err != nil {
return nil, fmt.Errorf("failed to generate %s key: %v", algorithm, err)
}
logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role.String(), privKey.ID())
pubKey := data.PublicKeyFromPrivate(privKey)
return pubKey, cs.AddKey(role, gun, privKey)
}
// GetPrivateKey returns a private key and role if present by ID.
func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role data.RoleName, err error) {
for _, ks := range cs.keyStores {
if k, role, err = ks.GetKey(keyID); err == nil {
return
}
switch err.(type) {
case trustmanager.ErrPasswordInvalid, trustmanager.ErrAttemptsExceeded:
return
default:
continue
}
}
return // returns whatever the final values were
}
// GetKey returns a key by ID
func (cs *CryptoService) GetKey(keyID string) data.PublicKey {
privKey, _, err := cs.GetPrivateKey(keyID)
if err != nil {
return nil
}
return data.PublicKeyFromPrivate(privKey)
}
// GetKeyInfo returns role and GUN info of a key by ID
func (cs *CryptoService) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
for _, store := range cs.keyStores {
if info, err := store.GetKeyInfo(keyID); err == nil {
return info, nil
}
}
return trustmanager.KeyInfo{}, fmt.Errorf("could not find info for keyID %s", keyID)
}
// RemoveKey deletes a key by ID
func (cs *CryptoService) RemoveKey(keyID string) (err error) {
for _, ks := range cs.keyStores {
ks.RemoveKey(keyID)
}
return // err is always nil here; per-store removal errors are ignored
}
// AddKey adds a private key to a specified role.
// The GUN is inferred from the cryptoservice itself for non-root roles
func (cs *CryptoService) AddKey(role data.RoleName, gun data.GUN, key data.PrivateKey) (err error) {
// First check if this key already exists in any of our keystores
for _, ks := range cs.keyStores {
if keyInfo, err := ks.GetKeyInfo(key.ID()); err == nil {
if keyInfo.Role != role {
return fmt.Errorf("key with same ID already exists for role: %s", keyInfo.Role.String())
}
logrus.Debugf("key with same ID %s and role %s already exists", key.ID(), keyInfo.Role.String())
return nil
}
}
// If the key didn't exist in any of our keystores, add and return on the first successful keystore
for _, ks := range cs.keyStores {
// Try to add to this keystore, return if successful
if err = ks.AddKey(trustmanager.KeyInfo{Role: role, Gun: gun}, key); err == nil {
return nil
}
}
return // returns whatever the final values were
}
// ListKeys returns a list of key IDs valid for the given role
func (cs *CryptoService) ListKeys(role data.RoleName) []string {
var res []string
for _, ks := range cs.keyStores {
for k, r := range ks.ListKeys() {
if r.Role == role {
res = append(res, k)
}
}
}
return res
}
// ListAllKeys returns a map of key IDs to role
func (cs *CryptoService) ListAllKeys() map[string]data.RoleName {
res := make(map[string]data.RoleName)
for _, ks := range cs.keyStores {
for k, r := range ks.ListKeys() {
res[k] = r.Role // keys are content addressed so don't care about overwrites
}
}
return res
}
// CheckRootKeyIsEncrypted makes sure the root key is encrypted. We have
// internal assumptions that depend on this.
func CheckRootKeyIsEncrypted(pemBytes []byte) error {
block, _ := pem.Decode(pemBytes)
if block == nil {
return ErrNoValidPrivateKey
}
if block.Type == "ENCRYPTED PRIVATE KEY" {
return nil
}
if !notary.FIPSEnabled() && x509.IsEncryptedPEMBlock(block) {
return nil
}
return ErrRootKeyNotEncrypted
}
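
Putting the pieces above together, a sketch of constructing a file-backed `CryptoService` and generating a targets key; `trustmanager.NewKeyFileStore` and the passphrase retriever are as used elsewhere in this diff:

    package example

    import (
        "github.com/theupdateframework/notary"
        "github.com/theupdateframework/notary/cryptoservice"
        "github.com/theupdateframework/notary/trustmanager"
        "github.com/theupdateframework/notary/tuf/data"
    )

    // newTargetsKey builds a CryptoService over a file key store rooted at
    // baseDir and creates an ECDSA key for the targets role of gun.
    func newTargetsKey(baseDir string, gun data.GUN, retriever notary.PassRetriever) (data.PublicKey, error) {
        keyStore, err := trustmanager.NewKeyFileStore(baseDir, retriever)
        if err != nil {
            return nil, err
        }
        cs := cryptoservice.NewCryptoService(keyStore)
        return cs.Create(data.CanonicalTargetsRole, gun, data.ECDSAKey)
    }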

View File

@ -1,60 +0,0 @@
version: "2"
services:
server:
build:
context: .
dockerfile: server.Dockerfile
networks:
mdb:
sig:
srv:
aliases:
- notary-server
entrypoint: /usr/bin/env sh
command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json"
depends_on:
- mysql
- signer
signer:
build:
context: .
dockerfile: signer.Dockerfile
networks:
mdb:
sig:
aliases:
- notarysigner
entrypoint: /usr/bin/env sh
command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json"
depends_on:
- mysql
mysql:
networks:
- mdb
volumes:
- ./notarysql/mysql-initdb.d:/docker-entrypoint-initdb.d
image: mariadb:10.4
environment:
- TERM=dumb
- MYSQL_ALLOW_EMPTY_PASSWORD="true"
command: mysqld --innodb_file_per_table
client:
build:
context: .
dockerfile: Dockerfile
env_file: buildscripts/env.list
command: buildscripts/testclient.py
volumes:
- ./test_output:/test_output
networks:
- mdb
- srv
depends_on:
- server
networks:
mdb:
external: false
sig:
external: false
srv:
external: false

View File

@ -1,63 +0,0 @@
version: "2"
services:
server:
build:
context: .
dockerfile: server.Dockerfile
networks:
mdb:
sig:
srv:
aliases:
- notary-server
entrypoint: /usr/bin/env sh
command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.postgres.json"
environment:
MIGRATIONS_PATH: migrations/server/postgresql
DB_URL: postgres://server@postgresql:5432/notaryserver?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server-key.pem
depends_on:
- postgresql
- signer
signer:
build:
context: .
dockerfile: signer.Dockerfile
networks:
mdb:
sig:
aliases:
- notarysigner
entrypoint: /usr/bin/env sh
command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.postgres.json"
environment:
MIGRATIONS_PATH: migrations/signer/postgresql
DB_URL: postgres://signer@postgresql:5432/notarysigner?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer-key.pem
depends_on:
- postgresql
postgresql:
image: postgres:9.5.4
networks:
- mdb
volumes:
- ./notarysql/postgresql-initdb.d:/docker-entrypoint-initdb.d
command: -l
client:
build:
context: .
dockerfile: Dockerfile
env_file: buildscripts/env.list
command: buildscripts/testclient.py
volumes:
- ./test_output:/test_output
networks:
- mdb
- srv
depends_on:
- server
networks:
mdb:
external: false
sig:
external: false
srv:
external: false

View File

@ -1,110 +0,0 @@
version: "2"
services:
server:
build:
context: .
dockerfile: server.Dockerfile
volumes:
- ./fixtures/rethinkdb:/tls
networks:
- rdb
links:
- rdb-proxy:rdb-proxy.rdb
- signer
ports:
- "8080"
- "4443:4443"
entrypoint: /usr/bin/env sh
command: -c "sh migrations/rethink_migrate.sh && notary-server -config=fixtures/server-config.rethink.json"
depends_on:
- rdb-proxy
signer:
build:
context: .
dockerfile: signer.Dockerfile
volumes:
- ./fixtures/rethinkdb:/tls
networks:
rdb:
aliases:
- notarysigner
links:
- rdb-proxy:rdb-proxy.rdb
entrypoint: /usr/bin/env sh
command: -c "sh migrations/rethink_migrate.sh && notary-signer -config=fixtures/signer-config.rethink.json"
depends_on:
- rdb-proxy
rdb-01:
image: jlhawn/rethinkdb:2.3.4
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-01-data:/var/data
networks:
rdb:
aliases:
- rdb
- rdb.rdb
- rdb-01.rdb
command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-02:
image: jlhawn/rethinkdb:2.3.4
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-02-data:/var/data
networks:
rdb:
aliases:
- rdb
- rdb.rdb
- rdb-02.rdb
command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-03:
image: jlhawn/rethinkdb:2.3.4
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-03-data:/var/data
networks:
rdb:
aliases:
- rdb
- rdb.rdb
- rdb-03.rdb
command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-proxy:
image: jlhawn/rethinkdb:2.3.4
ports:
- "8080:8080"
volumes:
- ./fixtures/rethinkdb:/tls
networks:
rdb:
aliases:
- rdb-proxy
- rdb-proxy.rdp
command: "proxy --bind all --join rdb.rdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
depends_on:
- rdb-01
- rdb-02
- rdb-03
client:
build:
context: .
dockerfile: Dockerfile
volumes:
- ./test_output:/test_output
networks:
- rdb
env_file: buildscripts/env.list
links:
- server:notary-server
command: buildscripts/testclient.py
volumes:
rdb-01-data:
external: false
rdb-02-data:
external: false
rdb-03-data:
external: false
networks:
rdb:
external: false

View File

@ -1,54 +0,0 @@
version: "2"
services:
server:
build:
context: .
dockerfile: server.Dockerfile
networks:
- mdb
- sig
ports:
- "8080"
- "4443:4443"
entrypoint: /usr/bin/env sh
command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.postgres.json"
environment:
MIGRATIONS_PATH: migrations/server/postgresql
DB_URL: postgres://server@postgresql:5432/notaryserver?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-server-key.pem
depends_on:
- postgresql
- signer
signer:
build:
context: .
dockerfile: signer.Dockerfile
networks:
mdb:
sig:
aliases:
- notarysigner
entrypoint: /usr/bin/env sh
command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.postgres.json"
environment:
MIGRATIONS_PATH: migrations/signer/postgresql
DB_URL: postgres://signer@postgresql:5432/notarysigner?sslmode=verify-ca&sslrootcert=/go/src/github.com/theupdateframework/notary/fixtures/database/ca.pem&sslcert=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer.pem&sslkey=/go/src/github.com/theupdateframework/notary/fixtures/database/notary-signer-key.pem
depends_on:
- postgresql
postgresql:
image: postgres:9.5.4
networks:
- mdb
volumes:
- ./notarysql/postgresql-initdb.d:/docker-entrypoint-initdb.d
- notary_data:/var/lib/postgresql
ports:
- 5432:5432
command: -l
volumes:
notary_data:
external: false
networks:
mdb:
external: false
sig:
external: false

View File

@ -1,96 +0,0 @@
version: "2"
services:
server:
build:
context: .
dockerfile: server.Dockerfile
volumes:
- ./fixtures/rethinkdb:/tls
networks:
- rdb
links:
- rdb-proxy:rdb-proxy.rdb
- signer
ports:
- "4443:4443"
entrypoint: /usr/bin/env sh
command: -c "sh migrations/rethink_migrate.sh && notary-server -config=fixtures/server-config.rethink.json"
depends_on:
- rdb-proxy
signer:
build:
context: .
dockerfile: signer.Dockerfile
volumes:
- ./fixtures/rethinkdb:/tls
networks:
rdb:
aliases:
- notarysigner
links:
- rdb-proxy:rdb-proxy.rdb
entrypoint: /usr/bin/env sh
command: -c "sh migrations/rethink_migrate.sh && notary-signer -config=fixtures/signer-config.rethink.json"
depends_on:
- rdb-proxy
rdb-01:
image: jlhawn/rethinkdb:2.3.4
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-01-data:/var/data
networks:
rdb:
aliases:
- rdb-01.rdb
command: "--bind all --no-http-admin --server-name rdb_01 --canonical-address rdb-01.rdb --directory /var/data/rethinkdb --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
rdb-02:
image: jlhawn/rethinkdb:2.3.4
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-02-data:/var/data
networks:
rdb:
aliases:
- rdb-02.rdb
command: "--bind all --no-http-admin --server-name rdb_02 --canonical-address rdb-02.rdb --directory /var/data/rethinkdb --join rdb-01 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
depends_on:
- rdb-01
rdb-03:
image: jlhawn/rethinkdb:2.3.4
volumes:
- ./fixtures/rethinkdb:/tls
- rdb-03-data:/var/data
networks:
rdb:
aliases:
- rdb-03.rdb
command: "--bind all --no-http-admin --server-name rdb_03 --canonical-address rdb-03.rdb --directory /var/data/rethinkdb --join rdb-02 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
depends_on:
- rdb-01
- rdb-02
rdb-proxy:
image: jlhawn/rethinkdb:2.3.4
ports:
- "8080:8080"
volumes:
- ./fixtures/rethinkdb:/tls
networks:
rdb:
aliases:
- rdb-proxy
- rdb-proxy.rdb
command: "proxy --bind all --join rdb-03 --driver-tls-ca /tls/ca.pem --driver-tls-key /tls/key.pem --driver-tls-cert /tls/cert.pem --cluster-tls-key /tls/key.pem --cluster-tls-cert /tls/cert.pem --cluster-tls-ca /tls/ca.pem"
depends_on:
- rdb-01
- rdb-02
- rdb-03
volumes:
rdb-01-data:
external: false
rdb-02-data:
external: false
rdb-03-data:
external: false
networks:
rdb:
external: false


@ -1,49 +0,0 @@
version: "2"
services:
server:
build:
context: .
dockerfile: server.Dockerfile
networks:
- mdb
- sig
ports:
- "8080"
- "4443:4443"
entrypoint: /usr/bin/env sh
command: -c "./migrations/migrate.sh && notary-server -config=fixtures/server-config.json"
depends_on:
- mysql
- signer
signer:
build:
context: .
dockerfile: signer.Dockerfile
networks:
mdb:
sig:
aliases:
- notarysigner
entrypoint: /usr/bin/env sh
command: -c "./migrations/migrate.sh && notary-signer -config=fixtures/signer-config.json"
depends_on:
- mysql
mysql:
networks:
- mdb
volumes:
- ./notarysql/mysql-initdb.d:/docker-entrypoint-initdb.d
- notary_data:/var/lib/mysql
image: mariadb:10.4
environment:
- TERM=dumb
- MYSQL_ALLOW_EMPTY_PASSWORD="true"
command: mysqld --innodb_file_per_table
volumes:
notary_data:
external: false
networks:
mdb:
external: false
sig:
external: false


@ -1,17 +0,0 @@
FROM golang:1.14.1-alpine
ENV NOTARYPKG github.com/theupdateframework/notary
ENV GO111MODULE=on
# Copy the local repo to the expected go path
COPY . /go/src/${NOTARYPKG}
WORKDIR /go/src/${NOTARYPKG}
EXPOSE 4450
# Install escrow
RUN go install ${NOTARYPKG}/cmd/escrow
ENTRYPOINT [ "escrow" ]
CMD [ "-config=cmd/escrow/config.toml" ]


@ -1,14 +0,0 @@
package notary
import (
"crypto"
// Need to import md5 so we can test availability.
_ "crypto/md5" // #nosec
)
// FIPSEnabled returns true if running in FIPS mode.
// If compiled in FIPS mode the md5 hash function is never available
// even when imported. This seems to be the best test we have for it.
func FIPSEnabled() bool {
return !crypto.MD5.Available()
}
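
For reference, a minimal sketch of how the deleted FIPSEnabled helper could be consumed; this example is illustrative and not part of the removed files, assuming the github.com/theupdateframework/notary import path resolves:

    package main

    import (
        "fmt"

        "github.com/theupdateframework/notary"
    )

    func main() {
        // FIPSEnabled reports whether the binary was compiled in FIPS mode,
        // inferred from md5 being unavailable despite the import above.
        if notary.FIPSEnabled() {
            fmt.Println("FIPS mode: restrict key generation to approved algorithms")
        } else {
            fmt.Println("standard build: md5 is linkable, no FIPS restrictions apply")
        }
    }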


@ -1,12 +0,0 @@
package notary
// PassRetriever is a callback function that should retrieve a passphrase
// for a given named key. If the passphrase should be treated as new (e.g. requiring
// confirmation), createNew will be true. attempts is passed in so that implementers
// can decide how many chances to give a human, for example.
type PassRetriever func(keyName, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error)
// CtxKey is a wrapper type for use in context.WithValue() to satisfy golint
// https://github.com/golang/go/issues/17293
// https://github.com/golang/lint/pull/245
type CtxKey int
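
A minimal sketch of satisfying the PassRetriever signature; the helper name and the give-up threshold below are illustrative, not taken from the notary codebase:

    package main

    import (
        "errors"
        "fmt"

        "github.com/theupdateframework/notary"
    )

    // fixedRetriever returns a hypothetical PassRetriever that hands back a
    // fixed passphrase and gives up after three attempts.
    func fixedRetriever(pass string) notary.PassRetriever {
        return func(keyName, alias string, createNew bool, attempts int) (string, bool, error) {
            if attempts > 3 {
                return "", true, errors.New("too many attempts")
            }
            return pass, false, nil
        }
    }

    func main() {
        retriever := fixedRetriever("testpassword")
        pass, giveup, err := retriever("keyID", "root", false, 0)
        fmt.Println(pass, giveup, err)
    }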


@ -1,210 +0,0 @@
// Package passphrase provides utility functions for managing passphrases
// for TUF and Notary keys.
package passphrase
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/theupdateframework/notary"
"golang.org/x/term"
)
const (
idBytesToDisplay = 7
tufRootAlias = "root"
tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase
will be used to protect the most sensitive key in your signing system. Please
choose a long, complex passphrase and be careful to keep the password and the
key file itself secure and backed up. It is highly recommended that you use a
password manager to generate the passphrase and keep it safe. There will be no
way to recover this key. You can find the key in your config directory.`
)
var (
// ErrTooShort is returned if the passphrase entered for a new key is
// below the minimum length
ErrTooShort = errors.New("Passphrase too short")
// ErrDontMatch is returned if the two entered passphrases don't match.
ErrDontMatch = errors.New("The entered passphrases do not match")
// ErrTooManyAttempts is returned if the maximum number of passphrase
// entry attempts is reached.
ErrTooManyAttempts = errors.New("Too many attempts")
// ErrNoInput is returned if we do not have a valid input method for passphrases
ErrNoInput = errors.New("Please either use environment variables or STDIN with a terminal to provide key passphrases")
)
// PromptRetriever returns a new Retriever which will provide a prompt on stdin
// and stdout to retrieve a passphrase. stdin will be checked if it is a terminal,
// else the PromptRetriever will error when attempting to retrieve a passphrase.
// Upon successful passphrase retrievals, the passphrase will be cached such that
// subsequent prompts will produce the same passphrase.
func PromptRetriever() notary.PassRetriever {
if !term.IsTerminal(int(os.Stdin.Fd())) {
return func(string, string, bool, int) (string, bool, error) {
return "", false, ErrNoInput
}
}
return PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil)
}
type boundRetriever struct {
in io.Reader
out io.Writer
aliasMap map[string]string
passphraseCache map[string]string
}
func (br *boundRetriever) getPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
if numAttempts == 0 {
if alias == tufRootAlias && createNew {
fmt.Fprintln(br.out, tufRootKeyGenerationWarning)
}
if pass, ok := br.passphraseCache[alias]; ok {
return pass, false, nil
}
} else if !createNew { // per `if`, numAttempts > 0 if we're at this `else`
if numAttempts > 3 {
return "", true, ErrTooManyAttempts
}
fmt.Fprintln(br.out, "Passphrase incorrect. Please retry.")
}
// passphrase not cached and we're not aborting, get passphrase from user!
return br.requestPassphrase(keyName, alias, createNew, numAttempts)
}
func (br *boundRetriever) requestPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
// Figure out if we should display a different string for this alias
displayAlias := alias
if val, ok := br.aliasMap[alias]; ok {
displayAlias = val
}
indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator))
if indexOfLastSeparator == -1 {
indexOfLastSeparator = 0
}
var shortName string
if len(keyName) > indexOfLastSeparator+idBytesToDisplay {
if indexOfLastSeparator > 0 {
keyNamePrefix := keyName[:indexOfLastSeparator]
keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1]
shortName = keyNameID + " (" + keyNamePrefix + ")"
} else {
shortName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay]
}
}
withID := fmt.Sprintf(" with ID %s", shortName)
if shortName == "" {
withID = ""
}
switch {
case createNew:
fmt.Fprintf(br.out, "Enter passphrase for new %s key%s: ", displayAlias, withID)
case displayAlias == "yubikey":
fmt.Fprintf(br.out, "Enter the %s for the attached Yubikey: ", keyName)
default:
fmt.Fprintf(br.out, "Enter passphrase for %s key%s: ", displayAlias, withID)
}
stdin := bufio.NewReader(br.in)
passphrase, err := GetPassphrase(stdin)
fmt.Fprintln(br.out)
if err != nil {
return "", false, err
}
retPass := strings.TrimSpace(string(passphrase))
if createNew {
err = br.verifyAndConfirmPassword(stdin, retPass, displayAlias, withID)
if err != nil {
return "", false, err
}
}
br.cachePassword(alias, retPass)
return retPass, false, nil
}
func (br *boundRetriever) verifyAndConfirmPassword(stdin *bufio.Reader, retPass, displayAlias, withID string) error {
if len(retPass) < 8 {
fmt.Fprintln(br.out, "Passphrase is too short. Please use a password manager to generate and store a good random passphrase.")
return ErrTooShort
}
fmt.Fprintf(br.out, "Repeat passphrase for new %s key%s: ", displayAlias, withID)
confirmation, err := GetPassphrase(stdin)
fmt.Fprintln(br.out)
if err != nil {
return err
}
confirmationStr := strings.TrimSpace(string(confirmation))
if retPass != confirmationStr {
fmt.Fprintln(br.out, "Passphrases do not match. Please retry.")
return ErrDontMatch
}
return nil
}
func (br *boundRetriever) cachePassword(alias, retPass string) {
br.passphraseCache[alias] = retPass
}
// PromptRetrieverWithInOut returns a new Retriever which will provide a
// prompt using the given in and out readers. The passphrase will be cached
// such that subsequent prompts will produce the same passphrase.
// aliasMap can be used to specify display names for TUF key aliases. If aliasMap
// is nil, a sensible default will be used.
func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) notary.PassRetriever {
bound := &boundRetriever{
in: in,
out: out,
aliasMap: aliasMap,
passphraseCache: make(map[string]string),
}
return bound.getPassphrase
}
// ConstantRetriever returns a new Retriever which will return a constant string
// as a passphrase.
func ConstantRetriever(constantPassphrase string) notary.PassRetriever {
return func(k, a string, c bool, n int) (string, bool, error) {
return constantPassphrase, false, nil
}
}
// GetPassphrase gets the passphrase from a bufio.Reader or from the terminal.
// If typing on the terminal, we disable terminal echo while the passphrase is entered.
func GetPassphrase(in *bufio.Reader) ([]byte, error) {
var (
passphrase []byte
err error
)
if term.IsTerminal(int(os.Stdin.Fd())) {
passphrase, err = term.ReadPassword(int(os.Stdin.Fd()))
} else {
passphrase, err = in.ReadBytes('\n')
}
return passphrase, err
}
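
A sketch of exercising this package's two retriever constructors; the passphrases and key name are placeholders. Note that GetPassphrase still checks whether os.Stdin is a terminal, so the in-memory reader below is only consumed in non-terminal contexts (e.g. under `go test`):

    package main

    import (
        "bytes"
        "fmt"

        "github.com/theupdateframework/notary/passphrase"
    )

    func main() {
        // ConstantRetriever: simplest case, e.g. for automation.
        constant := passphrase.ConstantRetriever("testpassword")
        pass, _, _ := constant("keyID", "root", false, 0)
        fmt.Println(pass)

        // Drive the prompt from in-memory buffers; the input supplies the
        // passphrase twice (initial entry plus confirmation for a new key).
        in := bytes.NewBufferString("correct horse battery\ncorrect horse battery\n")
        out := &bytes.Buffer{}
        prompt := passphrase.PromptRetrieverWithInOut(in, out, nil)
        pass, giveup, err := prompt("abc1234def567", "targets", true, 0)
        fmt.Println(pass, giveup, err)
    }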


@ -1,30 +0,0 @@
FROM golang:1.14.1-alpine
RUN apk add --update git gcc libc-dev
ENV GO111MODULE=on
ARG MIGRATE_VER=v4.6.2
RUN go get -tags 'mysql postgres file' github.com/golang-migrate/migrate/v4/cli@${MIGRATE_VER} && mv /go/bin/cli /go/bin/migrate
ENV GOFLAGS=-mod=vendor
ENV NOTARYPKG github.com/theupdateframework/notary
# Copy the local repo to the expected go path
COPY . /go/src/${NOTARYPKG}
WORKDIR /go/src/${NOTARYPKG}
RUN chmod 0600 ./fixtures/database/*
ENV SERVICE_NAME=notary_server
EXPOSE 4443
# Install notary-server
RUN go install \
-tags pkcs11 \
-ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \
${NOTARYPKG}/cmd/notary-server && apk del git gcc libc-dev && rm -rf /var/cache/apk/*
ENTRYPOINT [ "notary-server" ]
CMD [ "-config=fixtures/server-config-local.json" ]


@ -1,42 +0,0 @@
FROM golang:1.14.1-alpine AS build-env
RUN apk add --update git gcc libc-dev
ENV GO111MODULE=on
ARG MIGRATE_VER=v4.6.2
RUN go get -tags 'mysql postgres file' github.com/golang-migrate/migrate/v4/cli@${MIGRATE_VER} && mv /go/bin/cli /go/bin/migrate
ENV GOFLAGS=-mod=vendor
ENV NOTARYPKG github.com/theupdateframework/notary
# Copy the local repo to the expected go path
COPY . /go/src/${NOTARYPKG}
WORKDIR /go/src/${NOTARYPKG}
# Build notary-server
RUN go install \
-tags pkcs11 \
-ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \
${NOTARYPKG}/cmd/notary-server
FROM busybox:latest
# the ln is for compatibility with the docker-compose.yml, making these
# images a straight swap for those built in the compose file.
RUN mkdir -p /usr/bin /var/lib && ln -s /bin/env /usr/bin/env
COPY --from=build-env /go/bin/notary-server /usr/bin/notary-server
COPY --from=build-env /go/bin/migrate /usr/bin/migrate
COPY --from=build-env /lib/ld-musl-x86_64.so.1 /lib/ld-musl-x86_64.so.1
COPY --from=build-env /go/src/github.com/theupdateframework/notary/migrations/ /var/lib/notary/migrations
COPY --from=build-env /go/src/github.com/theupdateframework/notary/fixtures /var/lib/notary/fixtures
RUN chmod 0600 /var/lib/notary/fixtures/database/*
WORKDIR /var/lib/notary
# SERVICE_NAME needed for migration script
ENV SERVICE_NAME=notary_server
EXPOSE 4443
ENTRYPOINT [ "/usr/bin/notary-server" ]
CMD [ "-config=/var/lib/notary/fixtures/server-config-local.json" ]


@ -1,31 +0,0 @@
FROM golang:1.14.1-alpine
RUN apk add --update git gcc libc-dev
ENV GO111MODULE=on
ARG MIGRATE_VER=v4.6.2
RUN go get -tags 'mysql postgres file' github.com/golang-migrate/migrate/v4/cli@${MIGRATE_VER} && mv /go/bin/cli /go/bin/migrate
ENV GOFLAGS=-mod=vendor
ENV NOTARYPKG github.com/theupdateframework/notary
# Copy the local repo to the expected go path
COPY . /go/src/${NOTARYPKG}
WORKDIR /go/src/${NOTARYPKG}
RUN chmod 0600 ./fixtures/database/*
ENV SERVICE_NAME=notary_signer
ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1"
ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword"
# Install notary-signer
RUN go install \
-tags pkcs11 \
-ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \
${NOTARYPKG}/cmd/notary-signer && apk del git gcc libc-dev && rm -rf /var/cache/apk/*
ENTRYPOINT [ "notary-signer" ]
CMD [ "-config=fixtures/signer-config-local.json" ]


@ -1,44 +0,0 @@
FROM golang:1.14.1-alpine AS build-env
RUN apk add --update git gcc libc-dev
ENV GO111MODULE=on
ARG MIGRATE_VER=v4.6.2
RUN go get -tags 'mysql postgres file' github.com/golang-migrate/migrate/v4/cli@${MIGRATE_VER} && mv /go/bin/cli /go/bin/migrate
ENV GOFLAGS=-mod=vendor
ENV NOTARYPKG github.com/theupdateframework/notary
# Copy the local repo to the expected go path
COPY . /go/src/${NOTARYPKG}
WORKDIR /go/src/${NOTARYPKG}
# Build notary-signer
RUN go install \
-tags pkcs11 \
-ldflags "-w -X ${NOTARYPKG}/version.GitCommit=`git rev-parse --short HEAD` -X ${NOTARYPKG}/version.NotaryVersion=`cat NOTARY_VERSION`" \
${NOTARYPKG}/cmd/notary-signer
FROM busybox:latest
# the ln is for compatibility with the docker-compose.yml, making these
# images a straight swap for those built in the compose file.
RUN mkdir -p /usr/bin /var/lib && ln -s /bin/env /usr/bin/env
COPY --from=build-env /go/bin/notary-signer /usr/bin/notary-signer
COPY --from=build-env /go/bin/migrate /usr/bin/migrate
COPY --from=build-env /lib/ld-musl-x86_64.so.1 /lib/ld-musl-x86_64.so.1
COPY --from=build-env /go/src/github.com/theupdateframework/notary/migrations/ /var/lib/notary/migrations
COPY --from=build-env /go/src/github.com/theupdateframework/notary/fixtures /var/lib/notary/fixtures
RUN chmod 0600 /var/lib/notary/fixtures/database/*
WORKDIR /var/lib/notary
# SERVICE_NAME needed for migration script
ENV SERVICE_NAME=notary_signer
ENV NOTARY_SIGNER_DEFAULT_ALIAS="timestamp_1"
ENV NOTARY_SIGNER_TIMESTAMP_1="testpassword"
ENTRYPOINT [ "/usr/bin/notary-signer" ]
CMD [ "-config=/var/lib/notary/fixtures/signer-config-local.json" ]


@ -1,22 +0,0 @@
package storage
import (
"errors"
"fmt"
)
var (
// ErrPathOutsideStore indicates that the returned path would be
// outside the store
ErrPathOutsideStore = errors.New("path outside file store")
)
// ErrMetaNotFound indicates we did not find a particular piece
// of metadata in the store
type ErrMetaNotFound struct {
Resource string
}
func (err ErrMetaNotFound) Error() string {
return fmt.Sprintf("%s trust data unavailable. Has a notary repository been initialized?", err.Resource)
}
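
A brief sketch of how a caller might distinguish the not-found case from other failures; the describe helper is hypothetical:

    package main

    import (
        "fmt"

        "github.com/theupdateframework/notary/storage"
    )

    // describe is a hypothetical helper separating "repository not
    // initialized yet" from genuine transport or storage failures.
    func describe(err error) string {
        if notFound, ok := err.(storage.ErrMetaNotFound); ok {
            return fmt.Sprintf("no %s metadata yet; initialize the repository first", notFound.Resource)
        }
        return err.Error()
    }

    func main() {
        fmt.Println(describe(storage.ErrMetaNotFound{Resource: "root"}))
    }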


@ -1,278 +0,0 @@
package storage
import (
"bytes"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
)
// NewFileStore creates a fully configurable file store
func NewFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
baseDir = filepath.Clean(baseDir)
if err := createDirectory(baseDir, notary.PrivExecPerms); err != nil {
return nil, err
}
if !strings.HasPrefix(fileExt, ".") {
fileExt = "." + fileExt
}
return &FilesystemStore{
baseDir: baseDir,
ext: fileExt,
}, nil
}
// NewPrivateKeyFileStorage initializes a new filestore for private keys, appending
// the notary.PrivDir to the baseDir.
func NewPrivateKeyFileStorage(baseDir, fileExt string) (*FilesystemStore, error) {
baseDir = filepath.Join(baseDir, notary.PrivDir)
myStore, err := NewFileStore(baseDir, fileExt)
if err != nil {
return nil, err
}
// guard against a nil store before running the key-layout migration
myStore.migrateTo0Dot4()
return myStore, nil
}
// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable
// _only_ filestore
func NewPrivateSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
return NewFileStore(baseDir, fileExt)
}
// FilesystemStore is a store in a locally accessible directory
type FilesystemStore struct {
baseDir string
ext string
}
func (f *FilesystemStore) moveKeyTo0Dot4Location(file string) {
keyID := filepath.Base(file)
fileDir := filepath.Dir(file)
d, _ := f.Get(file)
block, _ := pem.Decode(d)
if block == nil {
logrus.Warn("Key data for", file, "could not be decoded as a valid PEM block. The key will not been migrated and may not be available")
return
}
fileDir = strings.TrimPrefix(fileDir, notary.RootKeysSubdir)
fileDir = strings.TrimPrefix(fileDir, notary.NonRootKeysSubdir)
if fileDir != "" {
block.Headers["gun"] = filepath.ToSlash(fileDir[1:])
}
if strings.Contains(keyID, "_") {
role := strings.Split(keyID, "_")[1]
keyID = strings.TrimSuffix(keyID, "_"+role)
block.Headers["role"] = role
}
var keyPEM bytes.Buffer
// since block came from decoding the PEM bytes in the first place, and all we're doing is adding some headers, we ignore the possibility of an error while encoding the block
pem.Encode(&keyPEM, block)
f.Set(keyID, keyPEM.Bytes())
}
func (f *FilesystemStore) migrateTo0Dot4() {
rootKeysSubDir := filepath.Clean(filepath.Join(f.Location(), notary.RootKeysSubdir))
nonRootKeysSubDir := filepath.Clean(filepath.Join(f.Location(), notary.NonRootKeysSubdir))
if _, err := os.Stat(rootKeysSubDir); !os.IsNotExist(err) && f.Location() != rootKeysSubDir {
if rootKeysSubDir == "" || rootKeysSubDir == "/" {
// making sure we don't remove a user's homedir
logrus.Warn("The directory for root keys is an unsafe value, we are not going to delete the directory. Please delete it manually")
} else {
// root_keys exists, migrate things from it
listOnlyRootKeysDirStore, _ := NewFileStore(rootKeysSubDir, f.ext)
for _, file := range listOnlyRootKeysDirStore.ListFiles() {
f.moveKeyTo0Dot4Location(filepath.Join(notary.RootKeysSubdir, file))
}
// delete the old directory
os.RemoveAll(rootKeysSubDir)
}
}
if _, err := os.Stat(nonRootKeysSubDir); !os.IsNotExist(err) && f.Location() != nonRootKeysSubDir {
if nonRootKeysSubDir == "" || nonRootKeysSubDir == "/" {
// making sure we don't remove a user's homedir
logrus.Warn("The directory for non root keys is an unsafe value, we are not going to delete the directory. Please delete it manually")
} else {
// tuf_keys exists, migrate things from it
listOnlyNonRootKeysDirStore, _ := NewFileStore(nonRootKeysSubDir, f.ext)
for _, file := range listOnlyNonRootKeysDirStore.ListFiles() {
f.moveKeyTo0Dot4Location(filepath.Join(notary.NonRootKeysSubdir, file))
}
// delete the old directory
os.RemoveAll(nonRootKeysSubDir)
}
}
// if we have a trusted_certificates folder, delete it for a complete migration since it is unused by new clients
certsSubDir := filepath.Join(f.Location(), "trusted_certificates")
if certsSubDir == "" || certsSubDir == "/" {
logrus.Warn("The directory for trusted certificate is an unsafe value, we are not going to delete the directory. Please delete it manually")
} else {
os.RemoveAll(certsSubDir)
}
}
func (f *FilesystemStore) getPath(name string) (string, error) {
fileName := fmt.Sprintf("%s%s", name, f.ext)
fullPath := filepath.Join(f.baseDir, fileName)
if !strings.HasPrefix(fullPath, f.baseDir) {
return "", ErrPathOutsideStore
}
return fullPath, nil
}
// GetSized returns the meta for the given name (a role) up to size bytes
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize". If the file is larger than size
// we return ErrMaliciousServer for consistency with the HTTPStore
func (f *FilesystemStore) GetSized(name string, size int64) ([]byte, error) {
p, err := f.getPath(name)
if err != nil {
return nil, err
}
file, err := os.Open(p)
if err != nil {
if os.IsNotExist(err) {
err = ErrMetaNotFound{Resource: name}
}
return nil, err
}
defer func() {
_ = file.Close()
}()
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
stat, err := file.Stat()
if err != nil {
return nil, err
}
if stat.Size() > size {
return nil, ErrMaliciousServer{}
}
l := io.LimitReader(file, size)
return ioutil.ReadAll(l)
}
// Get returns the meta for the given name.
func (f *FilesystemStore) Get(name string) ([]byte, error) {
p, err := f.getPath(name)
if err != nil {
return nil, err
}
meta, err := ioutil.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
err = ErrMetaNotFound{Resource: name}
}
return nil, err
}
return meta, nil
}
// SetMulti sets the metadata for multiple roles in one operation
func (f *FilesystemStore) SetMulti(metas map[string][]byte) error {
for role, blob := range metas {
err := f.Set(role, blob)
if err != nil {
return err
}
}
return nil
}
// Set sets the meta for a single role
func (f *FilesystemStore) Set(name string, meta []byte) error {
fp, err := f.getPath(name)
if err != nil {
return err
}
// Ensures the parent directories of the file we are about to write exist
err = os.MkdirAll(filepath.Dir(fp), notary.PrivExecPerms)
if err != nil {
return err
}
// if something already exists, just delete it and re-write it
os.RemoveAll(fp)
// Write the file to disk
return ioutil.WriteFile(fp, meta, notary.PrivNoExecPerms)
}
// RemoveAll clears the existing filestore by removing its base directory
func (f *FilesystemStore) RemoveAll() error {
return os.RemoveAll(f.baseDir)
}
// Remove removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (f *FilesystemStore) Remove(name string) error {
p, err := f.getPath(name)
if err != nil {
return err
}
return os.RemoveAll(p) // RemoveAll succeeds if path doesn't exist
}
// Location returns a human readable name for the storage location
func (f FilesystemStore) Location() string {
return f.baseDir
}
// ListFiles returns a list of all the filenames that can be used with Get*
// to retrieve content from this filestore
func (f FilesystemStore) ListFiles() []string {
files := make([]string, 0)
filepath.Walk(f.baseDir, func(fp string, fi os.FileInfo, err error) error {
// If there are errors, ignore this particular file
if err != nil {
return nil
}
// Ignore if it is a directory
if fi.IsDir() {
return nil
}
// If this is a symlink, ignore it
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return nil
}
// Only allow matches that end with our file extension (e.g. *.json)
matched, _ := filepath.Match("*"+f.ext, fi.Name())
if matched {
// Find the relative path for this file relative to the base path.
fp, err = filepath.Rel(f.baseDir, fp)
if err != nil {
return err
}
trimmed := strings.TrimSuffix(fp, f.ext)
files = append(files, trimmed)
}
return nil
})
return files
}
// createDirectory receives a string of the path to a directory.
// It does not support passing files, so the caller has to remove
// the filename by doing filepath.Dir(full_path_to_file)
func createDirectory(dir string, perms os.FileMode) error {
// This prevents someone passing /path/to/dir and 'dir' not being created
// If two '//' exist, MkdirAll deals with it correctly
dir = dir + "/"
return os.MkdirAll(dir, perms)
}
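
A sketch of round-tripping metadata through the FilesystemStore shown above; the temp directory and JSON payload are placeholders:

    package main

    import (
        "fmt"
        "log"
        "os"

        "github.com/theupdateframework/notary/storage"
    )

    func main() {
        dir, err := os.MkdirTemp("", "tuf-store-")
        if err != nil {
            log.Fatal(err)
        }
        defer os.RemoveAll(dir)

        // Files are written as <baseDir>/<name>.json with restrictive perms.
        st, err := storage.NewFileStore(dir, "json")
        if err != nil {
            log.Fatal(err)
        }
        if err := st.Set("root", []byte(`{"signed":{}}`)); err != nil {
            log.Fatal(err)
        }
        meta, err := st.Get("root")
        fmt.Println(string(meta), err)
        fmt.Println(st.ListFiles()) // expect: [root]
    }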


@ -1,379 +0,0 @@
// A Store that can fetch and set metadata on a remote server.
// Some API constraints:
// - Response bodies for error codes should be unmarshallable as:
// {"errors": [{..., "detail": <serialized validation error>}]}
// else validation error details, etc. will be unparsable. The errors
// should have a github.com/theupdateframework/notary/tuf/validation/SerializableError
// in the Details field.
// If writing your own server, please have a look at
// github.com/docker/distribution/registry/api/errcode
package storage
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"path"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/validation"
)
const (
// MaxErrorResponseSize is the maximum size for an error message - 1KiB
MaxErrorResponseSize int64 = 1 << 10
// MaxKeySize is the maximum size for a stored TUF key - 256KiB
MaxKeySize = 256 << 10
)
// ErrServerUnavailable indicates an error from the server. code allows us to
// populate the http error we received
type ErrServerUnavailable struct {
code int
}
// NetworkError represents any kind of network error when attempting to make a request
type NetworkError struct {
Wrapped error
}
func (n NetworkError) Error() string {
if _, ok := n.Wrapped.(*url.Error); ok {
// QueryUnescape does the inverse transformation of QueryEscape,
// converting %AB into the byte 0xAB and '+' into ' ' (space).
// It returns an error if any % is not followed by two hexadecimal digits.
//
// If this happens, we log out the QueryUnescape error and return the
// original error to client.
res, err := url.QueryUnescape(n.Wrapped.Error())
if err != nil {
logrus.Errorf("unescape network error message failed: %s", err)
return n.Wrapped.Error()
}
return res
}
return n.Wrapped.Error()
}
func (err ErrServerUnavailable) Error() string {
if err.code == 401 {
return fmt.Sprintf("you are not authorized to perform this operation: server returned 401.")
}
return fmt.Sprintf("unable to reach trust server at this time: %d.", err.code)
}
// ErrMaliciousServer indicates the server returned a response that is highly suspected
// of being malicious. i.e. it attempted to send us more data than the known size of a
// particular role metadata.
type ErrMaliciousServer struct{}
func (err ErrMaliciousServer) Error() string {
return "trust server returned a bad response."
}
// ErrInvalidOperation indicates that the server returned a 400 response and
// propagates any body we received.
type ErrInvalidOperation struct {
msg string
}
func (err ErrInvalidOperation) Error() string {
if err.msg != "" {
return fmt.Sprintf("trust server rejected operation: %s", err.msg)
}
return "trust server rejected operation."
}
// HTTPStore manages pulling and pushing metadata from and to a remote
// service over HTTP. It assumes the URL structure of the remote service
// maps identically to the structure of the TUF repo:
// <baseURL>/<metaPrefix>/(root|targets|snapshot|timestamp).json
// <baseURL>/<targetsPrefix>/foo.sh
//
// If consistent snapshots are disabled, it is advised that caching is not
// enabled. Simply set a cachePath (and ensure it's writeable) to enable
// caching.
type HTTPStore struct {
baseURL url.URL
metaPrefix string
metaExtension string
keyExtension string
roundTrip http.RoundTripper
}
// NewNotaryServerStore returns a new HTTPStore against a URL which should represent a notary
// server
func NewNotaryServerStore(serverURL string, gun data.GUN, roundTrip http.RoundTripper) (RemoteStore, error) {
return NewHTTPStore(
serverURL+"/v2/"+gun.String()+"/_trust/tuf/",
"",
"json",
"key",
roundTrip,
)
}
// NewHTTPStore initializes a new store against a URL and a number of configuration options.
//
// In case of a nil `roundTrip`, a default offline store is used instead.
func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, roundTrip http.RoundTripper) (RemoteStore, error) {
base, err := url.Parse(baseURL)
if err != nil {
return nil, err
}
if !base.IsAbs() {
return nil, errors.New("HTTPStore requires an absolute baseURL")
}
if roundTrip == nil {
return &OfflineStore{}, nil
}
return &HTTPStore{
baseURL: *base,
metaPrefix: metaPrefix,
metaExtension: metaExtension,
keyExtension: keyExtension,
roundTrip: roundTrip,
}, nil
}
func tryUnmarshalError(resp *http.Response, defaultError error) error {
b := io.LimitReader(resp.Body, MaxErrorResponseSize)
bodyBytes, err := ioutil.ReadAll(b)
if err != nil {
return defaultError
}
var parsedErrors struct {
Errors []struct {
Detail validation.SerializableError `json:"detail"`
} `json:"errors"`
}
if err := json.Unmarshal(bodyBytes, &parsedErrors); err != nil {
return defaultError
}
if len(parsedErrors.Errors) != 1 {
return defaultError
}
err = parsedErrors.Errors[0].Detail.Error
if err == nil {
return defaultError
}
return err
}
func translateStatusToError(resp *http.Response, resource string) error {
switch resp.StatusCode {
case http.StatusOK:
return nil
case http.StatusNotFound:
return ErrMetaNotFound{Resource: resource}
case http.StatusBadRequest:
return tryUnmarshalError(resp, ErrInvalidOperation{})
default:
return ErrServerUnavailable{code: resp.StatusCode}
}
}
// GetSized downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize".
func (s HTTPStore) GetSized(name string, size int64) ([]byte, error) {
url, err := s.buildMetaURL(name)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, NetworkError{Wrapped: err}
}
defer resp.Body.Close()
if err := translateStatusToError(resp, name); err != nil {
logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
return nil, err
}
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
if resp.ContentLength > size {
return nil, ErrMaliciousServer{}
}
logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name)
b := io.LimitReader(resp.Body, size)
body, err := ioutil.ReadAll(b)
if err != nil {
return nil, err
}
return body, nil
}
// Set sends a single piece of metadata to the TUF server
func (s HTTPStore) Set(name string, blob []byte) error {
return s.SetMulti(map[string][]byte{name: blob})
}
// Remove always fails, because we should never be able to delete metadata
// remotely
func (s HTTPStore) Remove(name string) error {
return ErrInvalidOperation{msg: "cannot delete individual metadata files"}
}
// NewMultiPartMetaRequest builds a request with the provided metadata updates
// in multipart form
func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) {
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
for role, blob := range metas {
part, err := writer.CreateFormFile("files", role)
if err != nil {
return nil, err
}
_, err = io.Copy(part, bytes.NewBuffer(blob))
if err != nil {
return nil, err
}
}
err := writer.Close()
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
return req, nil
}
// SetMulti does a single batch upload of multiple pieces of TUF metadata.
// This should be preferred for updating a remote server as it enables the server
// to remain consistent, either accepting or rejecting the complete update.
func (s HTTPStore) SetMulti(metas map[string][]byte) error {
url, err := s.buildMetaURL("")
if err != nil {
return err
}
req, err := NewMultiPartMetaRequest(url.String(), metas)
if err != nil {
return err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return NetworkError{Wrapped: err}
}
defer resp.Body.Close()
// if this 404s, something is pretty wrong
return translateStatusToError(resp, "POST metadata endpoint")
}
// RemoveAll will attempt to delete all TUF metadata for a GUN
func (s HTTPStore) RemoveAll() error {
url, err := s.buildMetaURL("")
if err != nil {
return err
}
req, err := http.NewRequest("DELETE", url.String(), nil)
if err != nil {
return err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return NetworkError{Wrapped: err}
}
defer resp.Body.Close()
return translateStatusToError(resp, "DELETE metadata for GUN endpoint")
}
func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
var filename string
if name != "" {
filename = fmt.Sprintf("%s.%s", name, s.metaExtension)
}
uri := path.Join(s.metaPrefix, filename)
return s.buildURL(uri)
}
func (s HTTPStore) buildKeyURL(name data.RoleName) (*url.URL, error) {
filename := fmt.Sprintf("%s.%s", name.String(), s.keyExtension)
uri := path.Join(s.metaPrefix, filename)
return s.buildURL(uri)
}
func (s HTTPStore) buildURL(uri string) (*url.URL, error) {
sub, err := url.Parse(uri)
if err != nil {
return nil, err
}
return s.baseURL.ResolveReference(sub), nil
}
// GetKey retrieves a public key from the remote server
func (s HTTPStore) GetKey(role data.RoleName) ([]byte, error) {
url, err := s.buildKeyURL(role)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, NetworkError{Wrapped: err}
}
defer resp.Body.Close()
if err := translateStatusToError(resp, role.String()+" key"); err != nil {
return nil, err
}
b := io.LimitReader(resp.Body, MaxKeySize)
body, err := ioutil.ReadAll(b)
if err != nil {
return nil, err
}
return body, nil
}
// RotateKey rotates a private key and returns the public component from the remote server
func (s HTTPStore) RotateKey(role data.RoleName) ([]byte, error) {
url, err := s.buildKeyURL(role)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, NetworkError{Wrapped: err}
}
defer resp.Body.Close()
if err := translateStatusToError(resp, role.String()+" key"); err != nil {
return nil, err
}
b := io.LimitReader(resp.Body, MaxKeySize)
body, err := ioutil.ReadAll(b)
if err != nil {
return nil, err
}
return body, nil
}
// Location returns a human readable name for the storage location
func (s HTTPStore) Location() string {
return s.baseURL.Host
}
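
Finally, a sketch of pointing the HTTPStore at a notary server; the server URL and GUN are placeholders, and NoSizeLimit is the sentinel referenced by GetSized above:

    package main

    import (
        "fmt"
        "log"
        "net/http"

        "github.com/theupdateframework/notary/storage"
        "github.com/theupdateframework/notary/tuf/data"
    )

    func main() {
        // NewNotaryServerStore expands these into the
        // <baseURL>/v2/<gun>/_trust/tuf/ layout described above.
        remote, err := storage.NewNotaryServerStore(
            "https://notary.example.com",
            data.GUN("docker.io/library/alpine"),
            http.DefaultTransport,
        )
        if err != nil {
            log.Fatal(err)
        }
        raw, err := remote.GetSized("root", storage.NoSizeLimit)
        fmt.Println(len(raw), err)
    }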

Some files were not shown because too many files have changed in this diff.