vendor: update buildkit to master@31c870e82a48

Signed-off-by: Justin Chadwell <me@jedevc.com>
Justin Chadwell
2023-05-15 18:32:31 +01:00
parent 167cd16acb
commit e61a8cf637
269 changed files with 25798 additions and 3371 deletions

vendor/github.com/in-toto/in-toto-golang/LICENSE generated vendored Normal file (13 lines added)

@@ -0,0 +1,13 @@
Copyright 2018 New York University
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.


@@ -0,0 +1,156 @@
package in_toto
import (
"crypto/x509"
"fmt"
"net/url"
)
const (
AllowAllConstraint = "*"
)
// CertificateConstraint defines the attributes a certificate must have to act as a functionary.
// A wildcard `*` allows any value in the specified attribute, whereas an empty array or value
// asserts that the certificate must have nothing for that attribute. A certificate must have
// every value defined in a constraint to match.
type CertificateConstraint struct {
CommonName string `json:"common_name"`
DNSNames []string `json:"dns_names"`
Emails []string `json:"emails"`
Organizations []string `json:"organizations"`
Roots []string `json:"roots"`
URIs []string `json:"uris"`
}
// checkResult is a data structure used to hold
// certificate constraint errors
type checkResult struct {
errors []error
}
// newCheckResult initializes a new checkResult
func newCheckResult() *checkResult {
return &checkResult{
errors: make([]error, 0),
}
}
// evaluate runs a constraint check on a certificate
func (cr *checkResult) evaluate(cert *x509.Certificate, constraintCheck func(*x509.Certificate) error) *checkResult {
err := constraintCheck(cert)
if err != nil {
cr.errors = append(cr.errors, err)
}
return cr
}
// error reduces all of the errors into one error with a
// combined error message. If there are no errors, nil
// will be returned.
func (cr *checkResult) error() error {
if len(cr.errors) == 0 {
return nil
}
return fmt.Errorf("cert failed constraints check: %+q", cr.errors)
}
// Check tests the provided certificate against the constraint. An error is returned if the certificate
// fails any of the constraints. nil is returned if the certificate passes all of the constraints.
func (cc CertificateConstraint) Check(cert *x509.Certificate, rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) error {
return newCheckResult().
evaluate(cert, cc.checkCommonName).
evaluate(cert, cc.checkDNSNames).
evaluate(cert, cc.checkEmails).
evaluate(cert, cc.checkOrganizations).
evaluate(cert, cc.checkRoots(rootCAIDs, rootCertPool, intermediateCertPool)).
evaluate(cert, cc.checkURIs).
error()
}
// checkCommonName verifies that the certificate's common name matches the constraint.
func (cc CertificateConstraint) checkCommonName(cert *x509.Certificate) error {
return checkCertConstraint("common name", []string{cc.CommonName}, []string{cert.Subject.CommonName})
}
// checkDNSNames verifies that the certificate's DNS names match the constraint.
func (cc CertificateConstraint) checkDNSNames(cert *x509.Certificate) error {
return checkCertConstraint("dns name", cc.DNSNames, cert.DNSNames)
}
// checkEmails verifies that the certificate's emails match the constraint.
func (cc CertificateConstraint) checkEmails(cert *x509.Certificate) error {
return checkCertConstraint("email", cc.Emails, cert.EmailAddresses)
}
// checkOrganizations verifies that the certificate's organizations match the constraint.
func (cc CertificateConstraint) checkOrganizations(cert *x509.Certificate) error {
return checkCertConstraint("organization", cc.Organizations, cert.Subject.Organization)
}
// checkRoots verifies that the certificate's roots match the constraint.
// The certificate's trust chain must also be verified.
func (cc CertificateConstraint) checkRoots(rootCAIDs []string, rootCertPool, intermediateCertPool *x509.CertPool) func(*x509.Certificate) error {
return func(cert *x509.Certificate) error {
_, err := VerifyCertificateTrust(cert, rootCertPool, intermediateCertPool)
if err != nil {
return fmt.Errorf("failed to verify roots: %w", err)
}
return checkCertConstraint("root", cc.Roots, rootCAIDs)
}
}
// checkURIs verifies that the certificate's URIs match the constraint.
func (cc CertificateConstraint) checkURIs(cert *x509.Certificate) error {
return checkCertConstraint("uri", cc.URIs, urisToStrings(cert.URIs))
}
// urisToStrings is a helper that converts a list of URL objects to the strings that represent them
func urisToStrings(uris []*url.URL) []string {
res := make([]string, 0, len(uris))
for _, uri := range uris {
res = append(res, uri.String())
}
return res
}
// checkCertConstraint tests that the provided test values match the allowed values of the constraint.
// All allowed values must be met one-to-one to be considered a successful match.
func checkCertConstraint(attributeName string, constraints, values []string) error {
// If the only constraint is to allow all, the check succeeds
if len(constraints) == 1 && constraints[0] == AllowAllConstraint {
return nil
}
if len(constraints) == 1 && constraints[0] == "" {
constraints = []string{}
}
if len(values) == 1 && values[0] == "" {
values = []string{}
}
// If no constraints are specified, but the certificate has values for the attribute, then the check fails
if len(constraints) == 0 && len(values) > 0 {
return fmt.Errorf("not expecting any %s(s), but cert has %d %s(s)", attributeName, len(values), attributeName)
}
unmet := NewSet(constraints...)
for _, v := range values {
// if the cert has a value we didn't expect, fail early
if !unmet.Has(v) {
return fmt.Errorf("cert has an unexpected %s %s given constraints %+q", attributeName, v, constraints)
}
// consider the constraint met
unmet.Remove(v)
}
// if we have any unmet left after going through each test value, fail.
if len(unmet) > 0 {
return fmt.Errorf("cert with %s(s) %+q did not pass all constraints %+q", attributeName, values, constraints)
}
return nil
}
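
A short usage sketch (not part of the vendored file) tying the pieces above together; the certificate path, constraint values, and CA pools below are illustrative, and the root check only passes if the root pool actually contains the issuing CA:

package main

import (
    "crypto/x509"
    "encoding/pem"
    "fmt"
    "os"

    in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
    // Hypothetical PEM-encoded functionary certificate.
    raw, err := os.ReadFile("functionary.pem")
    if err != nil {
        panic(err)
    }
    block, _ := pem.Decode(raw)
    cert, err := x509.ParseCertificate(block.Bytes)
    if err != nil {
        panic(err)
    }
    constraint := in_toto.CertificateConstraint{
        CommonName:    "*",                     // any common name
        DNSNames:      []string{""},            // the cert must carry no DNS names
        Emails:        []string{"*"},           // any email
        Organizations: []string{"example org"}, // exactly this organization
        Roots:         []string{"*"},           // any trusted root
        URIs:          []string{"*"},
    }
    roots := x509.NewCertPool()         // must contain the issuing CA for the trust check to pass
    intermediates := x509.NewCertPool() // optional intermediates
    // Root CA IDs from the layout would normally go in the second argument;
    // they are unused here because Roots is the allow-all constraint.
    if err := constraint.Check(cert, nil, roots, intermediates); err != nil {
        fmt.Println("constraint check failed:", err)
        return
    }
    fmt.Println("certificate satisfies the constraint")
}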


@@ -0,0 +1,30 @@
package in_toto
import (
"crypto/sha256"
"crypto/sha512"
"hash"
)
/*
getHashMapping returns a mapping from hash algorithm to supported hash
interface.
*/
func getHashMapping() map[string]func() hash.Hash {
return map[string]func() hash.Hash{
"sha256": sha256.New,
"sha512": sha512.New,
"sha384": sha512.New384,
}
}
/*
hashToHex calculates the hash over data based on hash algorithm h.
*/
func hashToHex(h hash.Hash, data []byte) []byte {
h.Write(data)
// We need to use h.Sum(nil) here, because otherwise hash.Sum() appends
// the hash to the passed data. So instead of having only the hash
// we would get: "dataHASH"
return h.Sum(nil)
}
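
For reference, the same digest computation with only the standard library (a sketch, not part of the vendored file):

package main

import (
    "crypto/sha256"
    "fmt"
)

func main() {
    h := sha256.New()
    h.Write([]byte("in-toto artifact contents"))
    // Sum(nil) returns only the digest; a non-nil argument would have the
    // digest appended to it, which is exactly what hashToHex avoids above.
    fmt.Printf("sha256: %x\n", h.Sum(nil))
}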


@@ -0,0 +1,670 @@
package in_toto
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/hex"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"os"
"strings"
"github.com/secure-systems-lab/go-securesystemslib/cjson"
)
// ErrFailedPEMParsing gets returned when PKCS1, PKCS8 or PKIX key parsing fails
var ErrFailedPEMParsing = errors.New("failed parsing the PEM block: unsupported PEM type")
// ErrNoPEMBlock gets triggered when there is no PEM block in the provided file
var ErrNoPEMBlock = errors.New("failed to decode the data as PEM block (are you sure this is a pem file?)")
// ErrUnsupportedKeyType is returned when we are dealing with a key type other than RSA, ECDSA or ed25519
var ErrUnsupportedKeyType = errors.New("unsupported key type")
// ErrInvalidSignature is returned when the signature is invalid
var ErrInvalidSignature = errors.New("invalid signature")
// ErrInvalidKey is returned when a given key is none of RSA, ECDSA or ED25519
var ErrInvalidKey = errors.New("invalid key")
const (
rsaKeyType string = "rsa"
ecdsaKeyType string = "ecdsa"
ed25519KeyType string = "ed25519"
rsassapsssha256Scheme string = "rsassa-pss-sha256"
ecdsaSha2nistp224 string = "ecdsa-sha2-nistp224"
ecdsaSha2nistp256 string = "ecdsa-sha2-nistp256"
ecdsaSha2nistp384 string = "ecdsa-sha2-nistp384"
ecdsaSha2nistp521 string = "ecdsa-sha2-nistp521"
ed25519Scheme string = "ed25519"
pemPublicKey string = "PUBLIC KEY"
pemPrivateKey string = "PRIVATE KEY"
pemRSAPrivateKey string = "RSA PRIVATE KEY"
)
/*
getSupportedKeyIDHashAlgorithms returns a string slice of supported
KeyIDHashAlgorithms. We need to use this function instead of a constant,
because Go does not support global constant slices.
*/
func getSupportedKeyIDHashAlgorithms() Set {
return NewSet("sha256", "sha512")
}
/*
getSupportedRSASchemes returns a string slice of supported RSA Key schemes.
We need to use this function instead of a constant because Go does not support
global constant slices.
*/
func getSupportedRSASchemes() []string {
return []string{rsassapsssha256Scheme}
}
/*
getSupportedEcdsaSchemes returns a string slice of supported ecdsa Key schemes.
We need to use this function instead of a constant because Go does not support
global constant slices.
*/
func getSupportedEcdsaSchemes() []string {
return []string{ecdsaSha2nistp224, ecdsaSha2nistp256, ecdsaSha2nistp384, ecdsaSha2nistp521}
}
/*
getSupportedEd25519Schemes returns a string slice of supported ed25519 Key
schemes. We need to use this function instead of a constant because Go does
not support global constant slices.
*/
func getSupportedEd25519Schemes() []string {
return []string{ed25519Scheme}
}
/*
generateKeyID creates a partial key map and generates the key ID
based on the created partial key map via the SHA256 method.
The resulting keyID will be directly saved in the corresponding key object.
On success generateKeyID will return nil; in case of an error while encoding,
that error is returned.
*/
func (k *Key) generateKeyID() error {
// Create partial key map used to create the keyid
// Unfortunately, we can't use the Key object because this also carries
// yet unwanted fields, such as KeyID and KeyVal.Private and therefore
// produces a different hash. We generate the keyID exactly as we do in
// the securesystemslib to keep interoperability between other in-toto
// implementations.
var keyToBeHashed = map[string]interface{}{
"keytype": k.KeyType,
"scheme": k.Scheme,
"keyid_hash_algorithms": k.KeyIDHashAlgorithms,
"keyval": map[string]string{
"public": k.KeyVal.Public,
},
}
keyCanonical, err := cjson.EncodeCanonical(keyToBeHashed)
if err != nil {
return err
}
// calculate sha256 and return string representation of keyID
keyHashed := sha256.Sum256(keyCanonical)
k.KeyID = fmt.Sprintf("%x", keyHashed)
err = validateKey(*k)
if err != nil {
return err
}
return nil
}
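
The key ID can be reproduced outside the package with the same canonical-JSON encoder the file imports; a sketch (not part of the vendored file) with a placeholder ed25519 public key value:

package main

import (
    "crypto/sha256"
    "fmt"

    "github.com/secure-systems-lab/go-securesystemslib/cjson"
)

func main() {
    keyToBeHashed := map[string]interface{}{
        "keytype":               "ed25519",
        "scheme":                "ed25519",
        "keyid_hash_algorithms": []string{"sha256", "sha512"},
        "keyval": map[string]string{
            "public": "e8912b58...", // placeholder hex-encoded public key
        },
    }
    keyCanonical, err := cjson.EncodeCanonical(keyToBeHashed)
    if err != nil {
        panic(err)
    }
    fmt.Printf("keyid: %x\n", sha256.Sum256(keyCanonical))
}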
/*
generatePEMBlock creates a PEM block from scratch via the keyBytes and the pemType.
If successful it returns a PEM block as a []byte slice. This function should always
succeed; if keyBytes is empty, the PEM block will contain an empty byte block,
so only the header and footer will exist.
*/
func generatePEMBlock(keyBytes []byte, pemType string) []byte {
// construct PEM block
pemBlock := &pem.Block{
Type: pemType,
Headers: nil,
Bytes: keyBytes,
}
return pem.EncodeToMemory(pemBlock)
}
/*
setKeyComponents sets all components in our key object.
Furthermore, it removes any leading and trailing whitespace or newlines.
We treat key types differently for interoperability with the in-toto Python
implementation and securesystemslib.
*/
func (k *Key) setKeyComponents(pubKeyBytes []byte, privateKeyBytes []byte, keyType string, scheme string, KeyIDHashAlgorithms []string) error {
// assume we have a privateKey if the key size is bigger than 0
switch keyType {
case rsaKeyType:
if len(privateKeyBytes) > 0 {
k.KeyVal = KeyVal{
Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemRSAPrivateKey))),
Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
}
} else {
k.KeyVal = KeyVal{
Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
}
}
case ecdsaKeyType:
if len(privateKeyBytes) > 0 {
k.KeyVal = KeyVal{
Private: strings.TrimSpace(string(generatePEMBlock(privateKeyBytes, pemPrivateKey))),
Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
}
} else {
k.KeyVal = KeyVal{
Public: strings.TrimSpace(string(generatePEMBlock(pubKeyBytes, pemPublicKey))),
}
}
case ed25519KeyType:
if len(privateKeyBytes) > 0 {
k.KeyVal = KeyVal{
Private: strings.TrimSpace(hex.EncodeToString(privateKeyBytes)),
Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)),
}
} else {
k.KeyVal = KeyVal{
Public: strings.TrimSpace(hex.EncodeToString(pubKeyBytes)),
}
}
default:
return fmt.Errorf("%w: %s", ErrUnsupportedKeyType, keyType)
}
k.KeyType = keyType
k.Scheme = scheme
k.KeyIDHashAlgorithms = KeyIDHashAlgorithms
if err := k.generateKeyID(); err != nil {
return err
}
return nil
}
/*
parseKey tries to parse a PEM []byte slice. Using the following standards
in the given order:
- PKCS8
- PKCS1
- PKIX
On success it returns the parsed key and nil.
On failure it returns nil and the error ErrFailedPEMParsing
*/
func parseKey(data []byte) (interface{}, error) {
key, err := x509.ParsePKCS8PrivateKey(data)
if err == nil {
return key, nil
}
key, err = x509.ParsePKCS1PrivateKey(data)
if err == nil {
return key, nil
}
key, err = x509.ParsePKIXPublicKey(data)
if err == nil {
return key, nil
}
key, err = x509.ParseCertificate(data)
if err == nil {
return key, nil
}
key, err = x509.ParseECPrivateKey(data)
if err == nil {
return key, nil
}
return nil, ErrFailedPEMParsing
}
/*
decodeAndParse receives potential PEM bytes decodes them via pem.Decode
and pushes them to parseKey. If any error occurs during this process,
the function will return nil and an error (either ErrFailedPEMParsing
or ErrNoPEMBlock). On success it will return the decoded pemData, the
key object interface and nil as error. We need the decoded pemData,
because LoadKey relies on decoded pemData for operating system
interoperability.
*/
func decodeAndParse(pemBytes []byte) (*pem.Block, interface{}, error) {
// pem.Decode returns the parsed pem block and a rest.
// The rest is everything that could not be parsed as a PEM block.
// Therefore we can drop it by using the blank identifier "_"
data, _ := pem.Decode(pemBytes)
if data == nil {
return nil, nil, ErrNoPEMBlock
}
// Try to load private key, if this fails try to load
// key as public key
key, err := parseKey(data.Bytes)
if err != nil {
return nil, nil, err
}
return data, key, nil
}
/*
LoadKey loads the key file at specified file path into the key object.
It automatically derives the PEM type and the key type.
Right now the following PEM types are supported:
- PKCS1 for private keys
- PKCS8 for private keys
- PKIX for public keys
The following key types are supported and will be automatically assigned to
the key type field:
- ed25519
- rsa
- ecdsa
The following schemes are supported:
- ed25519 -> ed25519
- rsa -> rsassa-pss-sha256
- ecdsa -> ecdsa-sha2-nistp256
Note that this behavior is consistent with securesystemslib, except for
ecdsa. We do not use the scheme string as the key type in in-toto-golang.
Instead we go with an ecdsa/ecdsa-sha2-nistp256 pair.
On success it will return nil. The following errors can happen:
- path not found or not readable
- no PEM block in the loaded file
- no valid PKCS8/PKCS1 private key or PKIX public key
- errors while marshalling
- unsupported key types
*/
func (k *Key) LoadKey(path string, scheme string, KeyIDHashAlgorithms []string) error {
pemFile, err := os.Open(path)
if err != nil {
return err
}
defer pemFile.Close()
err = k.LoadKeyReader(pemFile, scheme, KeyIDHashAlgorithms)
if err != nil {
return err
}
return pemFile.Close()
}
func (k *Key) LoadKeyDefaults(path string) error {
pemFile, err := os.Open(path)
if err != nil {
return err
}
defer pemFile.Close()
err = k.LoadKeyReaderDefaults(pemFile)
if err != nil {
return err
}
return pemFile.Close()
}
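
A usage sketch for the default loader (not part of the vendored file; the key path is hypothetical and the reported scheme depends on what parseKey finds in the PEM block):

package main

import (
    "fmt"
    "log"

    in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
    var key in_toto.Key
    // "alice.pem" stands for any PKCS8/PKCS1 private key or PKIX public key file.
    if err := key.LoadKeyDefaults("alice.pem"); err != nil {
        log.Fatal(err)
    }
    // e.g. "<hex keyid> rsa rsassa-pss-sha256" for an RSA key
    fmt.Println(key.KeyID, key.KeyType, key.Scheme)
}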
// LoadKeyReader loads the key from a supplied reader. The logic matches LoadKey otherwise.
func (k *Key) LoadKeyReader(r io.Reader, scheme string, KeyIDHashAlgorithms []string) error {
if r == nil {
return ErrNoPEMBlock
}
// Read key bytes
pemBytes, err := ioutil.ReadAll(r)
if err != nil {
return err
}
// decodeAndParse returns the pemData for later use
// and a parsed key object (for operations on that key, like extracting the public Key)
pemData, key, err := decodeAndParse(pemBytes)
if err != nil {
return err
}
return k.loadKey(key, pemData, scheme, KeyIDHashAlgorithms)
}
func (k *Key) LoadKeyReaderDefaults(r io.Reader) error {
if r == nil {
return ErrNoPEMBlock
}
// Read key bytes
pemBytes, err := ioutil.ReadAll(r)
if err != nil {
return err
}
// decodeAndParse returns the pemData for later use
// and a parsed key object (for operations on that key, like extracting the public Key)
pemData, key, err := decodeAndParse(pemBytes)
if err != nil {
return err
}
scheme, keyIDHashAlgorithms, err := getDefaultKeyScheme(key)
if err != nil {
return err
}
return k.loadKey(key, pemData, scheme, keyIDHashAlgorithms)
}
func getDefaultKeyScheme(key interface{}) (scheme string, keyIDHashAlgorithms []string, err error) {
keyIDHashAlgorithms = []string{"sha256", "sha512"}
switch key.(type) {
case *rsa.PublicKey, *rsa.PrivateKey:
scheme = rsassapsssha256Scheme
case ed25519.PrivateKey, ed25519.PublicKey:
scheme = ed25519Scheme
case *ecdsa.PrivateKey, *ecdsa.PublicKey:
scheme = ecdsaSha2nistp256
case *x509.Certificate:
return getDefaultKeyScheme(key.(*x509.Certificate).PublicKey)
default:
err = ErrUnsupportedKeyType
}
return scheme, keyIDHashAlgorithms, err
}
func (k *Key) loadKey(key interface{}, pemData *pem.Block, scheme string, keyIDHashAlgorithms []string) error {
switch key.(type) {
case *rsa.PublicKey:
pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PublicKey))
if err != nil {
return err
}
if err := k.setKeyComponents(pubKeyBytes, []byte{}, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
return err
}
case *rsa.PrivateKey:
// Note: RSA Public Keys will get stored as X.509 SubjectPublicKeyInfo (RFC5280)
// This behavior is consistent to the securesystemslib
pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*rsa.PrivateKey).Public())
if err != nil {
return err
}
if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, rsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
return err
}
case ed25519.PublicKey:
if err := k.setKeyComponents(key.(ed25519.PublicKey), []byte{}, ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
return err
}
case ed25519.PrivateKey:
pubKeyBytes := key.(ed25519.PrivateKey).Public()
if err := k.setKeyComponents(pubKeyBytes.(ed25519.PublicKey), key.(ed25519.PrivateKey), ed25519KeyType, scheme, keyIDHashAlgorithms); err != nil {
return err
}
case *ecdsa.PrivateKey:
pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PrivateKey).Public())
if err != nil {
return err
}
if err := k.setKeyComponents(pubKeyBytes, pemData.Bytes, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
return err
}
case *ecdsa.PublicKey:
pubKeyBytes, err := x509.MarshalPKIXPublicKey(key.(*ecdsa.PublicKey))
if err != nil {
return err
}
if err := k.setKeyComponents(pubKeyBytes, []byte{}, ecdsaKeyType, scheme, keyIDHashAlgorithms); err != nil {
return err
}
case *x509.Certificate:
err := k.loadKey(key.(*x509.Certificate).PublicKey, pemData, scheme, keyIDHashAlgorithms)
if err != nil {
return err
}
k.KeyVal.Certificate = string(pem.EncodeToMemory(pemData))
default:
// We should never get here, because we implement all key types supported by Go
return errors.New("unexpected Error in LoadKey function")
}
return nil
}
/*
GenerateSignature will automatically detect the key type and sign the signable data
with the provided key. If everything goes right, GenerateSignature will return
a signature that is valid for the key and err=nil. If something goes wrong, it will
return an uninitialized signature and an error. Possible errors are:
- ErrNoPEMBlock
- ErrUnsupportedKeyType
Currently only one scheme per key is supported.
Note that in-toto-golang has different requirements for an ecdsa key.
In in-toto-golang we use the string 'ecdsa' as the key type and
'ecdsa-sha2-nistp256' as the key scheme.
*/
func GenerateSignature(signable []byte, key Key) (Signature, error) {
err := validateKey(key)
if err != nil {
return Signature{}, err
}
var signature Signature
var signatureBuffer []byte
hashMapping := getHashMapping()
// The following switch block is needed for keeping interoperability
// with the securesystemslib and the python implementation
// in which we are storing RSA keys in PEM format, but ed25519 keys hex encoded.
switch key.KeyType {
case rsaKeyType:
// We do not need the pemData here, so we can throw it away via '_'
_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
if err != nil {
return Signature{}, err
}
parsedKey, ok := parsedKey.(*rsa.PrivateKey)
if !ok {
return Signature{}, ErrKeyKeyTypeMismatch
}
switch key.Scheme {
case rsassapsssha256Scheme:
hashed := hashToHex(hashMapping["sha256"](), signable)
// We use rand.Reader as secure random source for rsa.SignPSS()
signatureBuffer, err = rsa.SignPSS(rand.Reader, parsedKey.(*rsa.PrivateKey), crypto.SHA256, hashed,
&rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
if err != nil {
return signature, err
}
default:
// supported key schemes will get checked in validateKey
panic("unexpected Error in GenerateSignature function")
}
case ecdsaKeyType:
// We do not need the pemData here, so we can throw it away via '_'
_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Private))
if err != nil {
return Signature{}, err
}
parsedKey, ok := parsedKey.(*ecdsa.PrivateKey)
if !ok {
return Signature{}, ErrKeyKeyTypeMismatch
}
curveSize := parsedKey.(*ecdsa.PrivateKey).Curve.Params().BitSize
var hashed []byte
if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
return Signature{}, ErrCurveSizeSchemeMismatch
}
// implement https://tools.ietf.org/html/rfc5656#section-6.2.1
// We determine the curve size and choose the correct hashing
// method based on the curveSize
switch {
case curveSize <= 256:
hashed = hashToHex(hashMapping["sha256"](), signable)
case 256 < curveSize && curveSize <= 384:
hashed = hashToHex(hashMapping["sha384"](), signable)
case curveSize > 384:
hashed = hashToHex(hashMapping["sha512"](), signable)
default:
panic("unexpected Error in GenerateSignature function")
}
// Generate the ecdsa signature in the same way as we do in securesystemslib.
// We are marshalling the ecdsaSignature struct as ASN.1 INTEGER SEQUENCES
// into an ASN.1 Object.
signatureBuffer, err = ecdsa.SignASN1(rand.Reader, parsedKey.(*ecdsa.PrivateKey), hashed[:])
if err != nil {
return signature, err
}
case ed25519KeyType:
// We do not need a scheme switch here, because ed25519
// only consist of sha256 and curve25519.
privateHex, err := hex.DecodeString(key.KeyVal.Private)
if err != nil {
return signature, ErrInvalidHexString
}
// Note: We can directly use the key for signing and do not
// need to use ed25519.NewKeyFromSeed().
signatureBuffer = ed25519.Sign(privateHex, signable)
default:
// We should never get here, because we call validateKey in the first
// line of the function.
panic("unexpected Error in GenerateSignature function")
}
signature.Sig = hex.EncodeToString(signatureBuffer)
signature.KeyID = key.KeyID
signature.Certificate = key.KeyVal.Certificate
return signature, nil
}
/*
VerifySignature will verify unverified byte data via a passed key and signature.
Supported key types are:
- rsa
- ed25519
- ecdsa
When encountering an RSA key, VerifySignature will decode the PEM block in the key
and will call rsa.VerifyPSS() for verifying the RSA signature.
When encountering an ed25519 key, VerifySignature will decode the hex string encoded
public key and will use ed25519.Verify() for verifying the ed25519 signature.
When the given key is an ecdsa key, VerifySignature will unmarshal the ASN.1 object
and will use the retrieved ecdsa components 'r' and 's' for verifying the signature.
On success it will return nil. In case of an unsupported key type or any other error
it will return an error.
Note that in-toto-golang has different requirements for an ecdsa key.
In in-toto-golang we use the string 'ecdsa' as the key type and
'ecdsa-sha2-nistp256' as the key scheme.
*/
func VerifySignature(key Key, sig Signature, unverified []byte) error {
err := validateKey(key)
if err != nil {
return err
}
sigBytes, err := hex.DecodeString(sig.Sig)
if err != nil {
return err
}
hashMapping := getHashMapping()
switch key.KeyType {
case rsaKeyType:
// We do not need the pemData here, so we can throw it away via '_'
_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
if err != nil {
return err
}
parsedKey, ok := parsedKey.(*rsa.PublicKey)
if !ok {
return ErrKeyKeyTypeMismatch
}
switch key.Scheme {
case rsassapsssha256Scheme:
hashed := hashToHex(hashMapping["sha256"](), unverified)
err = rsa.VerifyPSS(parsedKey.(*rsa.PublicKey), crypto.SHA256, hashed, sigBytes, &rsa.PSSOptions{SaltLength: sha256.Size, Hash: crypto.SHA256})
if err != nil {
return fmt.Errorf("%w: %s", ErrInvalidSignature, err)
}
default:
// supported key schemes will get checked in validateKey
panic("unexpected Error in VerifySignature function")
}
case ecdsaKeyType:
// We do not need the pemData here, so we can throw it away via '_'
_, parsedKey, err := decodeAndParse([]byte(key.KeyVal.Public))
if err != nil {
return err
}
parsedKey, ok := parsedKey.(*ecdsa.PublicKey)
if !ok {
return ErrKeyKeyTypeMismatch
}
curveSize := parsedKey.(*ecdsa.PublicKey).Curve.Params().BitSize
var hashed []byte
if err := matchEcdsaScheme(curveSize, key.Scheme); err != nil {
return ErrCurveSizeSchemeMismatch
}
// implement https://tools.ietf.org/html/rfc5656#section-6.2.1
// We determine the curve size and choose the correct hashing
// method based on the curveSize
switch {
case curveSize <= 256:
hashed = hashToHex(hashMapping["sha256"](), unverified)
case 256 < curveSize && curveSize <= 384:
hashed = hashToHex(hashMapping["sha384"](), unverified)
case curveSize > 384:
hashed = hashToHex(hashMapping["sha512"](), unverified)
default:
panic("unexpected Error in VerifySignature function")
}
if ok := ecdsa.VerifyASN1(parsedKey.(*ecdsa.PublicKey), hashed[:], sigBytes); !ok {
return ErrInvalidSignature
}
case ed25519KeyType:
// We do not need a scheme switch here, because ed25519
// only consist of sha256 and curve25519.
pubHex, err := hex.DecodeString(key.KeyVal.Public)
if err != nil {
return ErrInvalidHexString
}
if ok := ed25519.Verify(pubHex, unverified, sigBytes); !ok {
return fmt.Errorf("%w: ed25519", ErrInvalidSignature)
}
default:
// We should never get here, because we call validateKey in the first
// line of the function.
panic("unexpected Error in VerifySignature function")
}
return nil
}
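
A sign-and-verify round trip using the two functions above (a sketch, not part of the vendored file; the private key path is hypothetical):

package main

import (
    "log"

    in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
    var key in_toto.Key
    if err := key.LoadKeyDefaults("alice.pem"); err != nil { // hypothetical private key
        log.Fatal(err)
    }
    payload := []byte(`{"_type":"link","name":"demo"}`)
    sig, err := in_toto.GenerateSignature(payload, key)
    if err != nil {
        log.Fatal(err)
    }
    // Verification only needs the public half stored in key.KeyVal.Public.
    if err := in_toto.VerifySignature(key, sig, payload); err != nil {
        log.Fatal(err)
    }
    log.Printf("signature by key %s verified", sig.KeyID)
}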
/*
VerifyCertificateTrust verifies that the certificate has a chain of trust
to a root in rootCertPool, possibly using any intermediates in
intermediateCertPool
*/
func VerifyCertificateTrust(cert *x509.Certificate, rootCertPool, intermediateCertPool *x509.CertPool) ([][]*x509.Certificate, error) {
verifyOptions := x509.VerifyOptions{
Roots: rootCertPool,
Intermediates: intermediateCertPool,
}
chains, err := cert.Verify(verifyOptions)
if len(chains) == 0 || err != nil {
return nil, fmt.Errorf("cert cannot be verified by provided roots and intermediates")
}
return chains, nil
}


@@ -0,0 +1,227 @@
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found at https://golang.org/LICENSE.
// this is a modified version of path.Match that removes handling of path separators
package in_toto
import (
"errors"
"unicode/utf8"
)
// errBadPattern indicates a pattern was malformed.
var errBadPattern = errors.New("syntax error in pattern")
// match reports whether name matches the shell pattern.
// The pattern syntax is:
//
// pattern:
// { term }
// term:
// '*' matches any sequence of non-/ characters
// '?' matches any single non-/ character
// '[' [ '^' ] { character-range } ']'
// character class (must be non-empty)
// c matches character c (c != '*', '?', '\\', '[')
// '\\' c matches character c
//
// character-range:
// c matches character c (c != '\\', '-', ']')
// '\\' c matches character c
// lo '-' hi matches character c for lo <= c <= hi
//
// Match requires pattern to match all of name, not just a substring.
// The only possible returned error is ErrBadPattern, when pattern
// is malformed.
func match(pattern, name string) (matched bool, err error) {
Pattern:
for len(pattern) > 0 {
var star bool
var chunk string
star, chunk, pattern = scanChunk(pattern)
if star && chunk == "" {
// Trailing * matches everything
return true, nil
}
// Look for match at current position.
t, ok, err := matchChunk(chunk, name)
// if we're the last chunk, make sure we've exhausted the name
// otherwise we'll give a false result even if we could still match
// using the star
if ok && (len(t) == 0 || len(pattern) > 0) {
name = t
continue
}
if err != nil {
return false, err
}
if star {
// Look for match skipping i+1 bytes.
for i := 0; i < len(name); i++ {
t, ok, err := matchChunk(chunk, name[i+1:])
if ok {
// if we're the last chunk, make sure we exhausted the name
if len(pattern) == 0 && len(t) > 0 {
continue
}
name = t
continue Pattern
}
if err != nil {
return false, err
}
}
}
// Before returning false with no error,
// check that the remainder of the pattern is syntactically valid.
for len(pattern) > 0 {
_, chunk, pattern = scanChunk(pattern)
if _, _, err := matchChunk(chunk, ""); err != nil {
return false, err
}
}
return false, nil
}
return len(name) == 0, nil
}
// scanChunk gets the next segment of pattern, which is a non-star string
// possibly preceded by a star.
func scanChunk(pattern string) (star bool, chunk, rest string) {
for len(pattern) > 0 && pattern[0] == '*' {
pattern = pattern[1:]
star = true
}
inrange := false
var i int
Scan:
for i = 0; i < len(pattern); i++ {
switch pattern[i] {
case '\\':
// error check handled in matchChunk: bad pattern.
if i+1 < len(pattern) {
i++
}
case '[':
inrange = true
case ']':
inrange = false
case '*':
if !inrange {
break Scan
}
}
}
return star, pattern[0:i], pattern[i:]
}
// matchChunk checks whether chunk matches the beginning of s.
// If so, it returns the remainder of s (after the match).
// Chunk is all single-character operators: literals, char classes, and ?.
func matchChunk(chunk, s string) (rest string, ok bool, err error) {
// failed records whether the match has failed.
// After the match fails, the loop continues on processing chunk,
// checking that the pattern is well-formed but no longer reading s.
failed := false
for len(chunk) > 0 {
if !failed && len(s) == 0 {
failed = true
}
switch chunk[0] {
case '[':
// character class
var r rune
if !failed {
var n int
r, n = utf8.DecodeRuneInString(s)
s = s[n:]
}
chunk = chunk[1:]
// possibly negated
negated := false
if len(chunk) > 0 && chunk[0] == '^' {
negated = true
chunk = chunk[1:]
}
// parse all ranges
match := false
nrange := 0
for {
if len(chunk) > 0 && chunk[0] == ']' && nrange > 0 {
chunk = chunk[1:]
break
}
var lo, hi rune
if lo, chunk, err = getEsc(chunk); err != nil {
return "", false, err
}
hi = lo
if chunk[0] == '-' {
if hi, chunk, err = getEsc(chunk[1:]); err != nil {
return "", false, err
}
}
if lo <= r && r <= hi {
match = true
}
nrange++
}
if match == negated {
failed = true
}
case '?':
if !failed {
_, n := utf8.DecodeRuneInString(s)
s = s[n:]
}
chunk = chunk[1:]
case '\\':
chunk = chunk[1:]
if len(chunk) == 0 {
return "", false, errBadPattern
}
fallthrough
default:
if !failed {
if chunk[0] != s[0] {
failed = true
}
s = s[1:]
}
chunk = chunk[1:]
}
}
if failed {
return "", false, nil
}
return s, true, nil
}
// getEsc gets a possibly-escaped character from chunk, for a character class.
func getEsc(chunk string) (r rune, nchunk string, err error) {
if len(chunk) == 0 || chunk[0] == '-' || chunk[0] == ']' {
err = errBadPattern
return
}
if chunk[0] == '\\' {
chunk = chunk[1:]
if len(chunk) == 0 {
err = errBadPattern
return
}
}
r, n := utf8.DecodeRuneInString(chunk)
if r == utf8.RuneError && n == 1 {
err = errBadPattern
}
nchunk = chunk[n:]
if len(nchunk) == 0 {
err = errBadPattern
}
return
}
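
Because this file is a copy of the standard library's path.Match with the path-separator handling removed (see the header note above), its behavior can be contrasted with the original in a small sketch; the vendored match itself is unexported:

package main

import (
    "fmt"
    "path"
)

func main() {
    // Standard path.Match: '*' never crosses a '/' ...
    fmt.Println(path.Match("foo/*.pub", "foo/bar/alice.pub")) // false <nil>
    // ... while the vendored match, with separator handling removed, treats
    // '/' like any other character and would report a match for the same input.
    fmt.Println(path.Match("*.pub", "alice.pub")) // true <nil> in both variants
}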

vendor/github.com/in-toto/in-toto-golang/in_toto/model.go generated vendored Normal file (1073 lines added)

File diff suppressed because it is too large.


@@ -0,0 +1,131 @@
package in_toto
import (
"fmt"
"strings"
)
// An error message issued in UnpackRule if it receives a malformed rule.
var errorMsg = "Wrong rule format, available formats are:\n" +
"\tMATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)" +
" [IN <destination-path-prefix>] FROM <step>,\n" +
"\tCREATE <pattern>,\n" +
"\tDELETE <pattern>,\n" +
"\tMODIFY <pattern>,\n" +
"\tALLOW <pattern>,\n" +
"\tDISALLOW <pattern>,\n" +
"\tREQUIRE <filename>\n\n"
/*
UnpackRule parses the passed rule and extracts and returns the information
required for rule processing. It can be used to verify if a rule has a valid
format. Available rule formats are:
MATCH <pattern> [IN <source-path-prefix>] WITH (MATERIALS|PRODUCTS)
[IN <destination-path-prefix>] FROM <step>,
CREATE <pattern>,
DELETE <pattern>,
MODIFY <pattern>,
ALLOW <pattern>,
DISALLOW <pattern>
Rule tokens are normalized to lower case before returning. The returned map
has the following format:
{
"type": "match" | "create" | "delete" |"modify" | "allow" | "disallow"
"pattern": "<file name pattern>",
"srcPrefix": "<path or empty string>", // MATCH rule only
"dstPrefix": "<path or empty string>", // MATCH rule only
"dstType": "materials" | "products">, // MATCH rule only
"dstName": "<step name>", // Match rule only
}
If the rule does not match any of the available formats the first return value
is nil and the second return value is the error.
*/
func UnpackRule(rule []string) (map[string]string, error) {
// Cache rule len
ruleLen := len(rule)
// Create all lower rule copy to case-insensitively parse out tokens whose
// position we don't know yet. We keep the original rule to retain the
// non-token elements' case.
ruleLower := make([]string, ruleLen)
for i, val := range rule {
ruleLower[i] = strings.ToLower(val)
}
switch ruleLower[0] {
case "create", "modify", "delete", "allow", "disallow", "require":
if ruleLen != 2 {
return nil,
fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
}
return map[string]string{
"type": ruleLower[0],
"pattern": rule[1],
}, nil
case "match":
var srcPrefix string
var dstType string
var dstPrefix string
var dstName string
// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
// IN <destination-path-prefix> FROM <step>
if ruleLen == 10 && ruleLower[2] == "in" &&
ruleLower[4] == "with" && ruleLower[6] == "in" &&
ruleLower[8] == "from" {
srcPrefix = rule[3]
dstType = ruleLower[5]
dstPrefix = rule[7]
dstName = rule[9]
// MATCH <pattern> IN <source-path-prefix> WITH (MATERIALS|PRODUCTS) \
// FROM <step>
} else if ruleLen == 8 && ruleLower[2] == "in" &&
ruleLower[4] == "with" && ruleLower[6] == "from" {
srcPrefix = rule[3]
dstType = ruleLower[5]
dstPrefix = ""
dstName = rule[7]
// MATCH <pattern> WITH (MATERIALS|PRODUCTS) IN <destination-path-prefix>
// FROM <step>
} else if ruleLen == 8 && ruleLower[2] == "with" &&
ruleLower[4] == "in" && ruleLower[6] == "from" {
srcPrefix = ""
dstType = ruleLower[3]
dstPrefix = rule[5]
dstName = rule[7]
// MATCH <pattern> WITH (MATERIALS|PRODUCTS) FROM <step>
} else if ruleLen == 6 && ruleLower[2] == "with" &&
ruleLower[4] == "from" {
srcPrefix = ""
dstType = ruleLower[3]
dstPrefix = ""
dstName = rule[5]
} else {
return nil,
fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
}
return map[string]string{
"type": ruleLower[0],
"pattern": rule[1],
"srcPrefix": srcPrefix,
"dstPrefix": dstPrefix,
"dstType": dstType,
"dstName": dstName,
}, nil
default:
return nil,
fmt.Errorf("%s Got:\n\t %s", errorMsg, rule)
}
}
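
A usage sketch for UnpackRule with the shortest MATCH form listed above (not part of the vendored file):

package main

import (
    "fmt"
    "log"

    in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
    rule := []string{"MATCH", "foo.tar.gz", "WITH", "PRODUCTS", "FROM", "package"}
    unpacked, err := in_toto.UnpackRule(rule)
    if err != nil {
        log.Fatal(err)
    }
    // map[dstName:package dstPrefix: dstType:products pattern:foo.tar.gz srcPrefix: type:match]
    fmt.Println(unpacked)
}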


@@ -0,0 +1,409 @@
package in_toto
import (
"bytes"
"errors"
"fmt"
"io/ioutil"
"os"
"os/exec"
"path/filepath"
"reflect"
"strings"
"syscall"
"github.com/shibumi/go-pathspec"
)
// ErrSymCycle signals a detected symlink cycle in our RecordArtifacts() function.
var ErrSymCycle = errors.New("symlink cycle detected")
// ErrUnsupportedHashAlgorithm signals a missing hash mapping in getHashMapping
var ErrUnsupportedHashAlgorithm = errors.New("unsupported hash algorithm detected")
var ErrEmptyCommandArgs = errors.New("the command args are empty")
// visitedSymlinks is a hashset that contains all paths that we have visited.
var visitedSymlinks Set
/*
RecordArtifact reads and hashes the contents of the file at the passed path
using sha256 and returns a map in the following format:
{
"<path>": {
"sha256": <hex representation of hash>
}
}
If reading the file fails, the first return value is nil and the second return
value is the error.
NOTE: For cross-platform consistency Windows-style line separators (CRLF) are
normalized to Unix-style line separators (LF) before hashing file contents.
*/
func RecordArtifact(path string, hashAlgorithms []string, lineNormalization bool) (map[string]interface{}, error) {
supportedHashMappings := getHashMapping()
// Read file from passed path
contents, err := ioutil.ReadFile(path)
hashedContentsMap := make(map[string]interface{})
if err != nil {
return nil, err
}
if lineNormalization {
// "Normalize" file contents. We convert all line separators to '\n'
// for keeping operating system independence
contents = bytes.ReplaceAll(contents, []byte("\r\n"), []byte("\n"))
contents = bytes.ReplaceAll(contents, []byte("\r"), []byte("\n"))
}
// Create a map of all the hashes present in the hash_func list
for _, element := range hashAlgorithms {
if _, ok := supportedHashMappings[element]; !ok {
return nil, fmt.Errorf("%w: %s", ErrUnsupportedHashAlgorithm, element)
}
h := supportedHashMappings[element]
result := fmt.Sprintf("%x", hashToHex(h(), contents))
hashedContentsMap[element] = result
}
// Return it in a format that is conformant with link metadata artifacts
return hashedContentsMap, nil
}
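
A usage sketch (not part of the vendored file; the file path is hypothetical and any readable file works):

package main

import (
    "fmt"
    "log"

    in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
    artifact, err := in_toto.RecordArtifact("main.go", []string{"sha256", "sha512"}, true)
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(artifact["sha256"]) // hex-encoded digest string
}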
/*
RecordArtifacts is a wrapper around recordArtifacts.
RecordArtifacts initializes a set for storing visited symlinks,
calls recordArtifacts and deletes the set if no longer needed.
recordArtifacts walks through the passed slice of paths, traversing
subdirectories, and calls RecordArtifact for each file. It returns a map in
the following format:
{
"<path>": {
"sha256": <hex representation of hash>
},
"<path>": {
"sha256": <hex representation of hash>
},
...
}
If recording an artifact fails the first return value is nil and the second
return value is the error.
*/
func RecordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (evalArtifacts map[string]interface{}, err error) {
// Make sure to initialize a fresh hashset for every RecordArtifacts call
visitedSymlinks = NewSet()
evalArtifacts, err = recordArtifacts(paths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
// pass result and error through
return evalArtifacts, err
}
/*
recordArtifacts walks through the passed slice of paths, traversing
subdirectories, and calls RecordArtifact for each file. It returns a map in
the following format:
{
"<path>": {
"sha256": <hex representation of hash>
},
"<path>": {
"sha256": <hex representation of hash>
},
...
}
If recording an artifact fails the first return value is nil and the second
return value is the error.
*/
func recordArtifacts(paths []string, hashAlgorithms []string, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (map[string]interface{}, error) {
artifacts := make(map[string]interface{})
for _, path := range paths {
err := filepath.Walk(path,
func(path string, info os.FileInfo, err error) error {
// Abort if Walk function has a problem,
// e.g. path does not exist
if err != nil {
return err
}
// We need to call pathspec.GitIgnore inside of our filepath.Walk, because otherwise
// we will not catch all paths. Just imagine a path like "." and a pattern like "*.pub".
// If we called pathspec outside of filepath.Walk, this would not match.
ignore, err := pathspec.GitIgnore(gitignorePatterns, path)
if err != nil {
return err
}
if ignore {
return nil
}
// Don't hash directories
if info.IsDir() {
return nil
}
// check for symlink and evaluate the last element in a symlink
// chain via filepath.EvalSymlinks. We use EvalSymlinks here,
// because with os.Readlink() we would just read the next
// element in a possible symlink chain. This would mean more
// iterations. info.Mode()&os.ModeSymlink uses the file
// type bitmask to check for a symlink.
if info.Mode()&os.ModeSymlink == os.ModeSymlink {
// return with error if we detect a symlink cycle
if ok := visitedSymlinks.Has(path); ok {
// this error will get passed through
// to RecordArtifacts()
return ErrSymCycle
}
evalSym, err := filepath.EvalSymlinks(path)
if err != nil {
return err
}
// add symlink to visitedSymlinks set
// this way, we know which link we have visited already
// if we visit a symlink twice, we have detected a symlink cycle
visitedSymlinks.Add(path)
// We recursively call RecordArtifacts() to follow
// the new path.
evalArtifacts, evalErr := recordArtifacts([]string{evalSym}, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
if evalErr != nil {
return evalErr
}
for key, value := range evalArtifacts {
artifacts[key] = value
}
return nil
}
artifact, err := RecordArtifact(path, hashAlgorithms, lineNormalization)
// Abort if artifact can't be recorded, e.g.
// due to file permissions
if err != nil {
return err
}
for _, strip := range lStripPaths {
if strings.HasPrefix(path, strip) {
path = strings.TrimPrefix(path, strip)
break
}
}
// Check if path is unique
_, existingPath := artifacts[path]
if existingPath {
return fmt.Errorf("left stripping has resulted in non unique dictionary key: %s", path)
}
artifacts[path] = artifact
return nil
})
if err != nil {
return nil, err
}
}
return artifacts, nil
}
/*
waitErrToExitCode converts an error returned by Cmd.Wait() to an exit code. It
returns -1 if no exit code can be inferred.
*/
func waitErrToExitCode(err error) int {
// If there's no exit code, we return -1
retVal := -1
// See https://stackoverflow.com/questions/10385551/get-exit-code-go
if err != nil {
if exiterr, ok := err.(*exec.ExitError); ok {
// The program has exited with an exit code != 0
// This works on both Unix and Windows. Although package
// syscall is generally platform dependent, WaitStatus is
// defined for both Unix and Windows and in both cases has
// an ExitStatus() method with the same signature.
if status, ok := exiterr.Sys().(syscall.WaitStatus); ok {
retVal = status.ExitStatus()
}
}
} else {
retVal = 0
}
return retVal
}
/*
RunCommand executes the passed command in a subprocess. The first element of
cmdArgs is used as executable and the rest as command arguments. It captures
and returns stdout, stderr and exit code. The format of the returned map is:
{
"return-value": <exit code>,
"stdout": "<standard output>",
"stderr": "<standard error>"
}
If the command cannot be executed or no pipes for stdout or stderr can be
created the first return value is nil and the second return value is the error.
NOTE: Since stdout and stderr are captured, they cannot be seen during the
command execution.
*/
func RunCommand(cmdArgs []string, runDir string) (map[string]interface{}, error) {
if len(cmdArgs) == 0 {
return nil, ErrEmptyCommandArgs
}
cmd := exec.Command(cmdArgs[0], cmdArgs[1:]...)
if runDir != "" {
cmd.Dir = runDir
}
stderrPipe, err := cmd.StderrPipe()
if err != nil {
return nil, err
}
stdoutPipe, err := cmd.StdoutPipe()
if err != nil {
return nil, err
}
if err := cmd.Start(); err != nil {
return nil, err
}
// TODO: duplicate stdout, stderr
stdout, _ := ioutil.ReadAll(stdoutPipe)
stderr, _ := ioutil.ReadAll(stderrPipe)
retVal := waitErrToExitCode(cmd.Wait())
return map[string]interface{}{
"return-value": float64(retVal),
"stdout": string(stdout),
"stderr": string(stderr),
}, nil
}
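
A usage sketch (not part of the vendored file); the command is arbitrary and stdout/stderr are only available after the process exits:

package main

import (
    "fmt"
    "log"

    in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
    byProducts, err := in_toto.RunCommand([]string{"go", "version"}, "")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Printf("exit=%v stdout=%q\n", byProducts["return-value"], byProducts["stdout"])
}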
/*
InTotoRun executes commands, e.g. for software supply chain steps or
inspections of an in-toto layout, and creates and returns corresponding link
metadata. Link metadata contains recorded products at the passed productPaths
and materials at the passed materialPaths. The returned link is wrapped in a
Metablock object. If command execution or artifact recording fails the first
return value is an empty Metablock and the second return value is the error.
*/
func InTotoRun(name string, runDir string, materialPaths []string, productPaths []string,
cmdArgs []string, key Key, hashAlgorithms []string, gitignorePatterns []string,
lStripPaths []string, lineNormalization bool) (Metablock, error) {
var linkMb Metablock
materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
if err != nil {
return linkMb, err
}
// make sure that we only run RunCommand if cmdArgs is not nil or empty
byProducts := map[string]interface{}{}
if len(cmdArgs) != 0 {
byProducts, err = RunCommand(cmdArgs, runDir)
if err != nil {
return linkMb, err
}
}
products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
if err != nil {
return linkMb, err
}
linkMb.Signed = Link{
Type: "link",
Name: name,
Materials: materials,
Products: products,
ByProducts: byProducts,
Command: cmdArgs,
Environment: map[string]interface{}{},
}
linkMb.Signatures = []Signature{}
// We use a new feature from Go1.13 here, to check the key struct.
// IsZero() will return True, if the key hasn't been initialized
// with other values than the default ones.
if !reflect.ValueOf(key).IsZero() {
if err := linkMb.Sign(key); err != nil {
return linkMb, err
}
}
return linkMb, nil
}
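
A usage sketch for InTotoRun (not part of the vendored file); the step name, paths, command, and key file are all hypothetical, and the resulting Metablock is simply printed as JSON here:

package main

import (
    "encoding/json"
    "fmt"
    "log"

    in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
    var key in_toto.Key
    if err := key.LoadKeyDefaults("alice.pem"); err != nil { // hypothetical signing key
        log.Fatal(err)
    }
    linkMb, err := in_toto.InTotoRun(
        "package",              // step name
        ".",                    // directory to run the command in
        []string{"src"},        // material paths
        []string{"foo.tar.gz"}, // product paths
        []string{"tar", "czf", "foo.tar.gz", "src"}, // command
        key,
        []string{"sha256"}, // hash algorithms
        nil,                // gitignore patterns
        nil,                // lStripPaths
        false,              // line normalization
    )
    if err != nil {
        log.Fatal(err)
    }
    out, _ := json.MarshalIndent(linkMb, "", "  ")
    fmt.Println(string(out))
}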
/*
InTotoRecordStart begins the creation of a link metablock file in two steps,
in order to provide evidence for supply chain steps that cannot be carried out
by a single command. InTotoRecordStart collects the hashes of the materials
before any commands are run, signs the unfinished link, and returns the link.
*/
func InTotoRecordStart(name string, materialPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) {
var linkMb Metablock
materials, err := RecordArtifacts(materialPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
if err != nil {
return linkMb, err
}
linkMb.Signed = Link{
Type: "link",
Name: name,
Materials: materials,
Products: map[string]interface{}{},
ByProducts: map[string]interface{}{},
Command: []string{},
Environment: map[string]interface{}{},
}
if !reflect.ValueOf(key).IsZero() {
if err := linkMb.Sign(key); err != nil {
return linkMb, err
}
}
return linkMb, nil
}
/*
InTotoRecordStop ends the creation of a metadata link file created by
InTotoRecordStart. InTotoRecordStop takes in a signed unfinished link metablock
created by InTotoRecordStart and records the hashes of any products created by
commands run between InTotoRecordStart and InTotoRecordStop. The resultant
finished link metablock is then signed by the provided key and returned.
*/
func InTotoRecordStop(prelimLinkMb Metablock, productPaths []string, key Key, hashAlgorithms, gitignorePatterns []string, lStripPaths []string, lineNormalization bool) (Metablock, error) {
var linkMb Metablock
if err := prelimLinkMb.VerifySignature(key); err != nil {
return linkMb, err
}
link, ok := prelimLinkMb.Signed.(Link)
if !ok {
return linkMb, errors.New("invalid metadata block")
}
products, err := RecordArtifacts(productPaths, hashAlgorithms, gitignorePatterns, lStripPaths, lineNormalization)
if err != nil {
return linkMb, err
}
link.Products = products
linkMb.Signed = link
if !reflect.ValueOf(key).IsZero() {
if err := linkMb.Sign(key); err != nil {
return linkMb, err
}
}
return linkMb, nil
}
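
A sketch of the two-step flow (not part of the vendored file); the step name, material/product paths, and key are hypothetical, and whatever happens between the two calls is deliberately outside in-toto's control:

package main

import (
    "log"

    in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
    var key in_toto.Key
    if err := key.LoadKeyDefaults("alice.pem"); err != nil { // hypothetical signing key
        log.Fatal(err)
    }
    // Record the materials before the manual step happens.
    prelim, err := in_toto.InTotoRecordStart("edit", []string{"report.md"}, key,
        []string{"sha256"}, nil, nil, false)
    if err != nil {
        log.Fatal(err)
    }
    // ... the actual supply chain step runs here, outside of in-toto ...
    // Record the products and produce the finished, signed link.
    finished, err := in_toto.InTotoRecordStop(prelim, []string{"report.md"}, key,
        []string{"sha256"}, nil, nil, false)
    if err != nil {
        log.Fatal(err)
    }
    _ = finished
}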


@@ -0,0 +1,16 @@
package common
// DigestSet contains a set of digests. It is represented as a map from
// algorithm name to lowercase hex-encoded value.
type DigestSet map[string]string
// ProvenanceBuilder identifies the entity that executed the build steps.
type ProvenanceBuilder struct {
ID string `json:"id"`
}
// ProvenanceMaterial defines the materials used to build an artifact.
type ProvenanceMaterial struct {
URI string `json:"uri,omitempty"`
Digest DigestSet `json:"digest,omitempty"`
}


@@ -0,0 +1,50 @@
package v01
import (
"time"
"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
)
const (
// PredicateSLSAProvenance represents a build provenance for an artifact.
PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.1"
)
// ProvenancePredicate is the provenance predicate definition.
type ProvenancePredicate struct {
Builder common.ProvenanceBuilder `json:"builder"`
Recipe ProvenanceRecipe `json:"recipe"`
Metadata *ProvenanceMetadata `json:"metadata,omitempty"`
Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
}
// ProvenanceRecipe describes the actions performed by the builder.
type ProvenanceRecipe struct {
Type string `json:"type"`
// DefinedInMaterial can be sent as the null pointer to indicate that
// the value is not present.
DefinedInMaterial *int `json:"definedInMaterial,omitempty"`
EntryPoint string `json:"entryPoint"`
Arguments interface{} `json:"arguments,omitempty"`
Environment interface{} `json:"environment,omitempty"`
}
// ProvenanceMetadata contains metadata for the built artifact.
type ProvenanceMetadata struct {
// Use pointer to make sure that the absence of a time is not
// encoded as the Epoch time.
BuildStartedOn *time.Time `json:"buildStartedOn,omitempty"`
BuildFinishedOn *time.Time `json:"buildFinishedOn,omitempty"`
Completeness ProvenanceComplete `json:"completeness"`
Reproducible bool `json:"reproducible"`
}
// ProvenanceComplete indicates whether the claims in build/recipe are complete.
// For in-depth information refer to the specification:
// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
type ProvenanceComplete struct {
Arguments bool `json:"arguments"`
Environment bool `json:"environment"`
Materials bool `json:"materials"`
}


@@ -0,0 +1,137 @@
package v02
import (
"time"
"github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
)
const (
// PredicateSLSAProvenance represents a build provenance for an artifact.
PredicateSLSAProvenance = "https://slsa.dev/provenance/v0.2"
)
// ProvenancePredicate is the provenance predicate definition.
type ProvenancePredicate struct {
// Builder identifies the entity that executed the invocation, which is trusted to have
// correctly performed the operation and populated this provenance.
//
// The identity MUST reflect the trust base that consumers care about. How detailed to be is a
// judgement call. For example, GitHub Actions supports both GitHub-hosted runners and
// self-hosted runners. The GitHub-hosted runner might be a single identity because it's all
// GitHub from the consumer's perspective. Meanwhile, each self-hosted runner might have its
// own identity because not all runners are trusted by all consumers.
Builder common.ProvenanceBuilder `json:"builder"`
// BuildType is a URI indicating what type of build was performed. It determines the meaning of
// [Invocation], [BuildConfig] and [Materials].
BuildType string `json:"buildType"`
// Invocation identifies the event that kicked off the build. When combined with materials,
// this SHOULD fully describe the build, such that re-running this invocation results in
// bit-for-bit identical output (if the build is reproducible).
//
// MAY be unset/null if unknown, but this is DISCOURAGED.
Invocation ProvenanceInvocation `json:"invocation,omitempty"`
// BuildConfig lists the steps in the build. If [ProvenanceInvocation.ConfigSource] is not
// available, BuildConfig can be used to verify information about the build.
//
// This is an arbitrary JSON object with a schema defined by [BuildType].
BuildConfig interface{} `json:"buildConfig,omitempty"`
// Metadata contains other properties of the build.
Metadata *ProvenanceMetadata `json:"metadata,omitempty"`
// Materials is the collection of artifacts that influenced the build including sources,
// dependencies, build tools, base images, and so on.
//
// This is considered to be incomplete unless metadata.completeness.materials is true.
Materials []common.ProvenanceMaterial `json:"materials,omitempty"`
}
// ProvenanceInvocation identifies the event that kicked off the build.
type ProvenanceInvocation struct {
// ConfigSource describes where the config file that kicked off the build came from. This is
// effectively a pointer to the source where [ProvenancePredicate.BuildConfig] came from.
ConfigSource ConfigSource `json:"configSource,omitempty"`
// Parameters is a collection of all external inputs that influenced the build on top of
// ConfigSource. For example, if the invocation type were “make”, then this might be the
// flags passed to make aside from the target, which is captured in [ConfigSource.EntryPoint].
//
// Consumers SHOULD accept only “safe” Parameters. The simplest and safest way to
// achieve this is to disallow any parameters altogether.
//
// This is an arbitrary JSON object with a schema defined by buildType.
Parameters interface{} `json:"parameters,omitempty"`
// Environment contains any other builder-controlled inputs necessary for correctly evaluating
// the build. Usually only needed for reproducing the build but not evaluated as part of
// policy.
//
// This SHOULD be minimized to only include things that are part of the public API, that cannot
// be recomputed from other values in the provenance, and that actually affect the evaluation
// of the build. For example, this might include variables that are referenced in the workflow
// definition, but it SHOULD NOT include a dump of all environment variables or include things
// like the hostname (assuming hostname is not part of the public API).
Environment interface{} `json:"environment,omitempty"`
}
type ConfigSource struct {
// URI indicating the identity of the source of the config.
URI string `json:"uri,omitempty"`
// Digest is a collection of cryptographic digests for the contents of the artifact specified
// by [URI].
Digest common.DigestSet `json:"digest,omitempty"`
// EntryPoint identifying the entry point into the build. This is often a path to a
// configuration file and/or a target label within that file. The syntax and meaning are
// defined by buildType. For example, if the buildType were “make”, then this would reference
// the directory in which to run make as well as which target to use.
//
// Consumers SHOULD accept only specific [ProvenanceInvocation.EntryPoint] values. For example,
// a policy might only allow the "release" entry point but not the "debug" entry point.
// MAY be omitted if the buildType specifies a default value.
EntryPoint string `json:"entryPoint,omitempty"`
}
// ProvenanceMetadata contains metadata for the built artifact.
type ProvenanceMetadata struct {
// BuildInvocationID identifies this particular build invocation, which can be useful for
// finding associated logs or other ad-hoc analysis. The exact meaning and format is defined
// by [common.ProvenanceBuilder.ID]; by default it is treated as opaque and case-sensitive.
// The value SHOULD be globally unique.
BuildInvocationID string `json:"buildInvocationID,omitempty"`
// BuildStartedOn is the timestamp of when the build started.
//
// Use pointer to make sure that the absence of a time is not
// encoded as the Epoch time.
BuildStartedOn *time.Time `json:"buildStartedOn,omitempty"`
// BuildFinishedOn is the timestamp of when the build completed.
BuildFinishedOn *time.Time `json:"buildFinishedOn,omitempty"`
// Completeness indicates that the builder claims certain fields in this message to be
// complete.
Completeness ProvenanceComplete `json:"completeness"`
// Reproducible if true, means the builder claims that running invocation on materials will
// produce bit-for-bit identical output.
Reproducible bool `json:"reproducible"`
}
// ProvenanceComplete indicates whether the claims in build/recipe are complete.
// For in-depth information refer to the specification:
// https://github.com/in-toto/attestation/blob/v0.1.0/spec/predicates/provenance.md
type ProvenanceComplete struct {
// Parameters if true, means the builder claims that [ProvenanceInvocation.Parameters] is
// complete, meaning that all external inputs are properly captured in
// ProvenanceInvocation.Parameters.
Parameters bool `json:"parameters"`
// Environment if true, means the builder claims that [ProvenanceInvocation.Environment] is
// complete.
Environment bool `json:"environment"`
// Materials if true, means the builder claims that materials is complete, usually through some
// controls to prevent network access. Sometimes called “hermetic”.
Materials bool `json:"materials"`
}
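
A sketch that fills in the v0.2 predicate and prints it as JSON (not part of the vendored file); all URIs and digests are placeholders, and the v0.2 import path is assumed to sit next to the common package imported above (package alias v02):

package main

import (
    "encoding/json"
    "fmt"
    "time"

    "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/common"
    v02 "github.com/in-toto/in-toto-golang/in_toto/slsa_provenance/v0.2" // assumed path
)

func main() {
    started := time.Now().UTC()
    pred := v02.ProvenancePredicate{
        Builder:   common.ProvenanceBuilder{ID: "https://example.com/builders/1"},
        BuildType: "https://example.com/buildtypes/make@v1",
        Invocation: v02.ProvenanceInvocation{
            ConfigSource: v02.ConfigSource{
                URI:        "git+https://example.com/org/repo",
                Digest:     common.DigestSet{"sha1": "0000000000000000000000000000000000000000"},
                EntryPoint: "release",
            },
        },
        Metadata: &v02.ProvenanceMetadata{
            BuildStartedOn: &started,
            Completeness:   v02.ProvenanceComplete{Parameters: true},
        },
        Materials: []common.ProvenanceMaterial{
            {URI: "git+https://example.com/org/repo",
                Digest: common.DigestSet{"sha1": "0000000000000000000000000000000000000000"}},
        },
    }
    out, _ := json.MarshalIndent(pred, "", "  ")
    fmt.Println(string(out))
}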


@@ -0,0 +1,147 @@
package in_toto
import (
"fmt"
)
/*
Set represents a data structure for set operations. See `NewSet` for how to
create a Set, and available Set receivers for useful set operations.
Under the hood Set aliases map[string]struct{}, where the map keys are the set
elements and the map values are a memory-efficient way of storing the keys.
*/
type Set map[string]struct{}
/*
NewSet creates a new Set, assigns it the optionally passed variadic string
elements, and returns it.
*/
func NewSet(elems ...string) Set {
var s Set = make(map[string]struct{})
for _, elem := range elems {
s.Add(elem)
}
return s
}
/*
Has returns True if the passed string is a member of the set on which it was
called and False otherwise.
*/
func (s Set) Has(elem string) bool {
_, ok := s[elem]
return ok
}
/*
Add adds the passed string to the set on which it was called, if the string is
not a member of the set.
*/
func (s Set) Add(elem string) {
s[elem] = struct{}{}
}
/*
Remove removes the passed string from the set on which it was called, if the
string is a member of the set.
*/
func (s Set) Remove(elem string) {
delete(s, elem)
}
/*
Intersection creates and returns a new Set with the elements of the set on
which it was called that are also in the passed set.
*/
func (s Set) Intersection(s2 Set) Set {
res := NewSet()
for elem := range s {
if !s2.Has(elem) {
continue
}
res.Add(elem)
}
return res
}
/*
Difference creates and returns a new Set with the elements of the set on
which it was called that are not in the passed set.
*/
func (s Set) Difference(s2 Set) Set {
res := NewSet()
for elem := range s {
if s2.Has(elem) {
continue
}
res.Add(elem)
}
return res
}
/*
Filter creates and returns a new Set with the elements of the set on which it
was called that match the passed pattern. A matching error is treated like a
non-match, and a warning is printed.
*/
func (s Set) Filter(pattern string) Set {
res := NewSet()
for elem := range s {
matched, err := match(pattern, elem)
if err != nil {
fmt.Printf("WARNING: %s, pattern was '%s'\n", err, pattern)
continue
}
if !matched {
continue
}
res.Add(elem)
}
return res
}
/*
Slice creates and returns an unordered string slice with the elements of the
set on which it was called.
*/
func (s Set) Slice() []string {
var res []string
res = make([]string, 0, len(s))
for elem := range s {
res = append(res, elem)
}
return res
}
/*
InterfaceKeyStrings returns string keys of passed interface{} map in an
unordered string slice.
*/
func InterfaceKeyStrings(m map[string]interface{}) []string {
res := make([]string, len(m))
i := 0
for k := range m {
res[i] = k
i++
}
return res
}
/*
IsSubSet checks if the parameter subset is a
subset of the superset s.
*/
func (s Set) IsSubSet(subset Set) bool {
if len(subset) > len(s) {
return false
}
for key := range subset {
if s.Has(key) {
continue
} else {
return false
}
}
return true
}
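
A usage sketch exercising the Set receivers above, including the pattern filter backed by match() (not part of the vendored file):

package main

import (
    "fmt"

    in_toto "github.com/in-toto/in-toto-golang/in_toto"
)

func main() {
    recorded := in_toto.NewSet("foo.tar.gz", "foo.sig", "README.md")
    expected := in_toto.NewSet("foo.tar.gz", "foo.sig")

    fmt.Println(recorded.Filter("*.tar.gz").Slice())     // [foo.tar.gz]
    fmt.Println(recorded.Intersection(expected).Slice()) // foo.tar.gz and foo.sig, unordered
    fmt.Println(recorded.Difference(expected).Slice())   // [README.md]
    fmt.Println(recorded.IsSubSet(expected))             // true: expected is a subset of recorded
}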


@@ -0,0 +1,14 @@
//go:build linux || darwin || !windows
// +build linux darwin !windows
package in_toto
import "golang.org/x/sys/unix"
func isWritable(path string) error {
err := unix.Access(path, unix.W_OK)
if err != nil {
return err
}
return nil
}


@@ -0,0 +1,25 @@
package in_toto
import (
"errors"
"os"
)
func isWritable(path string) error {
// get fileInfo
info, err := os.Stat(path)
if err != nil {
return err
}
// check if path is a directory
if !info.IsDir() {
return errors.New("not a directory")
}
// Check if the user bit is enabled in file permission
if info.Mode().Perm()&(1<<(uint(7))) == 0 {
return errors.New("not writable")
}
return nil
}

File diff suppressed because it is too large.