// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package pki import ( "fmt" "io" "github.com/sigstore/rekor/pkg/pki/minisign" "github.com/sigstore/rekor/pkg/pki/pgp" "github.com/sigstore/rekor/pkg/pki/pkcs7" "github.com/sigstore/rekor/pkg/pki/ssh" "github.com/sigstore/rekor/pkg/pki/tuf" "github.com/sigstore/rekor/pkg/pki/x509" ) type Format string const ( PGP Format = "pgp" Minisign Format = "minisign" SSH Format = "ssh" X509 Format = "x509" PKCS7 Format = "pkcs7" Tuf Format = "tuf" ) type ArtifactFactory struct { impl pkiImpl } func NewArtifactFactory(format Format) (*ArtifactFactory, error) { if impl, ok := artifactFactoryMap[format]; ok { return &ArtifactFactory{impl: impl}, nil } return nil, fmt.Errorf("%v is not a supported PKI format", format) } type pkiImpl struct { newPubKey func(io.Reader) (PublicKey, error) newSignature func(io.Reader) (Signature, error) } var artifactFactoryMap map[Format]pkiImpl func init() { artifactFactoryMap = map[Format]pkiImpl{ PGP: { newPubKey: func(r io.Reader) (PublicKey, error) { return pgp.NewPublicKey(r) }, newSignature: func(r io.Reader) (Signature, error) { return pgp.NewSignature(r) }, }, Minisign: { newPubKey: func(r io.Reader) (PublicKey, error) { return minisign.NewPublicKey(r) }, newSignature: func(r io.Reader) (Signature, error) { return minisign.NewSignature(r) }, }, SSH: { newPubKey: func(r io.Reader) (PublicKey, error) { return ssh.NewPublicKey(r) }, newSignature: func(r 
io.Reader) (Signature, error) { return ssh.NewSignature(r) }, }, X509: { newPubKey: func(r io.Reader) (PublicKey, error) { return x509.NewPublicKey(r) }, newSignature: func(r io.Reader) (Signature, error) { return x509.NewSignature(r) }, }, PKCS7: { newPubKey: func(r io.Reader) (PublicKey, error) { return pkcs7.NewPublicKey(r) }, newSignature: func(r io.Reader) (Signature, error) { return pkcs7.NewSignature(r) }, }, Tuf: { newPubKey: func(r io.Reader) (PublicKey, error) { return tuf.NewPublicKey(r) }, newSignature: func(r io.Reader) (Signature, error) { return tuf.NewSignature(r) }, }, } } func SupportedFormats() []string { var formats []string for f := range artifactFactoryMap { formats = append(formats, string(f)) } return formats } func (a ArtifactFactory) NewPublicKey(r io.Reader) (PublicKey, error) { return a.impl.newPubKey(r) } func (a ArtifactFactory) NewSignature(r io.Reader) (Signature, error) { return a.impl.newSignature(r) }
// Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package sharding // VirtualLogIndex returns the virtual log index for a given leaf index func VirtualLogIndex(leafIndex int64, tid int64, ranges LogRanges) int64 { // if we have no inactive ranges, we have just one log! return the leafIndex as is // as long as it matches the active tree ID if ranges.NoInactive() { if ranges.GetActive().TreeID == tid { return leafIndex } return -1 } var virtualIndex int64 for _, r := range ranges.GetInactive() { if r.TreeID == tid { return virtualIndex + leafIndex } virtualIndex += r.TreeLength } // If no TreeID in Inactive matches the tid, the virtual index should be the active tree if ranges.GetActive().TreeID == tid { return virtualIndex + leafIndex } // Otherwise, the tid is invalid return -1 }
// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package sharding import ( "context" "crypto/sha256" "crypto/x509" "encoding/hex" "encoding/json" "errors" "fmt" "os" "strconv" "strings" "github.com/google/trillian" "github.com/google/trillian/types" "github.com/sigstore/rekor/pkg/log" "github.com/sigstore/rekor/pkg/signer" "github.com/sigstore/sigstore/pkg/cryptoutils" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/options" "sigs.k8s.io/yaml" ) // Active and inactive shards type LogRanges struct { // inactive shards are listed from oldest to newest inactive Ranges active LogRange } type Ranges []LogRange // LogRange represents a log or tree shard type LogRange struct { TreeID int64 `json:"treeID" yaml:"treeID"` TreeLength int64 `json:"treeLength" yaml:"treeLength"` // unused for active tree SigningConfig signer.SigningConfig `json:"signingConfig" yaml:"signingConfig"` // if unset, assume same as active tree Signer signature.Signer PemPubKey string // PEM-encoded PKIX public key LogID string // Hex-encoded SHA256 digest of PKIX-encoded public key } func (l LogRange) String() string { return fmt.Sprintf("{ TreeID: %v, TreeLength: %v, SigningScheme: %v, PemPubKey: %v, LogID: %v }", l.TreeID, l.TreeLength, l.SigningConfig.SigningSchemeOrKeyPath, l.PemPubKey, l.LogID) } // NewLogRanges initializes the active and any inactive log shards func NewLogRanges(ctx context.Context, logClient 
trillian.TrillianLogClient, inactiveShardsPath string, activeTreeID int64, signingConfig signer.SigningConfig) (LogRanges, error) { if activeTreeID == 0 { return LogRanges{}, errors.New("non-zero active tree ID required; please set the active tree ID via the `--trillian_log_server.tlog_id` flag") } // Initialize active shard activeLog, err := updateRange(ctx, logClient, LogRange{TreeID: activeTreeID, TreeLength: 0, SigningConfig: signingConfig}, true /*=active*/) if err != nil { return LogRanges{}, fmt.Errorf("creating range for active tree %d: %w", activeTreeID, err) } log.Logger.Infof("Active log: %s", activeLog.String()) if inactiveShardsPath == "" { log.Logger.Info("No config file specified, no inactive shards") return LogRanges{active: activeLog}, nil } // Initialize inactive shards from inactive tree IDs ranges, err := logRangesFromPath(inactiveShardsPath) if err != nil { return LogRanges{}, fmt.Errorf("log ranges from path: %w", err) } for i, r := range ranges { // If no signing config is provided, use the active tree signing key if r.SigningConfig.IsUnset() { r.SigningConfig = signingConfig } r, err := updateRange(ctx, logClient, r, false /*=active*/) if err != nil { return LogRanges{}, fmt.Errorf("updating range for tree id %d: %w", r.TreeID, err) } ranges[i] = r } for i, r := range ranges { log.Logger.Infof("Inactive range %d: %s", i, r.String()) } return LogRanges{ inactive: ranges, active: activeLog, }, nil } // logRangesFromPath unmarshals a shard config func logRangesFromPath(path string) (Ranges, error) { var ranges Ranges contents, err := os.ReadFile(path) if err != nil { return Ranges{}, err } if string(contents) == "" { log.Logger.Info("Sharding config file contents empty, skipping init of logRange map") return Ranges{}, nil } if err := yaml.Unmarshal(contents, &ranges); err != nil { // Try to use JSON if jerr := json.Unmarshal(contents, &ranges); jerr == nil { return ranges, nil } return Ranges{}, err } return ranges, nil } // updateRange fills 
in any missing information about the range func updateRange(ctx context.Context, logClient trillian.TrillianLogClient, r LogRange, active bool) (LogRange, error) { // If a tree length wasn't passed in or if the shard is inactive, fetch the tree size if r.TreeLength == 0 && !active { resp, err := logClient.GetLatestSignedLogRoot(ctx, &trillian.GetLatestSignedLogRootRequest{LogId: r.TreeID}) if err != nil { return LogRange{}, fmt.Errorf("getting signed log root for tree %d: %w", r.TreeID, err) } var root types.LogRootV1 if err := root.UnmarshalBinary(resp.SignedLogRoot.LogRoot); err != nil { return LogRange{}, err } r.TreeLength = int64(root.TreeSize) } if r.SigningConfig.IsUnset() { return LogRange{}, fmt.Errorf("signing config not set, unable to initialize shard signer") } // Initialize shard signer s, err := signer.New(ctx, r.SigningConfig.SigningSchemeOrKeyPath, r.SigningConfig.FileSignerPassword, r.SigningConfig.TinkKEKURI, r.SigningConfig.TinkKeysetPath) if err != nil { return LogRange{}, err } r.Signer = s // Initialize public key pubKey, err := s.PublicKey(options.WithContext(ctx)) if err != nil { return LogRange{}, err } pemPubKey, err := cryptoutils.MarshalPublicKeyToPEM(pubKey) if err != nil { return LogRange{}, err } r.PemPubKey = string(pemPubKey) // Initialize log ID from public key b, err := x509.MarshalPKIXPublicKey(pubKey) if err != nil { return LogRange{}, err } pubkeyHashBytes := sha256.Sum256(b) r.LogID = hex.EncodeToString(pubkeyHashBytes[:]) return r, nil } func (l *LogRanges) ResolveVirtualIndex(index int) (int64, int64) { indexLeft := index for _, l := range l.inactive { if indexLeft < int(l.TreeLength) { return l.TreeID, int64(indexLeft) } indexLeft -= int(l.TreeLength) } // If index not found in inactive trees, return the active tree return l.active.TreeID, int64(indexLeft) } func (l *LogRanges) NoInactive() bool { return l.inactive == nil } // AllShards returns all shards, starting with the active shard and then the inactive shards func (l 
*LogRanges) AllShards() []int64 { shards := []int64{l.GetActive().TreeID} for _, in := range l.GetInactive() { shards = append(shards, in.TreeID) } return shards } // TotalInactiveLength returns the total length across all inactive shards; // we don't know the length of the active shard. func (l *LogRanges) TotalInactiveLength() int64 { var total int64 for _, r := range l.inactive { total += r.TreeLength } return total } // GetLogRangeByTreeID returns the active or inactive // shard with the given tree ID func (l *LogRanges) GetLogRangeByTreeID(treeID int64) (LogRange, error) { if l.active.TreeID == treeID { return l.active, nil } for _, i := range l.inactive { if i.TreeID == treeID { return i, nil } } return LogRange{}, fmt.Errorf("no log range found for tree ID %d", treeID) } // GetInactive returns all inactive shards func (l *LogRanges) GetInactive() []LogRange { return l.inactive } // GetActive returns the cative shard func (l *LogRanges) GetActive() LogRange { return l.active } func (l *LogRanges) String() string { ranges := []string{} for _, r := range l.inactive { ranges = append(ranges, fmt.Sprintf("%d=%d", r.TreeID, r.TreeLength)) } ranges = append(ranges, fmt.Sprintf("active=%d", l.active.TreeID)) return strings.Join(ranges, ",") } // PublicKey returns the associated public key for the given Tree ID // and returns the active public key by default func (l *LogRanges) PublicKey(treeID string) (string, error) { // if no tree ID is specified, assume the active tree if treeID == "" { return l.active.PemPubKey, nil } tid, err := strconv.Atoi(treeID) if err != nil { return "", err } if tid == int(l.GetActive().TreeID) { return l.active.PemPubKey, nil } for _, i := range l.inactive { if int(i.TreeID) == tid { return i.PemPubKey, nil } } return "", fmt.Errorf("%d is not a valid tree ID and doesn't have an associated public key", tid) }
// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package sharding import ( "encoding/hex" "errors" "fmt" "strconv" ) // An EntryID refers to a specific artifact's ID and is made of two components, // the TreeID and the UUID. The TreeID is a hex-encoded uint64 (8 bytes) // referring to the specific trillian tree (also known as log or shard) where // the artifact can be found. The UUID is a hex-encoded 32-byte number // referring to the artifact's merkle leaf hash from trillian. Artifact lookup // by UUID occurs by finding the UUID within the tree specified by the TreeID. // // An EntryID is 40 bytes long and looks like this: // FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF FFFF // |_______ ________| |_____________________________________ ______________________________________| // \/ \/ // TreeID (8 bytes, hex) UUID (32 bytes, hex) const TreeIDHexStringLen = 16 const UUIDHexStringLen = 64 const EntryIDHexStringLen = TreeIDHexStringLen + UUIDHexStringLen type EntryID struct { TreeID string UUID string } // CreateEntryIDFromParts This function can take a TreeID of equal or lesser length than TreeIDHexStringLen. In // case the TreeID length is less than TreeIDHexStringLen, it will be padded to the correct // length. 
func CreateEntryIDFromParts(treeid string, uuid string) (EntryID, error) { if len(treeid) > TreeIDHexStringLen { err := fmt.Errorf("invalid treeid len: %v", len(treeid)) return createEmptyEntryID(), err } if len(uuid) != UUIDHexStringLen { err := fmt.Errorf("invalid uuid len: %v", len(uuid)) return createEmptyEntryID(), err } treeidFormatted, err := PadToTreeIDLen(treeid) if err != nil { return createEmptyEntryID(), err } if err := ValidateEntryID(treeidFormatted + uuid); err != nil { return createEmptyEntryID(), err } return EntryID{ TreeID: treeidFormatted, UUID: uuid}, nil } func createEmptyEntryID() EntryID { return EntryID{ TreeID: "", UUID: ""} } func (e EntryID) ReturnEntryIDString() string { return e.TreeID + e.UUID } func PadToTreeIDLen(t string) (string, error) { switch { case len(t) == TreeIDHexStringLen: return t, nil case len(t) > TreeIDHexStringLen: return "", fmt.Errorf("invalid treeID %v: too long", t) default: return fmt.Sprintf("%016s", t), nil } } // GetUUIDFromIDString Returns UUID (with no prepended TreeID) from a UUID or EntryID string. // Validates UUID and also TreeID if present. func GetUUIDFromIDString(id string) (string, error) { switch len(id) { case UUIDHexStringLen: if err := ValidateUUID(id); err != nil { return "", err } return id, nil case EntryIDHexStringLen: if err := ValidateEntryID(id); err != nil { if err.Error() == "0 is not a valid TreeID" { return id[len(id)-UUIDHexStringLen:], nil } return "", err } return id[len(id)-UUIDHexStringLen:], nil default: return "", fmt.Errorf("invalid ID len %v for %v", len(id), id) } } // ValidateUUID This is permissive in that if passed an EntryID, it will find the UUID and validate it. 
func ValidateUUID(u string) error { switch len(u) { // If u is an EntryID, call validate on just the UUID case EntryIDHexStringLen: uid := u[len(u)-UUIDHexStringLen:] if err := ValidateUUID(uid); err != nil { return err } return nil case UUIDHexStringLen: if _, err := hex.DecodeString(u); err != nil { return fmt.Errorf("id %v is not a valid hex string: %w", u, err) } return nil default: return fmt.Errorf("invalid ID len %v for %v", len(u), u) } } // ValidateTreeID This is permissive in that if passed an EntryID, it will find the TreeID and validate it. func ValidateTreeID(t string) error { switch len(t) { // If t is an EntryID, call validate on just the TreeID case EntryIDHexStringLen: tid := t[:TreeIDHexStringLen] err := ValidateTreeID(tid) if err != nil { return err } return nil case TreeIDHexStringLen: // Check that it's a valid int64 in hex (base 16) i, err := strconv.ParseInt(t, 16, 64) if err != nil { return fmt.Errorf("could not convert treeID %v to int64: %w", t, err) } // Check for invalid TreeID values // TODO: test for more of these if i == 0 { return errors.New("0 is not a valid TreeID") } return nil default: return fmt.Errorf("TreeID len expected to be %v but got %v", TreeIDHexStringLen, len(t)) } } func ValidateEntryID(id string) error { UUIDErr := ValidateUUID(id) if UUIDErr != nil { return UUIDErr } treeIDErr := ValidateTreeID(id) if treeIDErr != nil { return treeIDErr } return nil } var ErrPlainUUID = errors.New("cannot get treeID from plain UUID") // GetTreeIDFromIDString Returns TreeID (with no appended UUID) from a TreeID or EntryID string. // Validates TreeID and also UUID if present. 
func GetTreeIDFromIDString(id string) (string, error) { switch len(id) { case UUIDHexStringLen: return "", ErrPlainUUID case EntryIDHexStringLen, TreeIDHexStringLen: if err := ValidateEntryID(id); err != nil { return "", err } return id[:TreeIDHexStringLen], nil default: return "", fmt.Errorf("invalid ID len %v for %v", len(id), id) } } func TreeID(entryID string) (int64, error) { tid, err := GetTreeIDFromIDString(entryID) if err != nil { return 0, err } i, err := strconv.ParseInt(tid, 16, 64) if err != nil { return 0, fmt.Errorf("could not convert treeID %v to int64: %w", tid, err) } return i, nil }
/* Copyright The Rekor Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package signer import ( "crypto" "fmt" "github.com/sigstore/sigstore/pkg/signature" "go.step.sm/crypto/pemutil" ) // returns an file based signer and verify, used for spinning up local instances type File struct { signature.SignerVerifier } func NewFile(keyPath, keyPass string) (*File, error) { opaqueKey, err := pemutil.Read(keyPath, pemutil.WithPassword([]byte(keyPass))) if err != nil { return nil, fmt.Errorf("file: provide a valid signer, %s is not valid: %w", keyPath, err) } signer, err := signature.LoadSignerVerifier(opaqueKey, crypto.SHA256) if err != nil { return nil, fmt.Errorf(`file: loaded private key from %s can't be used to sign: %w`, keyPath, err) } return &File{signer}, nil }
/* Copyright The Rekor Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package signer import ( "crypto" "crypto/elliptic" "crypto/rand" "github.com/sigstore/sigstore/pkg/signature" ) const MemoryScheme = "memory" // returns an in-memory signer and verify, used for spinning up local instances type Memory struct { signature.ECDSASignerVerifier } func NewMemory() (*Memory, error) { // generate a keypair sv, _, err := signature.NewECDSASignerVerifier(elliptic.P256(), rand.Reader, crypto.SHA256) if err != nil { return nil, err } return &Memory{ ECDSASignerVerifier: *sv, }, nil }
/* Copyright 2021 The Sigstore Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package signer import ( "context" "crypto" "strings" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/kms" "golang.org/x/exp/slices" // these are imported to load the providers via init() calls _ "github.com/sigstore/sigstore/pkg/signature/kms/aws" _ "github.com/sigstore/sigstore/pkg/signature/kms/azure" _ "github.com/sigstore/sigstore/pkg/signature/kms/gcp" _ "github.com/sigstore/sigstore/pkg/signature/kms/hashivault" ) // SigningConfig initializes the signer for a specific shard type SigningConfig struct { SigningSchemeOrKeyPath string `json:"signingSchemeOrKeyPath" yaml:"signingSchemeOrKeyPath"` FileSignerPassword string `json:"fileSignerPassword" yaml:"fileSignerPassword"` TinkKEKURI string `json:"tinkKEKURI" yaml:"tinkKEKURI"` TinkKeysetPath string `json:"tinkKeysetPath" yaml:"tinkKeysetPath"` } func (sc SigningConfig) IsUnset() bool { return sc.SigningSchemeOrKeyPath == "" && sc.FileSignerPassword == "" && sc.TinkKEKURI == "" && sc.TinkKeysetPath == "" } func New(ctx context.Context, signer, pass, tinkKEKURI, tinkKeysetPath string) (signature.Signer, error) { switch { case slices.ContainsFunc(kms.SupportedProviders(), func(s string) bool { return strings.HasPrefix(signer, s) }): return kms.Get(ctx, signer, crypto.SHA256) case signer == MemoryScheme: return NewMemory() case signer == TinkScheme: return NewTinkSigner(ctx, tinkKEKURI, tinkKeysetPath) default: return 
NewFile(signer, pass) } }
// Copyright 2024 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package signer import ( "context" "errors" "fmt" "os" "path/filepath" "strings" tinkUtils "github.com/sigstore/sigstore/pkg/signature/tink" "github.com/tink-crypto/tink-go-awskms/v2/integration/awskms" "github.com/tink-crypto/tink-go-gcpkms/v2/integration/gcpkms" "github.com/tink-crypto/tink-go/v2/core/registry" "github.com/tink-crypto/tink-go/v2/keyset" "github.com/tink-crypto/tink-go/v2/tink" "github.com/sigstore/sigstore/pkg/signature" ) const TinkScheme = "tink" // NewTinkSigner returns a signature.SignerVerifier that wraps crypto.Signer and a hash function. // Provide a path to the encrypted keyset and cloud KMS key URI for decryption func NewTinkSigner(ctx context.Context, kekURI, keysetPath string) (signature.Signer, error) { if kekURI == "" || keysetPath == "" { return nil, fmt.Errorf("key encryption key URI or keyset path unset") } kek, err := getKeyEncryptionKey(ctx, kekURI) if err != nil { return nil, err } return NewTinkSignerWithHandle(kek, keysetPath) } // NewTinkSignerWithHandle returns a signature.SignerVerifier that wraps crypto.Signer and a hash function. 
// Provide a path to the encrypted keyset and a key handle for decrypting the keyset func NewTinkSignerWithHandle(kek tink.AEAD, keysetPath string) (signature.Signer, error) { f, err := os.Open(filepath.Clean(keysetPath)) if err != nil { return nil, err } defer f.Close() kh, err := keyset.Read(keyset.NewJSONReader(f), kek) if err != nil { return nil, err } signer, err := tinkUtils.KeyHandleToSigner(kh) if err != nil { return nil, err } return signature.LoadDefaultSignerVerifier(signer) } // getKeyEncryptionKey returns a Tink AEAD encryption key from KMS // Supports GCP and AWS func getKeyEncryptionKey(ctx context.Context, kmsKey string) (tink.AEAD, error) { switch { case strings.HasPrefix(kmsKey, "gcp-kms://"): gcpClient, err := gcpkms.NewClientWithOptions(ctx, kmsKey) if err != nil { return nil, err } registry.RegisterKMSClient(gcpClient) return gcpClient.GetAEAD(kmsKey) case strings.HasPrefix(kmsKey, "aws-kms://"): awsClient, err := awskms.NewClientWithOptions(kmsKey) if err != nil { return nil, err } registry.RegisterKMSClient(awsClient) return awsClient.GetAEAD(kmsKey) default: return nil, errors.New("unsupported KMS key type") } }
// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package alpine import ( "context" "errors" "fmt" "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/rekor/pkg/types" ) const ( KIND = "alpine" ) type BaseAlpineType struct { types.RekorType } func init() { types.TypeMap.Store(KIND, New) } func New() types.TypeImpl { bat := BaseAlpineType{} bat.Kind = KIND bat.VersionMap = VersionMap return &bat } var VersionMap = types.NewSemVerEntryFactoryMap() func (bat *BaseAlpineType) UnmarshalEntry(pe models.ProposedEntry) (types.EntryImpl, error) { if pe == nil { return nil, errors.New("proposed entry cannot be nil") } apk, ok := pe.(*models.Alpine) if !ok { return nil, errors.New("cannot unmarshal non-Alpine types") } return bat.VersionedUnmarshal(apk, *apk.APIVersion) } func (bat *BaseAlpineType) CreateProposedEntry(ctx context.Context, version string, props types.ArtifactProperties) (models.ProposedEntry, error) { if version == "" { version = bat.DefaultVersion() } ei, err := bat.VersionedUnmarshal(nil, version) if err != nil { return nil, fmt.Errorf("fetching Intoto version implementation: %w", err) } return ei.CreateFromArtifactProperties(ctx, props) } func (bat BaseAlpineType) DefaultVersion() string { return "0.0.1" }
// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package alpine import ( "archive/tar" "bufio" "bytes" "compress/gzip" "crypto" "crypto/sha1" // #nosec G505 "crypto/sha256" "encoding/hex" "encoding/pem" "errors" "fmt" "hash" "io" "strings" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/options" "github.com/spf13/viper" "gopkg.in/ini.v1" ) type Package struct { Pkginfo map[string]string // KVP pairs Signature []byte Datahash []byte controlSHA1Digest []byte } type sha1Reader struct { r *bufio.Reader addToHash bool hasher hash.Hash } func newSHA1Reader(b *bufio.Reader) *sha1Reader { // #nosec G401 c := sha1Reader{ r: b, hasher: sha1.New(), //nolint: gosec } return &c } func (s *sha1Reader) Read(p []byte) (int, error) { n, err := s.r.Read(p) if err == nil && n > 0 && s.addToHash { s.hasher.Write(p) } return n, err } func (s *sha1Reader) ReadByte() (byte, error) { b, err := s.r.ReadByte() if err == nil && s.addToHash { s.hasher.Write([]byte{b}) } return b, err } func (s sha1Reader) Sum() []byte { return s.hasher.Sum(nil) } func (s *sha1Reader) StartHashing() { s.hasher.Reset() s.addToHash = true } func (s *sha1Reader) StopHashing() { s.addToHash = false } func (p *Package) Unmarshal(pkgReader io.Reader) error { pkg := Package{} // bufio.Reader is required if Multistream(false) is used bufReader := bufio.NewReader(pkgReader) sha1BufReader := newSHA1Reader(bufReader) gzipReader, err := 
gzip.NewReader(sha1BufReader) if err != nil { return fmt.Errorf("create gzip reader: %w", err) } defer func() { _ = gzipReader.Close() }() // APKs are concatenated gzip files so we want to know where the boundary is gzipReader.Multistream(false) // GZIP headers/footers are left unmodified; Tar footers are removed on first two archives // signature.tar.gz | control.tar.gz | data.tar.gz sigBuf := bytes.Buffer{} for { _, err := io.CopyN(&sigBuf, gzipReader, 1024) if err != nil { if err == io.EOF { break } return fmt.Errorf("reading signature.tar.gz: %w", err) } } // the SHA1 sum used in the signature is over the entire file control.tar.gz so we need to // intercept the buffered reading to compute the hash correctly // // we start sha1 hashing now since the Reset() call will begin reading control.tar.gz headers sha1BufReader.StartHashing() // we reset the reader since we've found the end of signature.tar.gz if err := gzipReader.Reset(sha1BufReader); err != nil && err != io.EOF { return fmt.Errorf("resetting to control.tar.gz: %w", err) } gzipReader.Multistream(false) controlTar := bytes.Buffer{} for { _, err := io.CopyN(&controlTar, gzipReader, 1024) if err != nil { if err == io.EOF { break } return fmt.Errorf("reading control.tar.gz: %w", err) } } // signature uses sha1 digest hardcoded in abuild-sign tool pkg.controlSHA1Digest = sha1BufReader.Sum() sha1BufReader.StopHashing() // the gzip reader is NOT reset again since that advances the underlying reader // by reading the next GZIP header, which affects the datahash computation below sigReader := tar.NewReader(&sigBuf) for { header, err := sigReader.Next() if err == io.EOF { if pkg.Signature == nil { return errors.New("no signature detected in alpine package") } break } else if err != nil { return fmt.Errorf("getting next entry in tar archive: %w", err) } if strings.HasPrefix(header.Name, ".SIGN") && pkg.Signature == nil { if header.Size < 0 { return errors.New("negative header size for .SIGN file") } if 
uint64(header.Size) > viper.GetUint64("max_apk_metadata_size") && viper.GetUint64("max_apk_metadata_size") > 0 { return fmt.Errorf("uncompressed .SIGN file size %d exceeds max allowed size %d", header.Size, viper.GetUint64("max_apk_metadata_size")) } sigBytes := make([]byte, header.Size) if _, err = sigReader.Read(sigBytes); err != nil && err != io.EOF { return fmt.Errorf("reading signature: %w", err) } // we're not sure whether this is PEM encoded or not, so handle both cases block, _ := pem.Decode(sigBytes) if block == nil { pkg.Signature = sigBytes } else { pkg.Signature = block.Bytes } } } ctlReader := tar.NewReader(&controlTar) for { header, err := ctlReader.Next() if err == io.EOF { if pkg.Pkginfo == nil { return errors.New(".PKGINFO file was not located") } break } else if err != nil { return fmt.Errorf("getting next entry in tar archive: %w", err) } if header.Name == ".PKGINFO" { if header.Size < 0 { return errors.New("negative header size for .PKGINFO file") } if uint64(header.Size) > viper.GetUint64("max_apk_metadata_size") && viper.GetUint64("max_apk_metadata_size") > 0 { return fmt.Errorf("uncompressed .PKGINFO file size %d exceeds max allowed size %d", header.Size, viper.GetUint64("max_apk_metadata_size")) } pkginfoContent := make([]byte, header.Size) if _, err = ctlReader.Read(pkginfoContent); err != nil && err != io.EOF { return fmt.Errorf("reading .PKGINFO: %w", err) } pkg.Pkginfo, err = parsePkginfo(pkginfoContent) if err != nil { return fmt.Errorf("parsing .PKGINFO: %w", err) } pkg.Datahash, err = hex.DecodeString(pkg.Pkginfo["datahash"]) if err != nil { return fmt.Errorf("parsing datahash: %w", err) } } } // at this point, bufReader should point to first byte of data.tar.gz // datahash value from .PKGINFO is sha256 sum of data.tar.gz sha256 := sha256.New() if _, err := io.Copy(sha256, bufReader); err != nil { return fmt.Errorf("computing SHA256 sum of data.tar.gz: %w", err) } computedSum := sha256.Sum(nil) if !bytes.Equal(computedSum, 
pkg.Datahash) { return fmt.Errorf("checksum for data.tar.gz (%v) does not match value from .PKGINFO (%v)", hex.EncodeToString(computedSum), hex.EncodeToString(pkg.Datahash)) } *p = pkg return nil } // VerifySignature verifies the signature of the alpine package using the provided // public key. It returns an error if verification fails, or nil if it is successful. func (p Package) VerifySignature(pub crypto.PublicKey) error { if p.Signature == nil { return errors.New("no signature in alpine package object") } if p.controlSHA1Digest == nil { return errors.New("no digest value for data.tar.gz known") } verifier, err := signature.LoadUnsafeVerifier(pub) if err != nil { return err } return verifier.VerifySignature(bytes.NewReader(p.Signature), nil, options.WithDigest(p.controlSHA1Digest), options.WithCryptoSignerOpts(crypto.SHA1)) } // parsePkginfo parses the .PKGINFO file which is in a // key[space]=[space]value\n // format. it returns a map[string]string of the key/value pairs, or // an error if parsing could not be completed successfully. func parsePkginfo(input []byte) (map[string]string, error) { cfg, err := ini.Load(input) if err != nil { return nil, err } // .PKGINFO does not use sections, so using "" grabs the default values return cfg.Section("").KeysHash(), nil }
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package alpine (v0.0.1) implements the Rekor type for APK (Alpine package)
// entries: unmarshalling proposed entries, verifying package signatures, and
// canonicalizing entries for inclusion in the log.
package alpine

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/asaskevich/govalidator"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	"golang.org/x/sync/errgroup"

	"github.com/sigstore/rekor/pkg/generated/models"
	"github.com/sigstore/rekor/pkg/log"
	"github.com/sigstore/rekor/pkg/pki"
	"github.com/sigstore/rekor/pkg/pki/x509"
	"github.com/sigstore/rekor/pkg/types"
	"github.com/sigstore/rekor/pkg/types/alpine"
	"github.com/sigstore/rekor/pkg/util"
)

const (
	// APIVERSION is the schema version implemented by this entry type.
	APIVERSION = "0.0.1"
)

func init() {
	// register this implementation as the factory for alpine entries at APIVERSION
	if err := alpine.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil {
		log.Logger.Panic(err)
	}
}

// V001Entry is the v0.0.1 implementation of the alpine entry type.
type V001Entry struct {
	AlpineModel models.AlpineV001Schema
}

// APIVersion returns the schema version supported by this entry implementation.
func (v V001Entry) APIVersion() string {
	return APIVERSION
}

// NewEntry returns a fresh, empty alpine v0.0.1 entry.
func NewEntry() types.EntryImpl {
	return &V001Entry{}
}

// IndexKeys returns the keys that should map back to this entry in the search
// index: the SHA256 of the canonicalized public key, any key subjects, and the
// package hash (algorithm:value) when present.
func (v V001Entry) IndexKeys() ([]string, error) {
	var result []string

	keyObj, err := x509.NewPublicKey(bytes.NewReader(*v.AlpineModel.PublicKey.Content))
	if err != nil {
		return nil, err
	}

	key, err := keyObj.CanonicalValue()
	if err != nil {
		return nil, err
	}
	keyHash := sha256.Sum256(key)
	result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:])))
	result = append(result, keyObj.Subjects()...)

	if v.AlpineModel.Package.Hash != nil {
		hashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.AlpineModel.Package.Hash.Algorithm, *v.AlpineModel.Package.Hash.Value))
		result = append(result, hashKey)
	}

	return result, nil
}

// Unmarshal decodes a proposed alpine entry into this object, running both
// schema-level and cross-field validation.
func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error {
	apk, ok := pe.(*models.Alpine)
	if !ok {
		return errors.New("cannot unmarshal non Alpine v0.0.1 type")
	}

	if err := types.DecodeEntry(apk.Spec, &v.AlpineModel); err != nil {
		return err
	}

	// field validation
	if err := v.AlpineModel.Validate(strfmt.Default); err != nil {
		return err
	}

	return v.validate()
}

// fetchExternalEntities parses the public key and APK package concurrently,
// verifies the package signature against the key, and checks (or computes)
// the SHA256 of the package content. On success it also backfills the
// package hash into v.AlpineModel when the client did not supply one.
func (v *V001Entry) fetchExternalEntities(ctx context.Context) (*x509.PublicKey, *alpine.Package, error) {
	if err := v.validate(); err != nil {
		return nil, nil, &types.InputValidationError{Err: err}
	}

	g, ctx := errgroup.WithContext(ctx)

	// the package content is streamed once and teed into two pipes:
	// one for hashing, one for APK parsing/verification
	hashR, hashW := io.Pipe()
	apkR, apkW := io.Pipe()
	defer hashR.Close()
	defer apkR.Close()

	// closePipesOnError tears down all pipes so no goroutine blocks forever
	closePipesOnError := types.PipeCloser(hashR, hashW, apkR, apkW)

	oldSHA := ""
	if v.AlpineModel.Package.Hash != nil && v.AlpineModel.Package.Hash.Value != nil {
		oldSHA = swag.StringValue(v.AlpineModel.Package.Hash.Value)
	}

	// producer: copy the package bytes into both pipes
	g.Go(func() error {
		defer hashW.Close()
		defer apkW.Close()

		dataReadCloser := bytes.NewReader(v.AlpineModel.Package.Content)

		/* #nosec G110 */
		if _, err := io.Copy(io.MultiWriter(hashW, apkW), dataReadCloser); err != nil {
			return closePipesOnError(err)
		}
		return nil
	})

	hashResult := make(chan string)

	// consumer 1: compute SHA256 of the content and compare against any
	// client-supplied value
	g.Go(func() error {
		defer close(hashResult)
		hasher := sha256.New()

		if _, err := io.Copy(hasher, hashR); err != nil {
			return closePipesOnError(err)
		}

		computedSHA := hex.EncodeToString(hasher.Sum(nil))
		if oldSHA != "" && computedSHA != oldSHA {
			return closePipesOnError(&types.InputValidationError{Err: fmt.Errorf("SHA mismatch: %s != %s", computedSHA, oldSHA)})
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case hashResult <- computedSHA:
			return nil
		}
	})

	keyResult := make(chan *x509.PublicKey)

	// parse the public key in parallel and hand it to the verifier goroutine
	g.Go(func() error {
		defer close(keyResult)
		keyReadCloser := bytes.NewReader(*v.AlpineModel.PublicKey.Content)
		keyObj, err := x509.NewPublicKey(keyReadCloser)
		if err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case keyResult <- keyObj:
			return nil
		}
	})

	var apkObj *alpine.Package
	var key *x509.PublicKey

	// consumer 2: parse the APK from the second pipe and verify its signature
	// with the parsed key
	g.Go(func() error {
		apk := alpine.Package{}
		if err := apk.Unmarshal(apkR); err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		key = <-keyResult
		if key == nil {
			// keyResult was closed without a value: key parsing failed
			return closePipesOnError(errors.New("error processing public key"))
		}

		if err := apk.VerifySignature(key.CryptoPubKey()); err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		apkObj = &apk

		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			return nil
		}
	})

	// receive before Wait so the hashing goroutine's send cannot block forever
	computedSHA := <-hashResult

	if err := g.Wait(); err != nil {
		return nil, nil, err
	}

	// if we get here, all goroutines succeeded without error
	if oldSHA == "" {
		v.AlpineModel.Package.Hash = &models.AlpineV001SchemaPackageHash{}
		v.AlpineModel.Package.Hash.Algorithm = swag.String(models.AlpineV001SchemaPackageHashAlgorithmSha256)
		v.AlpineModel.Package.Hash.Value = swag.String(computedSHA)
	}

	return key, apkObj, nil
}

// Canonicalize verifies the entry's external entities and returns the
// canonical JSON representation to be persisted in the log. The raw package
// content is deliberately omitted; only the hash and .PKGINFO metadata are kept.
func (v *V001Entry) Canonicalize(ctx context.Context) ([]byte, error) {
	key, apkObj, err := v.fetchExternalEntities(ctx)
	if err != nil {
		return nil, err
	}

	canonicalEntry := models.AlpineV001Schema{}

	var content []byte
	// need to canonicalize key content
	canonicalEntry.PublicKey = &models.AlpineV001SchemaPublicKey{}
	content, err = key.CanonicalValue()
	if err != nil {
		return nil, err
	}
	canonicalEntry.PublicKey.Content = (*strfmt.Base64)(&content)

	canonicalEntry.Package = &models.AlpineV001SchemaPackage{}
	canonicalEntry.Package.Hash = &models.AlpineV001SchemaPackageHash{}
	canonicalEntry.Package.Hash.Algorithm = v.AlpineModel.Package.Hash.Algorithm
	canonicalEntry.Package.Hash.Value = v.AlpineModel.Package.Hash.Value
	// data content is not set deliberately

	// set .PKGINFO headers
	canonicalEntry.Package.Pkginfo = apkObj.Pkginfo

	// wrap in valid object with kind and apiVersion set
	apk := models.Alpine{}
	apk.APIVersion = swag.String(APIVERSION)
	apk.Spec = &canonicalEntry

	v.AlpineModel = canonicalEntry

	return json.Marshal(&apk)
}

// validate performs cross-field validation for fields in object
func (v V001Entry) validate() error {
	key := v.AlpineModel.PublicKey
	if key == nil {
		return errors.New("missing public key")
	}
	if key.Content == nil || len(*key.Content) == 0 {
		return errors.New("'content' must be specified for publicKey")
	}

	pkg := v.AlpineModel.Package
	if pkg == nil {
		return errors.New("missing package")
	}

	hash := pkg.Hash
	if hash != nil {
		// when a hash is given it must be well-formed for its algorithm
		if !govalidator.IsHash(swag.StringValue(hash.Value), swag.StringValue(hash.Algorithm)) {
			return errors.New("invalid value for hash")
		}
	} else if len(pkg.Content) == 0 {
		// without a hash, the raw package content is required
		return errors.New("'content' must be specified for package")
	}

	return nil
}

// CreateFromArtifactProperties builds a proposed alpine entry from CLI-style
// artifact properties (artifact bytes/path plus exactly one public key),
// validating and verifying the result before returning it.
func (v V001Entry) CreateFromArtifactProperties(ctx context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) {
	returnVal := models.Alpine{}
	re := V001Entry{}

	// we will need artifact, public-key, signature
	re.AlpineModel = models.AlpineV001Schema{}
	re.AlpineModel.Package = &models.AlpineV001SchemaPackage{}

	var err error
	artifactBytes := props.ArtifactBytes
	if artifactBytes == nil {
		var artifactReader io.ReadCloser
		if props.ArtifactPath == nil {
			return nil, errors.New("path to artifact file must be specified")
		}
		if props.ArtifactPath.IsAbs() {
			// absolute URL: may be fetched over the network
			artifactReader, err = util.FileOrURLReadCloser(ctx, props.ArtifactPath.String(), nil)
			if err != nil {
				return nil, fmt.Errorf("error reading artifact file: %w", err)
			}
		} else {
			artifactReader, err = os.Open(filepath.Clean(props.ArtifactPath.Path))
			if err != nil {
				return nil, fmt.Errorf("error opening artifact file: %w", err)
			}
		}
		artifactBytes, err = io.ReadAll(artifactReader)
		if err != nil {
			return nil, fmt.Errorf("error reading artifact file: %w", err)
		}
	}
	re.AlpineModel.Package.Content = strfmt.Base64(artifactBytes)

	re.AlpineModel.PublicKey = &models.AlpineV001SchemaPublicKey{}
	publicKeyBytes := props.PublicKeyBytes
	if len(publicKeyBytes) == 0 {
		if len(props.PublicKeyPaths) != 1 {
			return nil, errors.New("only one public key must be provided")
		}
		keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path))
		if err != nil {
			return nil, fmt.Errorf("error reading public key file: %w", err)
		}
		publicKeyBytes = append(publicKeyBytes, keyBytes)
	} else if len(publicKeyBytes) != 1 {
		return nil, errors.New("only one public key must be provided")
	}
	re.AlpineModel.PublicKey.Content = (*strfmt.Base64)(&publicKeyBytes[0])

	if err := re.validate(); err != nil {
		return nil, err
	}

	// verify the signature and hash before proposing the entry
	if _, _, err := re.fetchExternalEntities(ctx); err != nil {
		return nil, fmt.Errorf("error retrieving external entities: %w", err)
	}

	returnVal.APIVersion = swag.String(re.APIVersion())
	returnVal.Spec = re.AlpineModel

	return &returnVal, nil
}

// Verifiers returns the public key stored in the entry as the set of
// verifiers for this entry's signature.
func (v V001Entry) Verifiers() ([]pki.PublicKey, error) {
	if v.AlpineModel.PublicKey == nil || v.AlpineModel.PublicKey.Content == nil {
		return nil, errors.New("alpine v0.0.1 entry not initialized")
	}
	key, err := x509.NewPublicKey(bytes.NewReader(*v.AlpineModel.PublicKey.Content))
	if err != nil {
		return nil, err
	}
	return []pki.PublicKey{key}, nil
}

// ArtifactHash returns the package hash in "algorithm:value" form (lowercased).
func (v V001Entry) ArtifactHash() (string, error) {
	if v.AlpineModel.Package == nil || v.AlpineModel.Package.Hash == nil || v.AlpineModel.Package.Hash.Value == nil || v.AlpineModel.Package.Hash.Algorithm == nil {
		return "", errors.New("alpine v0.0.1 entry not initialized")
	}
	return strings.ToLower(fmt.Sprintf("%s:%s", *v.AlpineModel.Package.Hash.Algorithm, *v.AlpineModel.Package.Hash.Value)), nil
}

// Insertable reports whether the entry carries all client-supplied fields
// (package content and public key) needed to be inserted into the log.
func (v V001Entry) Insertable() (bool, error) {
	if v.AlpineModel.Package == nil {
		return false, errors.New("missing package entry")
	}
	if len(v.AlpineModel.Package.Content) == 0 {
		return false, errors.New("missing package content")
	}
	if v.AlpineModel.PublicKey == nil {
		return false, errors.New("missing public key")
	}
	if v.AlpineModel.PublicKey.Content == nil || len(*v.AlpineModel.PublicKey.Content) == 0 {
		return false, errors.New("missing public key content")
	}
	return true, nil
}
//
// Copyright 2022 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package cose (v0.0.1) implements the Rekor type for COSE Sign1 envelopes:
// unmarshalling proposed entries, verifying the Sign1 signature, and
// canonicalizing entries (hashes only; the envelope itself is stored as an
// attestation).
package cose

import (
	"bytes"
	"context"
	"crypto"
	"crypto/ecdsa"
	"crypto/rsa"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	"github.com/in-toto/in-toto-golang/in_toto"
	"github.com/spf13/viper"
	gocose "github.com/veraison/go-cose"

	"github.com/sigstore/rekor/pkg/generated/models"
	"github.com/sigstore/rekor/pkg/log"
	"github.com/sigstore/rekor/pkg/pki"
	"github.com/sigstore/rekor/pkg/pki/x509"
	"github.com/sigstore/rekor/pkg/types"
	"github.com/sigstore/rekor/pkg/types/cose"
)

const (
	// APIVERSION is the schema version implemented by this entry type.
	APIVERSION = "0.0.1"
)

const (
	// CurveP256 is the only elliptic curve accepted for ECDSA keys (ES256).
	CurveP256 = "P-256"
)

func init() {
	// register this implementation as the factory for cose entries at APIVERSION
	if err := cose.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil {
		log.Logger.Panic(err)
	}
}

// V001Entry is the v0.0.1 implementation of the cose entry type.
type V001Entry struct {
	CoseObj models.CoseV001Schema
	// keyObj is the parsed public key (populated during Unmarshal)
	keyObj pki.PublicKey
	// sign1Msg is the decoded COSE Sign1 message (populated by validate();
	// nil when the entry was loaded from storage without the raw message)
	sign1Msg *gocose.Sign1Message
	// envelopeHash is the SHA256 of the raw COSE envelope
	envelopeHash []byte
}

// APIVersion returns the schema version supported by this entry implementation.
func (v V001Entry) APIVersion() string {
	return APIVERSION
}

// NewEntry returns a fresh, empty cose v0.0.1 entry.
func NewEntry() types.EntryImpl {
	return &V001Entry{}
}

// IndexKeys returns the keys that should map back to this entry in the
// search index.
func (v V001Entry) IndexKeys() ([]string, error) {
	var result []string

	// We add the key, the hash of the overall cose envelope, and the hash of the payload itself as keys.
	keyObj, err := x509.NewPublicKey(bytes.NewReader(*v.CoseObj.PublicKey))
	if err != nil {
		return nil, err
	}

	// 1. Key
	key, err := keyObj.CanonicalValue()
	if err != nil {
		log.Logger.Error(err)
	} else {
		keyHash := sha256.Sum256(key)
		result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:])))
	}
	result = append(result, keyObj.Subjects()...)

	// 2. Overall envelope
	result = append(result, formatKey(v.CoseObj.Message))

	// 3. Payload
	if v.sign1Msg != nil {
		result = append(result, formatKey(v.sign1Msg.Payload))
	} else {
		// If no payload exists (it's unpacked in validate() method)
		// return now, as we will not be able to extract any headers
		return result, nil
	}

	// If payload is an in-toto statement, let's grab the subjects.
	if rawContentType, ok := v.sign1Msg.Headers.Protected[gocose.HeaderLabelContentType]; ok {
		contentType, ok := rawContentType.(string)
		// Integers as defined by CoAP content format are valid too,
		// but in-intoto payload type is not defined there, so only
		// proceed if content type is a string.
		// See list of CoAP content formats here:
		// https://www.iana.org/assignments/core-parameters/core-parameters.xhtml#content-formats
		if ok && contentType == in_toto.PayloadType {
			stmt, err := getIntotoStatement(v.sign1Msg.Payload)
			if err != nil {
				// ContentType header says intoto statement, but
				// parsing failed, continue with a warning.
				log.Logger.Warnf("Failed to parse intoto statement")
			} else {
				for _, sub := range stmt.Subject {
					for alg, digest := range sub.Digest {
						index := alg + ":" + digest
						result = append(result, index)
					}
				}
			}
		}
	}

	return result, nil
}

// getIntotoStatement unmarshals b as an in-toto statement.
func getIntotoStatement(b []byte) (*in_toto.Statement, error) {
	var stmt in_toto.Statement
	if err := json.Unmarshal(b, &stmt); err != nil {
		return nil, err
	}

	return &stmt, nil
}

// formatKey returns the lowercased "sha256:<hex digest>" index key for b.
func formatKey(b []byte) string {
	h := sha256.Sum256(b)
	hash := hex.EncodeToString(h[:])
	return strings.ToLower(fmt.Sprintf("%s:%s", models.CoseV001SchemaDataPayloadHashAlgorithmSha256, hash))
}

// Unmarshal decodes a proposed cose entry into this object, parsing the
// public key, computing (or restoring) the envelope hash, and verifying
// the signature when the raw message is present.
func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error {
	it, ok := pe.(*models.Cose)
	if !ok {
		return errors.New("cannot unmarshal non Cose v0.0.1 type")
	}

	var err error
	if err := types.DecodeEntry(it.Spec, &v.CoseObj); err != nil {
		return err
	}

	// field validation
	if err := v.CoseObj.Validate(strfmt.Default); err != nil {
		return err
	}

	v.keyObj, err = x509.NewPublicKey(bytes.NewReader(*v.CoseObj.PublicKey))
	if err != nil {
		return err
	}

	// Store the envelope hash.
	// The CoseObj.Message is only populated during entry creation.
	// When marshalling from the database (retrieval) the envelope
	// hash must be decoded from the stored hex string.
	// The envelope hash is used to create the attestation key during
	// retrieval of a record.
	if len(v.CoseObj.Message) == 0 {
		if v.CoseObj.Data == nil || v.CoseObj.Data.EnvelopeHash == nil || v.CoseObj.Data.EnvelopeHash.Value == nil {
			return errors.New("envelope hash should have been previously computed")
		}
		b, err := hex.DecodeString(*v.CoseObj.Data.EnvelopeHash.Value)
		if err != nil {
			return err
		}
		v.envelopeHash = b
	} else {
		h := sha256.Sum256(v.CoseObj.Message)
		v.envelopeHash = h[:]
	}

	return v.validate()
}

// Canonicalize returns the canonical JSON representation to be persisted in
// the log: the canonicalized public key plus payload and envelope hashes.
// The raw COSE message is not included (it is stored as an attestation).
func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) {
	if v.keyObj == nil {
		return nil, errors.New("cannot canonicalze empty key")
	}

	pk, err := v.keyObj.CanonicalValue()
	if err != nil {
		return nil, err
	}
	pkb := strfmt.Base64(pk)

	h := sha256.Sum256([]byte(v.sign1Msg.Payload))

	canonicalEntry := models.CoseV001Schema{
		PublicKey: &pkb,
		Data: &models.CoseV001SchemaData{
			PayloadHash: &models.CoseV001SchemaDataPayloadHash{
				Algorithm: swag.String(models.CoseV001SchemaDataPayloadHashAlgorithmSha256),
				Value:     swag.String(hex.EncodeToString(h[:])),
			},
			EnvelopeHash: &models.CoseV001SchemaDataEnvelopeHash{
				Algorithm: swag.String(models.CoseV001SchemaDataEnvelopeHashAlgorithmSha256),
				Value:     swag.String(hex.EncodeToString(v.envelopeHash)),
			},
		},
	}

	itObj := models.Cose{}
	itObj.APIVersion = swag.String(APIVERSION)
	itObj.Spec = &canonicalEntry

	return json.Marshal(&itObj)
}

// validate performs cross-field validation for fields in object
func (v *V001Entry) validate() error {
	// This also gets called in the CLI, where we won't have this data
	// or during record retrieval (message is the raw COSE object) which
	// is only stored as an attestation.
	if len(v.CoseObj.Message) == 0 {
		return nil
	}

	alg, pk, err := getPublicKey(v.keyObj)
	if err != nil {
		return err
	}

	bv, err := gocose.NewVerifier(alg, pk)
	if err != nil {
		return err
	}

	sign1Msg := gocose.NewSign1Message()
	if err := sign1Msg.UnmarshalCBOR(v.CoseObj.Message); err != nil {
		return err
	}

	// verify with any additional authenticated data the client supplied
	if err := sign1Msg.Verify(v.CoseObj.Data.Aad, bv); err != nil {
		return err
	}

	v.sign1Msg = sign1Msg
	return nil
}

// getPublicKey maps the entry's x509 public key to a go-cose algorithm and
// crypto.PublicKey pair. Only RSA (PS256) and ECDSA P-256 (ES256) are
// supported.
func getPublicKey(pk pki.PublicKey) (gocose.Algorithm, crypto.PublicKey, error) {
	invAlg := gocose.Algorithm(0)
	x5pk, ok := pk.(*x509.PublicKey)

	if !ok {
		return invAlg, nil, errors.New("invalid public key type")
	}

	cryptoPub := x5pk.CryptoPubKey()

	var alg gocose.Algorithm
	switch t := cryptoPub.(type) {
	case *rsa.PublicKey:
		alg = gocose.AlgorithmPS256
	case *ecdsa.PublicKey:
		alg = gocose.AlgorithmES256
		if t.Params().Name != CurveP256 {
			return invAlg, nil, fmt.Errorf("unsupported elliptic curve %s", t.Params().Name)
		}
	default:
		return invAlg, nil, fmt.Errorf("unsupported algorithm type %T", t)
	}

	return alg, cryptoPub, nil
}

// AttestationKey returns the digest of the COSE envelope that was uploaded,
// to be used to lookup the attestation from storage.
func (v *V001Entry) AttestationKey() string {
	return fmt.Sprintf("%s:%s", models.CoseV001SchemaDataEnvelopeHashAlgorithmSha256, hex.EncodeToString(v.envelopeHash))
}

// AttestationKeyValue returns both the key and value to be persisted
// into attestation storage
func (v *V001Entry) AttestationKeyValue() (string, []byte) {
	storageSize := len(v.CoseObj.Message)
	if storageSize > viper.GetInt("max_attestation_size") {
		// too large to store; skip rather than fail
		log.Logger.Infof("Skipping attestation storage, size %d is greater than max %d", storageSize, viper.GetInt("max_attestation_size"))
		return "", nil
	}

	return v.AttestationKey(), v.CoseObj.Message
}

// CreateFromArtifactProperties builds a proposed cose entry from CLI-style
// artifact properties (envelope bytes/local path plus exactly one public key).
func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) {
	returnVal := models.Cose{}
	var err error

	messageBytes := props.ArtifactBytes
	if messageBytes == nil {
		if props.ArtifactPath == nil {
			return nil, errors.New("path to artifact file must be specified")
		}
		if props.ArtifactPath.IsAbs() {
			// remote fetch is deliberately unsupported for envelopes
			return nil, errors.New("cose envelopes cannot be fetched over HTTP(S)")
		}
		messageBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path))
		if err != nil {
			return nil, err
		}
	}

	publicKeyBytes := props.PublicKeyBytes
	if len(publicKeyBytes) == 0 {
		if len(props.PublicKeyPaths) != 1 {
			return nil, errors.New("only one public key must be provided to verify signature")
		}
		keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path))
		if err != nil {
			return nil, fmt.Errorf("error reading public key file: %w", err)
		}
		publicKeyBytes = append(publicKeyBytes, keyBytes)
	} else if len(publicKeyBytes) != 1 {
		return nil, errors.New("only one public key must be provided")
	}

	kb := strfmt.Base64(publicKeyBytes[0])
	mb := strfmt.Base64(messageBytes)

	re := V001Entry{
		CoseObj: models.CoseV001Schema{
			Data: &models.CoseV001SchemaData{
				Aad: props.AdditionalAuthenticatedData,
			},
			PublicKey: &kb,
			Message:   mb,
		},
	}

	returnVal.Spec = re.CoseObj
	returnVal.APIVersion = swag.String(re.APIVersion())

	return &returnVal, nil
}

// Verifiers returns the public key stored in the entry as the set of
// verifiers for this entry's signature.
func (v V001Entry) Verifiers() ([]pki.PublicKey, error) {
	if v.CoseObj.PublicKey == nil {
		return nil, errors.New("cose v0.0.1 entry not initialized")
	}
	key, err := x509.NewPublicKey(bytes.NewReader(*v.CoseObj.PublicKey))
	if err != nil {
		return nil, err
	}
	return []pki.PublicKey{key}, nil
}

// ArtifactHash returns the payload hash in "algorithm:value" form (lowercased).
func (v V001Entry) ArtifactHash() (string, error) {
	if v.CoseObj.Data == nil || v.CoseObj.Data.PayloadHash == nil || v.CoseObj.Data.PayloadHash.Value == nil || v.CoseObj.Data.PayloadHash.Algorithm == nil {
		return "", errors.New("cose v0.0.1 entry not initialized")
	}
	return strings.ToLower(fmt.Sprintf("%s:%s", *v.CoseObj.Data.PayloadHash.Algorithm, *v.CoseObj.Data.PayloadHash.Value)), nil
}

// Insertable reports whether the entry carries all fields (raw message,
// public key, data, and the derived state from Unmarshal/validate) needed
// to be inserted into the log.
func (v V001Entry) Insertable() (bool, error) {
	if len(v.CoseObj.Message) == 0 {
		return false, errors.New("missing COSE Sign1 message")
	}
	if v.CoseObj.PublicKey == nil || len(*v.CoseObj.PublicKey) == 0 {
		return false, errors.New("missing public key")
	}
	if v.CoseObj.Data == nil {
		return false, errors.New("missing COSE data property")
	}
	if len(v.envelopeHash) == 0 {
		return false, errors.New("envelope hash has not been computed")
	}
	if v.keyObj == nil {
		return false, errors.New("public key has not been parsed")
	}
	if v.sign1Msg == nil {
		return false, errors.New("signature has not been validated")
	}

	return true, nil
}
// // Copyright 2023 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package dsse import ( "bytes" "context" "crypto" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "os" "path/filepath" "sort" "strings" "github.com/in-toto/in-toto-golang/in_toto" "github.com/secure-systems-lab/go-securesystemslib/dsse" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/rekor/pkg/log" "github.com/sigstore/rekor/pkg/pki" "github.com/sigstore/rekor/pkg/pki/x509" "github.com/sigstore/rekor/pkg/types" dsseType "github.com/sigstore/rekor/pkg/types/dsse" "github.com/sigstore/sigstore/pkg/signature" sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse" ) const ( APIVERSION = "0.0.1" ) func init() { if err := dsseType.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { log.Logger.Panic(err) } } type V001Entry struct { DSSEObj models.DSSEV001Schema env *dsse.Envelope } func (v V001Entry) APIVersion() string { return APIVERSION } func NewEntry() types.EntryImpl { return &V001Entry{} } // IndexKeys computes the list of keys that should map back to this entry. 
// It should *never* reference v.DSSEObj.ProposedContent as those values would only // be present at the time of insertion func (v V001Entry) IndexKeys() ([]string, error) { var result []string for _, sig := range v.DSSEObj.Signatures { if sig == nil || sig.Verifier == nil { return result, errors.New("missing or malformed public key") } keyObj, err := x509.NewPublicKey(bytes.NewReader(*sig.Verifier)) if err != nil { return result, err } canonKey, err := keyObj.CanonicalValue() if err != nil { return result, fmt.Errorf("could not canonicalize key: %w", err) } keyHash := sha256.Sum256(canonKey) result = append(result, "sha256:"+hex.EncodeToString(keyHash[:])) result = append(result, keyObj.Subjects()...) } if v.DSSEObj.PayloadHash != nil { payloadHashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.PayloadHash.Algorithm, *v.DSSEObj.PayloadHash.Value)) result = append(result, payloadHashKey) } if v.DSSEObj.EnvelopeHash != nil { envelopeHashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.EnvelopeHash.Algorithm, *v.DSSEObj.EnvelopeHash.Value)) result = append(result, envelopeHashKey) } if v.env == nil { log.Logger.Info("DSSEObj content or DSSE envelope is nil, returning partial set of keys") return result, nil } switch v.env.PayloadType { case in_toto.PayloadType: if v.env.Payload == "" { log.Logger.Info("DSSEObj DSSE payload is empty") return result, nil } decodedPayload, err := v.env.DecodeB64Payload() if err != nil { return result, fmt.Errorf("could not decode envelope payload: %w", err) } statement, err := parseStatement(decodedPayload) if err != nil { return result, err } for _, s := range statement.Subject { for alg, ds := range s.Digest { result = append(result, alg+":"+ds) } } // Not all in-toto statements will contain a SLSA provenance predicate. // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate // for other predicates. 
if predicate, err := parseSlsaPredicate(decodedPayload); err == nil { if predicate.Predicate.Materials != nil { for _, s := range predicate.Predicate.Materials { for alg, ds := range s.Digest { result = append(result, alg+":"+ds) } } } } default: log.Logger.Infof("Unknown DSSE envelope payloadType: %s", v.env.PayloadType) } return result, nil } func parseStatement(p []byte) (*in_toto.Statement, error) { ps := in_toto.Statement{} if err := json.Unmarshal(p, &ps); err != nil { return nil, err } return &ps, nil } func parseSlsaPredicate(p []byte) (*in_toto.ProvenanceStatement, error) { predicate := in_toto.ProvenanceStatement{} if err := json.Unmarshal(p, &predicate); err != nil { return nil, err } return &predicate, nil } func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { it, ok := pe.(*models.DSSE) if !ok { return errors.New("cannot unmarshal non DSSE v0.0.1 type") } dsseObj := &models.DSSEV001Schema{} if err := types.DecodeEntry(it.Spec, dsseObj); err != nil { return err } // field validation if err := dsseObj.Validate(strfmt.Default); err != nil { return err } // either we have just proposed content or the canonicalized fields if dsseObj.ProposedContent == nil { // then we need canonicalized fields, and all must be present (if present, they would have been validated in the above call to Validate()) if dsseObj.EnvelopeHash == nil || dsseObj.PayloadHash == nil || len(dsseObj.Signatures) == 0 { return errors.New("either proposedContent or envelopeHash, payloadHash, and signatures must be present") } v.DSSEObj = *dsseObj return nil } // if we're here, then we're trying to propose a new entry so we check to ensure client's aren't setting server-side computed fields if dsseObj.EnvelopeHash != nil || dsseObj.PayloadHash != nil || len(dsseObj.Signatures) != 0 { return errors.New("either proposedContent or envelopeHash, payloadHash, and signatures must be present but not both") } env := &dsse.Envelope{} if err := 
json.Unmarshal([]byte(*dsseObj.ProposedContent.Envelope), env); err != nil { return err } if len(env.Signatures) == 0 { return errors.New("DSSE envelope must contain 1 or more signatures") } allPubKeyBytes := make([][]byte, 0) for _, publicKey := range dsseObj.ProposedContent.Verifiers { if publicKey == nil { return errors.New("an invalid null verifier was provided in ProposedContent") } allPubKeyBytes = append(allPubKeyBytes, publicKey) } sigToKeyMap, err := verifyEnvelope(allPubKeyBytes, env) if err != nil { return err } // we need to ensure we canonicalize the ordering of signatures sortedSigs := make([]string, 0, len(sigToKeyMap)) for sig := range sigToKeyMap { sortedSigs = append(sortedSigs, sig) } sort.Strings(sortedSigs) for i, sig := range sortedSigs { key := sigToKeyMap[sig] canonicalizedKey, err := key.CanonicalValue() if err != nil { return err } b64CanonicalizedKey := strfmt.Base64(canonicalizedKey) dsseObj.Signatures = append(dsseObj.Signatures, &models.DSSEV001SchemaSignaturesItems0{ Signature: &sortedSigs[i], Verifier: &b64CanonicalizedKey, }) } decodedPayload, err := env.DecodeB64Payload() if err != nil { // this shouldn't happen because failure would have occurred in verifyEnvelope call above return err } payloadHash := sha256.Sum256(decodedPayload) dsseObj.PayloadHash = &models.DSSEV001SchemaPayloadHash{ Algorithm: swag.String(models.DSSEV001SchemaPayloadHashAlgorithmSha256), Value: swag.String(hex.EncodeToString(payloadHash[:])), } envelopeHash := sha256.Sum256([]byte(*dsseObj.ProposedContent.Envelope)) dsseObj.EnvelopeHash = &models.DSSEV001SchemaEnvelopeHash{ Algorithm: swag.String(models.DSSEV001SchemaEnvelopeHashAlgorithmSha256), Value: swag.String(hex.EncodeToString(envelopeHash[:])), } // we've gotten through all processing without error, now update the object we're unmarshalling into v.DSSEObj = *dsseObj v.env = env return nil } // Canonicalize returns a JSON representation of the entry to be persisted into the log. 
This // will be further canonicalized by JSON Canonicalization Scheme (JCS) before being written. // // This function should not use v.DSSEObj.ProposedContent fields as they are client provided and // should not be trusted; the other fields at the top level are only set server side. func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) { canonicalEntry := models.DSSEV001Schema{ Signatures: v.DSSEObj.Signatures, EnvelopeHash: v.DSSEObj.EnvelopeHash, PayloadHash: v.DSSEObj.PayloadHash, ProposedContent: nil, // this is explicitly done as we don't want to canonicalize the envelope } for _, s := range canonicalEntry.Signatures { if s.Signature == nil { return nil, errors.New("canonical entry missing required signature") } } sort.Slice(canonicalEntry.Signatures, func(i, j int) bool { return *canonicalEntry.Signatures[i].Signature < *canonicalEntry.Signatures[j].Signature }) itObj := models.DSSE{} itObj.APIVersion = swag.String(APIVERSION) itObj.Spec = &canonicalEntry return json.Marshal(&itObj) } // AttestationKey and AttestationKeyValue are not implemented so the envelopes will not be persisted in Rekor func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { returnVal := models.DSSE{} re := V001Entry{ DSSEObj: models.DSSEV001Schema{ ProposedContent: &models.DSSEV001SchemaProposedContent{}, }, } var err error artifactBytes := props.ArtifactBytes if artifactBytes == nil { if props.ArtifactPath == nil { return nil, errors.New("path to artifact file must be specified") } if props.ArtifactPath.IsAbs() { return nil, errors.New("dsse envelopes cannot be fetched over HTTP(S)") } artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path)) if err != nil { return nil, err } } env := &dsse.Envelope{} if err := json.Unmarshal(artifactBytes, env); err != nil { return nil, fmt.Errorf("payload must be a valid DSSE envelope: %w", err) } allPubKeyBytes := make([][]byte, 0) if 
len(props.PublicKeyBytes) > 0 { allPubKeyBytes = append(allPubKeyBytes, props.PublicKeyBytes...) } if len(props.PublicKeyPaths) > 0 { for _, path := range props.PublicKeyPaths { if path.IsAbs() { return nil, errors.New("dsse public keys cannot be fetched over HTTP(S)") } publicKeyBytes, err := os.ReadFile(filepath.Clean(path.Path)) if err != nil { return nil, fmt.Errorf("error reading public key file: %w", err) } allPubKeyBytes = append(allPubKeyBytes, publicKeyBytes) } } keysBySig, err := verifyEnvelope(allPubKeyBytes, env) if err != nil { return nil, err } for _, key := range keysBySig { canonicalKey, err := key.CanonicalValue() if err != nil { return nil, err } re.DSSEObj.ProposedContent.Verifiers = append(re.DSSEObj.ProposedContent.Verifiers, strfmt.Base64(canonicalKey)) } re.DSSEObj.ProposedContent.Envelope = swag.String(string(artifactBytes)) returnVal.Spec = re.DSSEObj returnVal.APIVersion = swag.String(re.APIVersion()) return &returnVal, nil } // verifyEnvelope takes in an array of possible key bytes and attempts to parse them as x509 public keys. // it then uses these to verify the envelope and makes sure that every signature on the envelope is verified. // it returns a map of verifiers indexed by the signature the verifier corresponds to. 
func verifyEnvelope(allPubKeyBytes [][]byte, env *dsse.Envelope) (map[string]*x509.PublicKey, error) {
	// verifierBySig maps each envelope signature back to the key that verified it.
	verifierBySig := make(map[string]*x509.PublicKey)

	// allSigs tracks the signatures still awaiting verification; entries are
	// deleted as keys verify them, so an empty set means full coverage.
	allSigs := make(map[string]struct{})
	for _, sig := range env.Signatures {
		allSigs[sig.Sig] = struct{}{}
	}

	for _, pubKeyBytes := range allPubKeyBytes {
		if len(allSigs) == 0 {
			break // if all signatures have been verified, do not attempt anymore
		}

		key, err := x509.NewPublicKey(bytes.NewReader(pubKeyBytes))
		if err != nil {
			return nil, fmt.Errorf("could not parse public key as x509: %w", err)
		}

		vfr, err := signature.LoadVerifier(key.CryptoPubKey(), crypto.SHA256)
		if err != nil {
			return nil, fmt.Errorf("could not load verifier: %w", err)
		}

		dsseVfr, err := dsse.NewEnvelopeVerifier(&sigdsse.VerifierAdapter{SignatureVerifier: vfr})
		if err != nil {
			return nil, fmt.Errorf("could not use public key as a dsse verifier: %w", err)
		}

		accepted, err := dsseVfr.Verify(context.Background(), env)
		if err != nil {
			return nil, fmt.Errorf("could not verify envelope: %w", err)
		}

		// Record which signatures this key verified and remove them from the
		// outstanding set.
		for _, accept := range accepted {
			delete(allSigs, accept.Sig.Sig)
			verifierBySig[accept.Sig.Sig] = key
		}
	}

	// Any leftover signatures were not verified by any supplied key.
	if len(allSigs) > 0 {
		return nil, errors.New("all signatures must have a key that verifies it")
	}

	return verifierBySig, nil
}

// Verifiers returns the x509 public keys stored alongside each signature on the entry.
func (v V001Entry) Verifiers() ([]pki.PublicKey, error) {
	if len(v.DSSEObj.Signatures) == 0 {
		return nil, errors.New("dsse v0.0.1 entry not initialized")
	}
	var keys []pki.PublicKey
	for _, s := range v.DSSEObj.Signatures {
		key, err := x509.NewPublicKey(bytes.NewReader(*s.Verifier))
		if err != nil {
			return nil, err
		}
		keys = append(keys, key)
	}
	return keys, nil
}

// ArtifactHash returns the lowercased "algorithm:value" digest of the envelope payload.
func (v V001Entry) ArtifactHash() (string, error) {
	if v.DSSEObj.PayloadHash == nil || v.DSSEObj.PayloadHash.Algorithm == nil || v.DSSEObj.PayloadHash.Value == nil {
		return "", errors.New("dsse v0.0.1 entry not initialized")
	}
	return strings.ToLower(fmt.Sprintf("%s:%s", *v.DSSEObj.PayloadHash.Algorithm, *v.DSSEObj.PayloadHash.Value)), nil
}

// Insertable reports whether the entry carries everything a client must supply
// for insertion: a proposed envelope and at least one verifier.
func (v V001Entry) Insertable() (bool, error) {
	if v.DSSEObj.ProposedContent == nil {
		return false, errors.New("missing proposed content")
	}
	if v.DSSEObj.ProposedContent.Envelope == nil || len(*v.DSSEObj.ProposedContent.Envelope) == 0 {
		return false, errors.New("missing proposed DSSE envelope")
	}
	if len(v.DSSEObj.ProposedContent.Verifiers) == 0 {
		return false, errors.New("missing proposed verifiers")
	}
	return true, nil
}
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package hashedrekord

import (
	"bytes"
	"context"
	"crypto"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"path/filepath"
	"strings"

	"github.com/asaskevich/govalidator"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	"github.com/sigstore/rekor/pkg/generated/models"
	"github.com/sigstore/rekor/pkg/log"
	"github.com/sigstore/rekor/pkg/pki"
	"github.com/sigstore/rekor/pkg/pki/x509"
	"github.com/sigstore/rekor/pkg/types"
	hashedrekord "github.com/sigstore/rekor/pkg/types/hashedrekord"
	"github.com/sigstore/rekor/pkg/util"
	"github.com/sigstore/sigstore/pkg/signature/options"
)

const (
	APIVERSION = "0.0.1"
)

func init() {
	// Register this implementation as the factory for hashedrekord v0.0.1 entries.
	if err := hashedrekord.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil {
		log.Logger.Panic(err)
	}
}

// V001Entry implements the hashedrekord v0.0.1 entry type: a detached x509-based
// signature over a pre-computed artifact digest.
type V001Entry struct {
	HashedRekordObj models.HashedrekordV001Schema
}

// APIVersion returns the schema version this entry implements.
func (v V001Entry) APIVersion() string {
	return APIVERSION
}

// NewEntry returns an empty hashedrekord v0.0.1 entry implementation.
func NewEntry() types.EntryImpl {
	return &V001Entry{}
}

// IndexKeys returns the search-index keys for this entry: the SHA-256 of the
// public key, any subjects extracted from the key/certificate, and the artifact
// digest in "algorithm:value" form.
func (v V001Entry) IndexKeys() ([]string, error) {
	var result []string

	key := v.HashedRekordObj.Signature.PublicKey.Content
	keyHash := sha256.Sum256(key)
	result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:])))

	pub, err := x509.NewPublicKey(bytes.NewReader(key))
	if err != nil {
		return nil, err
	}
	result = append(result, pub.Subjects()...)

	if v.HashedRekordObj.Data.Hash != nil {
		hashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.HashedRekordObj.Data.Hash.Algorithm, *v.HashedRekordObj.Data.Hash.Value))
		result = append(result, hashKey)
	}

	return result, nil
}

// Unmarshal decodes a proposed entry into this implementation, running both
// schema-level and cross-field validation (including signature verification).
func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error {
	rekord, ok := pe.(*models.Hashedrekord)
	if !ok {
		return errors.New("cannot unmarshal non Rekord v0.0.1 type")
	}

	if err := types.DecodeEntry(rekord.Spec, &v.HashedRekordObj); err != nil {
		return err
	}

	// field validation
	if err := v.HashedRekordObj.Validate(strfmt.Default); err != nil {
		return err
	}

	// cross field validation
	_, _, err := v.validate()
	return err
}

// Canonicalize validates the entry and returns its canonical JSON form, with
// signature and key content normalized to their canonical encodings.
func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) {
	sigObj, keyObj, err := v.validate()
	if err != nil {
		return nil, &types.InputValidationError{Err: err}
	}

	canonicalEntry := models.HashedrekordV001Schema{}

	// need to canonicalize signature & key content
	canonicalEntry.Signature = &models.HashedrekordV001SchemaSignature{}
	canonicalEntry.Signature.Content, err = sigObj.CanonicalValue()
	if err != nil {
		return nil, err
	}

	// key URL (if known) is not set deliberately
	canonicalEntry.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{}
	canonicalEntry.Signature.PublicKey.Content, err = keyObj.CanonicalValue()
	if err != nil {
		return nil, err
	}

	canonicalEntry.Data = &models.HashedrekordV001SchemaData{}
	canonicalEntry.Data.Hash = v.HashedRekordObj.Data.Hash
	// data content is not set deliberately

	v.HashedRekordObj = canonicalEntry
	// wrap in valid object with kind and apiVersion set
	rekordObj := models.Hashedrekord{}
	rekordObj.APIVersion = swag.String(APIVERSION)
	rekordObj.Spec = &canonicalEntry

	return json.Marshal(&rekordObj)
}

// validate performs cross-field validation for fields in object
func (v *V001Entry) validate() (pki.Signature, pki.PublicKey, error) {
	sig := v.HashedRekordObj.Signature
	if sig == nil {
		return nil, nil, &types.InputValidationError{Err: errors.New("missing signature")}
	}
	// Hashed rekord type only works for x509 signature types
	sigObj, err := x509.NewSignatureWithOpts(bytes.NewReader(sig.Content), options.WithED25519ph())
	if err != nil {
		return nil, nil, &types.InputValidationError{Err: err}
	}

	key := sig.PublicKey
	if key == nil {
		return nil, nil, &types.InputValidationError{Err: errors.New("missing public key")}
	}
	keyObj, err := x509.NewPublicKey(bytes.NewReader(key.Content))
	if err != nil {
		return nil, nil, &types.InputValidationError{Err: err}
	}

	data := v.HashedRekordObj.Data
	if data == nil {
		return nil, nil, &types.InputValidationError{Err: errors.New("missing data")}
	}

	hash := data.Hash
	if hash == nil {
		return nil, nil, &types.InputValidationError{Err: errors.New("missing hash")}
	}

	// Ensure the hex string matches the declared algorithm's expected length/format.
	if !govalidator.IsHash(swag.StringValue(hash.Value), swag.StringValue(hash.Algorithm)) {
		return nil, nil, &types.InputValidationError{Err: errors.New("invalid value for hash")}
	}

	// Map the declared algorithm to a crypto.Hash; anything unrecognized
	// falls back to SHA-256 (the schema default).
	var alg crypto.Hash
	switch swag.StringValue(hash.Algorithm) {
	case models.HashedrekordV001SchemaDataHashAlgorithmSha384:
		alg = crypto.SHA384
	case models.HashedrekordV001SchemaDataHashAlgorithmSha512:
		alg = crypto.SHA512
	default:
		alg = crypto.SHA256
	}

	decoded, err := hex.DecodeString(*hash.Value)
	if err != nil {
		return nil, nil, err
	}

	// Verify the detached signature directly against the supplied digest
	// (nil message reader; the digest option carries the data).
	if err := sigObj.Verify(nil, keyObj, options.WithDigest(decoded), options.WithCryptoSignerOpts(alg)); err != nil {
		return nil, nil, &types.InputValidationError{Err: fmt.Errorf("verifying signature: %w", err)}
	}

	return sigObj, keyObj, nil
}

// getDataHashAlgorithm converts a crypto.Hash into the schema's string constant,
// defaulting to sha256 for any unrecognized algorithm.
func getDataHashAlgorithm(hashAlgorithm crypto.Hash) string {
	switch hashAlgorithm {
	case crypto.SHA384:
		return models.HashedrekordV001SchemaDataHashAlgorithmSha384
	case crypto.SHA512:
		return models.HashedrekordV001SchemaDataHashAlgorithmSha512
	default:
		return models.HashedrekordV001SchemaDataHashAlgorithmSha256
	}
}

// CreateFromArtifactProperties builds a proposed hashedrekord entry from an
// artifact hash, a detached signature, and exactly one x509 public key; the
// signature is verified before the proposed entry is returned.
func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) {
	returnVal := models.Hashedrekord{}
	re := V001Entry{}

	// we will need artifact, public-key, signature
	re.HashedRekordObj.Data = &models.HashedrekordV001SchemaData{}

	var err error

	if props.PKIFormat != string(pki.X509) {
		return nil, errors.New("hashedrekord entries can only be created for artifacts signed with x509-based PKI")
	}

	re.HashedRekordObj.Signature = &models.HashedrekordV001SchemaSignature{}
	sigBytes := props.SignatureBytes
	if sigBytes == nil {
		if props.SignaturePath == nil {
			return nil, errors.New("a detached signature must be provided")
		}
		sigBytes, err = os.ReadFile(filepath.Clean(props.SignaturePath.Path))
		if err != nil {
			return nil, fmt.Errorf("error reading signature file: %w", err)
		}
	}
	re.HashedRekordObj.Signature.Content = strfmt.Base64(sigBytes)

	re.HashedRekordObj.Signature.PublicKey = &models.HashedrekordV001SchemaSignaturePublicKey{}
	publicKeyBytes := props.PublicKeyBytes
	if len(publicKeyBytes) == 0 {
		if len(props.PublicKeyPaths) != 1 {
			return nil, errors.New("only one public key must be provided to verify detached signature")
		}
		keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path))
		if err != nil {
			return nil, fmt.Errorf("error reading public key file: %w", err)
		}
		publicKeyBytes = append(publicKeyBytes, keyBytes)
	} else if len(publicKeyBytes) != 1 {
		return nil, errors.New("only one public key must be provided")
	}

	// Split "algorithm:digest" (or default sha256) into its components.
	hashAlgorithm, hashValue := util.UnprefixSHA(props.ArtifactHash)
	re.HashedRekordObj.Signature.PublicKey.Content = strfmt.Base64(publicKeyBytes[0])
	re.HashedRekordObj.Data.Hash = &models.HashedrekordV001SchemaDataHash{
		Algorithm: swag.String(getDataHashAlgorithm(hashAlgorithm)),
		Value:     swag.String(hashValue),
	}

	// Verify the signature before proposing the entry.
	if _, _, err := re.validate(); err != nil {
		return nil, err
	}

	returnVal.APIVersion = swag.String(re.APIVersion())
	returnVal.Spec = re.HashedRekordObj

	return &returnVal, nil
}

// Verifiers returns the single x509 public key stored on the entry.
func (v V001Entry) Verifiers() ([]pki.PublicKey, error) {
	if v.HashedRekordObj.Signature == nil || v.HashedRekordObj.Signature.PublicKey == nil || v.HashedRekordObj.Signature.PublicKey.Content == nil {
		return nil, errors.New("hashedrekord v0.0.1 entry not initialized")
	}
	key, err := x509.NewPublicKey(bytes.NewReader(v.HashedRekordObj.Signature.PublicKey.Content))
	if err != nil {
		return nil, err
	}
	return []pki.PublicKey{key}, nil
}

// ArtifactHash returns the lowercased "algorithm:value" digest of the artifact.
func (v V001Entry) ArtifactHash() (string, error) {
	if v.HashedRekordObj.Data == nil || v.HashedRekordObj.Data.Hash == nil || v.HashedRekordObj.Data.Hash.Value == nil || v.HashedRekordObj.Data.Hash.Algorithm == nil {
		return "", errors.New("hashedrekord v0.0.1 entry not initialized")
	}
	return strings.ToLower(fmt.Sprintf("%s:%s", *v.HashedRekordObj.Data.Hash.Algorithm, *v.HashedRekordObj.Data.Hash.Value)), nil
}

// Insertable reports whether all client-required fields (signature, public key,
// and artifact hash) are populated.
func (v V001Entry) Insertable() (bool, error) {
	if v.HashedRekordObj.Signature == nil {
		return false, errors.New("missing signature property")
	}
	if len(v.HashedRekordObj.Signature.Content) == 0 {
		return false, errors.New("missing signature content")
	}
	if v.HashedRekordObj.Signature.PublicKey == nil {
		return false, errors.New("missing publicKey property")
	}
	if len(v.HashedRekordObj.Signature.PublicKey.Content) == 0 {
		return false, errors.New("missing publicKey content")
	}
	if v.HashedRekordObj.Data == nil {
		return false, errors.New("missing data property")
	}
	if v.HashedRekordObj.Data.Hash == nil {
		return false, errors.New("missing hash property")
	}
	if v.HashedRekordObj.Data.Hash.Algorithm == nil {
		return false, errors.New("missing hash algorithm")
	}
	if v.HashedRekordObj.Data.Hash.Value == nil {
		return false, errors.New("missing hash value")
	}
	return true, nil
}
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package helm

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	"github.com/sigstore/rekor/pkg/generated/models"
	"github.com/sigstore/rekor/pkg/log"
	"github.com/sigstore/rekor/pkg/pki"
	"github.com/sigstore/rekor/pkg/pki/pgp"
	"github.com/sigstore/rekor/pkg/types"
	"github.com/sigstore/rekor/pkg/types/helm"
	"github.com/sigstore/rekor/pkg/util"
	"golang.org/x/sync/errgroup"
)

const (
	APIVERSION = "0.0.1"
)

func init() {
	// Register this implementation as the factory for helm v0.0.1 entries.
	if err := helm.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil {
		log.Logger.Panic(err)
	}
}

// V001Entry implements the helm v0.0.1 entry type: a Helm chart provenance file
// signed with a PGP key.
type V001Entry struct {
	HelmObj models.HelmV001Schema
}

// APIVersion returns the schema version this entry implements.
func (v V001Entry) APIVersion() string {
	return APIVERSION
}

// NewEntry returns an empty helm v0.0.1 entry implementation.
func NewEntry() types.EntryImpl {
	return &V001Entry{}
}

// IndexKeys returns the search-index keys for this entry: the SHA-256 of the
// canonical PGP key, the key's subjects, and (best-effort) the chart digest.
func (v V001Entry) IndexKeys() ([]string, error) {
	var result []string

	keyObj, err := pgp.NewPublicKey(bytes.NewReader(*v.HelmObj.PublicKey.Content))
	if err != nil {
		return nil, err
	}

	provenance := helm.Provenance{}
	if err := provenance.Unmarshal(bytes.NewReader(v.HelmObj.Chart.Provenance.Content)); err != nil {
		return nil, err
	}

	key, err := keyObj.CanonicalValue()
	if err != nil {
		return nil, err
	}

	keyHash := sha256.Sum256(key)
	result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:])))
	result = append(result, keyObj.Subjects()...)

	// A missing chart hash is logged but does not fail index-key generation.
	algorithm, chartHash, err := provenance.GetChartAlgorithmHash()
	if err != nil {
		log.Logger.Error(err)
	} else {
		hashKey := strings.ToLower(fmt.Sprintf("%s:%s", algorithm, chartHash))
		result = append(result, hashKey)
	}

	return result, nil
}

// Unmarshal decodes a proposed entry into this implementation, running both
// schema-level and cross-field validation.
func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error {
	helm, ok := pe.(*models.Helm)
	if !ok {
		return errors.New("cannot unmarshal non Helm v0.0.1 type")
	}

	if err := types.DecodeEntry(helm.Spec, &v.HelmObj); err != nil {
		return err
	}

	// field validation
	if err := v.HelmObj.Validate(strfmt.Default); err != nil {
		return err
	}

	// cross field validation
	return v.validate()
}

// fetchExternalEntities parses the provenance file and public key concurrently,
// then verifies the provenance's armored signature against the key. It returns
// the parsed provenance, key, and signature, or the first error encountered.
func (v *V001Entry) fetchExternalEntities(ctx context.Context) (*helm.Provenance, *pgp.PublicKey, *pgp.Signature, error) {
	if err := v.validate(); err != nil {
		return nil, nil, nil, &types.InputValidationError{Err: err}
	}

	g, ctx := errgroup.WithContext(ctx)

	provenanceR, provenanceW := io.Pipe()
	defer provenanceR.Close()

	// closePipesOnError tears down the pipes so sibling goroutines unblock on failure.
	closePipesOnError := types.PipeCloser(provenanceR, provenanceW)

	// Goroutine 1: stream the provenance bytes into the pipe.
	g.Go(func() error {
		defer provenanceW.Close()

		dataReadCloser := bytes.NewReader(v.HelmObj.Chart.Provenance.Content)

		/* #nosec G110 */
		if _, err := io.Copy(provenanceW, dataReadCloser); err != nil {
			return closePipesOnError(err)
		}
		return nil
	})

	// Goroutine 2: parse the PGP public key and hand it over via keyResult.
	keyResult := make(chan *pgp.PublicKey)

	g.Go(func() error {
		defer close(keyResult)
		keyReadCloser := bytes.NewReader(*v.HelmObj.PublicKey.Content)
		keyObj, err := pgp.NewPublicKey(keyReadCloser)
		if err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case keyResult <- keyObj:
			return nil
		}
	})

	var key *pgp.PublicKey
	provenance := &helm.Provenance{}
	var sig *pgp.Signature

	// Goroutine 3: parse the provenance from the pipe, receive the key, then
	// verify the signature over the provenance block.
	g.Go(func() error {
		if err := provenance.Unmarshal(provenanceR); err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		key = <-keyResult
		if key == nil {
			return closePipesOnError(errors.New("error processing public key"))
		}

		// Set signature
		var err error
		sig, err = pgp.NewSignature(provenance.Block.ArmoredSignature.Body)
		if err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		// Verify signature
		if err := sig.Verify(bytes.NewReader(provenance.Block.Bytes), key); err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			return nil
		}
	})

	if err := g.Wait(); err != nil {
		return nil, nil, nil, err
	}

	return provenance, key, sig, nil
}

// Canonicalize verifies the entry and returns its canonical JSON form, with
// the key and signature normalized and the chart hash extracted server-side.
func (v *V001Entry) Canonicalize(ctx context.Context) ([]byte, error) {
	provenanceObj, keyObj, sigObj, err := v.fetchExternalEntities(ctx)
	if err != nil {
		return nil, err
	}

	if keyObj == nil {
		return nil, errors.New("key object not initialized before canonicalization")
	}

	canonicalEntry := models.HelmV001Schema{}
	canonicalEntry.PublicKey = &models.HelmV001SchemaPublicKey{}

	keyContent, err := keyObj.CanonicalValue()
	if err != nil {
		return nil, err
	}
	canonicalEntry.PublicKey.Content = (*strfmt.Base64)(&keyContent)

	canonicalEntry.Chart = &models.HelmV001SchemaChart{}

	algorithm, chartHash, err := provenanceObj.GetChartAlgorithmHash()
	if err != nil {
		return nil, err
	}

	canonicalEntry.Chart.Hash = &models.HelmV001SchemaChartHash{}
	canonicalEntry.Chart.Hash.Algorithm = &algorithm
	canonicalEntry.Chart.Hash.Value = &chartHash

	canonicalEntry.Chart.Provenance = &models.HelmV001SchemaChartProvenance{}
	canonicalEntry.Chart.Provenance.Signature = &models.HelmV001SchemaChartProvenanceSignature{}

	sigContent, err := sigObj.CanonicalValue()
	if err != nil {
		return nil, err
	}
	canonicalEntry.Chart.Provenance.Signature.Content = sigContent

	// wrap in valid object with kind and apiVersion set
	helmObj := models.Helm{}
	helmObj.APIVersion = swag.String(APIVERSION)
	helmObj.Spec = &canonicalEntry

	return json.Marshal(&helmObj)
}

// validate performs cross-field validation for fields in object
func (v V001Entry) validate() error {
	key := v.HelmObj.PublicKey
	if key == nil {
		return errors.New("missing public key")
	}

	if key.Content == nil || len(*key.Content) == 0 {
		return errors.New("'content' must be specified for publicKey")
	}

	chart := v.HelmObj.Chart
	if chart == nil {
		return errors.New("missing chart")
	}

	provenance := chart.Provenance
	if provenance == nil {
		return errors.New("missing provenance")
	}

	// Either a parsed signature or raw provenance content must be present.
	if provenance.Signature == nil || provenance.Signature.Content == nil {
		if len(provenance.Content) == 0 {
			return errors.New("'content' must be specified for provenance")
		}
	}

	return nil
}

// CreateFromArtifactProperties builds a proposed helm entry from a provenance
// file (inline, local path, or URL) and exactly one PGP public key; the
// provenance signature is verified before the proposed entry is returned.
func (v V001Entry) CreateFromArtifactProperties(ctx context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) {
	//TODO: how to select version of item to create
	returnVal := models.Helm{}
	re := V001Entry{}

	// we will need provenance file and public-key
	re.HelmObj = models.HelmV001Schema{}
	re.HelmObj.Chart = &models.HelmV001SchemaChart{}
	re.HelmObj.Chart.Provenance = &models.HelmV001SchemaChartProvenance{}

	var err error
	artifactBytes := props.ArtifactBytes
	if artifactBytes == nil {
		var artifactReader io.ReadCloser
		if props.ArtifactPath == nil {
			return nil, errors.New("path to artifact file must be specified")
		}
		if props.ArtifactPath.IsAbs() {
			// Unlike dsse/intoto, helm provenance may be fetched over HTTP(S).
			artifactReader, err = util.FileOrURLReadCloser(ctx, props.ArtifactPath.String(), nil)
			if err != nil {
				return nil, fmt.Errorf("error reading chart file: %w", err)
			}
		} else {
			artifactReader, err = os.Open(filepath.Clean(props.ArtifactPath.Path))
			if err != nil {
				return nil, fmt.Errorf("error opening chart file: %w", err)
			}
		}
		artifactBytes, err = io.ReadAll(artifactReader)
		if err != nil {
			return nil, fmt.Errorf("error reading chart file: %w", err)
		}
	}
	re.HelmObj.Chart.Provenance.Content = strfmt.Base64(artifactBytes)

	re.HelmObj.PublicKey = &models.HelmV001SchemaPublicKey{}
	publicKeyBytes := props.PublicKeyBytes
	if len(publicKeyBytes) == 0 {
		if len(props.PublicKeyPaths) != 1 {
			return nil, errors.New("only one public key must be provided")
		}
		keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path))
		if err != nil {
			return nil, fmt.Errorf("error reading public key file: %w", err)
		}
		publicKeyBytes = append(publicKeyBytes, keyBytes)
	} else if len(publicKeyBytes) != 1 {
		return nil, errors.New("only one public key must be provided")
	}

	re.HelmObj.PublicKey.Content = (*strfmt.Base64)(&publicKeyBytes[0])

	if err := re.validate(); err != nil {
		return nil, err
	}

	// Verify the provenance signature up front so invalid proposals fail fast.
	if _, _, _, err := re.fetchExternalEntities(ctx); err != nil {
		return nil, fmt.Errorf("error retrieving external entities: %w", err)
	}

	returnVal.APIVersion = swag.String(re.APIVersion())
	returnVal.Spec = re.HelmObj

	return &returnVal, nil
}

// Verifiers returns the single PGP public key stored on the entry.
func (v V001Entry) Verifiers() ([]pki.PublicKey, error) {
	if v.HelmObj.PublicKey == nil || v.HelmObj.PublicKey.Content == nil {
		return nil, errors.New("helm v0.0.1 entry not initialized")
	}
	key, err := pgp.NewPublicKey(bytes.NewReader(*v.HelmObj.PublicKey.Content))
	if err != nil {
		return nil, err
	}
	return []pki.PublicKey{key}, nil
}

// ArtifactHash returns the lowercased "algorithm:value" digest of the chart.
func (v V001Entry) ArtifactHash() (string, error) {
	if v.HelmObj.Chart == nil || v.HelmObj.Chart.Hash == nil || v.HelmObj.Chart.Hash.Algorithm == nil || v.HelmObj.Chart.Hash.Value == nil {
		return "", errors.New("helm v0.0.1 entry not initialized")
	}
	return strings.ToLower(fmt.Sprintf("%s:%s", *v.HelmObj.Chart.Hash.Algorithm, *v.HelmObj.Chart.Hash.Value)), nil
}

// Insertable reports whether all client-required fields (public key and
// provenance content) are populated.
func (v V001Entry) Insertable() (bool, error) {
	if v.HelmObj.PublicKey == nil {
		return false, errors.New("missing public key property")
	}
	if v.HelmObj.PublicKey.Content == nil || len(*v.HelmObj.PublicKey.Content) == 0 {
		return false, errors.New("missing public key content")
	}
	if v.HelmObj.Chart == nil {
		return false, errors.New("missing chart property")
	}
	if v.HelmObj.Chart.Provenance == nil {
		return false, errors.New("missing provenance property")
	}
	if len(v.HelmObj.Chart.Provenance.Content) == 0 {
		return false, errors.New("missing provenance content")
	}
	return true, nil
}
// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package intoto import ( "bytes" "context" "crypto" "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "os" "path/filepath" "strings" "github.com/in-toto/in-toto-golang/in_toto" "github.com/secure-systems-lab/go-securesystemslib/dsse" "github.com/spf13/viper" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/rekor/pkg/log" "github.com/sigstore/rekor/pkg/pki" "github.com/sigstore/rekor/pkg/pki/x509" "github.com/sigstore/rekor/pkg/types" "github.com/sigstore/rekor/pkg/types/intoto" "github.com/sigstore/sigstore/pkg/signature" dsse_verifier "github.com/sigstore/sigstore/pkg/signature/dsse" ) const ( APIVERSION = "0.0.1" ) func init() { if err := intoto.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { log.Logger.Panic(err) } } type V001Entry struct { IntotoObj models.IntotoV001Schema keyObj pki.PublicKey env dsse.Envelope } func (v V001Entry) APIVersion() string { return APIVERSION } func NewEntry() types.EntryImpl { return &V001Entry{} } func (v V001Entry) IndexKeys() ([]string, error) { var result []string // add digest over entire DSSE envelope if v.IntotoObj.Content != nil && v.IntotoObj.Content.Hash != nil { hashkey := strings.ToLower(fmt.Sprintf("%s:%s", swag.StringValue(v.IntotoObj.Content.Hash.Algorithm), swag.StringValue(v.IntotoObj.Content.Hash.Value))) 
result = append(result, hashkey) } else { log.Logger.Error("could not find content digest to include in index keys") } // add digest over public key if v.keyObj != nil { key, err := v.keyObj.CanonicalValue() if err == nil { keyHash := sha256.Sum256(key) result = append(result, fmt.Sprintf("sha256:%s", strings.ToLower(hex.EncodeToString(keyHash[:])))) // add digest over any subjects within signing certificate result = append(result, v.keyObj.Subjects()...) } else { log.Logger.Errorf("could not canonicalize public key to include in index keys: %w", err) } } else { log.Logger.Error("could not find public key to include in index keys") } // add digest base64-decoded payload inside of DSSE envelope if v.IntotoObj.Content != nil && v.IntotoObj.Content.PayloadHash != nil { payloadHash := strings.ToLower(fmt.Sprintf("%s:%s", swag.StringValue(v.IntotoObj.Content.PayloadHash.Algorithm), swag.StringValue(v.IntotoObj.Content.PayloadHash.Value))) result = append(result, payloadHash) } else { log.Logger.Error("could not find payload digest to include in index keys") } switch v.env.PayloadType { case in_toto.PayloadType: statement, err := parseStatement(v.env.Payload) if err != nil { log.Logger.Errorf("error parsing payload as intoto statement: %w", err) break } for _, s := range statement.Subject { for alg, ds := range s.Digest { result = append(result, alg+":"+ds) } } // Not all in-toto statements will contain a SLSA provenance predicate. // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate // for other predicates. 
if predicate, err := parseSlsaPredicate(v.env.Payload); err == nil { if predicate.Predicate.Materials != nil { for _, s := range predicate.Predicate.Materials { for alg, ds := range s.Digest { result = append(result, alg+":"+ds) } } } } default: log.Logger.Infof("unknown in_toto statement type (%s), cannot extract additional index keys", v.env.PayloadType) } return result, nil } func parseStatement(p string) (*in_toto.Statement, error) { ps := in_toto.Statement{} payload, err := base64.StdEncoding.DecodeString(p) if err != nil { return nil, err } if err := json.Unmarshal(payload, &ps); err != nil { return nil, err } return &ps, nil } func parseSlsaPredicate(p string) (*in_toto.ProvenanceStatement, error) { predicate := in_toto.ProvenanceStatement{} payload, err := base64.StdEncoding.DecodeString(p) if err != nil { return nil, err } if err := json.Unmarshal(payload, &predicate); err != nil { return nil, err } return &predicate, nil } func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { it, ok := pe.(*models.Intoto) if !ok { return errors.New("cannot unmarshal non Intoto v0.0.1 type") } var err error if err := types.DecodeEntry(it.Spec, &v.IntotoObj); err != nil { return err } // field validation if err := v.IntotoObj.Validate(strfmt.Default); err != nil { return err } v.keyObj, err = x509.NewPublicKey(bytes.NewReader(*v.IntotoObj.PublicKey)) if err != nil { return err } return v.validate() } func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) { if v.keyObj == nil { return nil, errors.New("cannot canonicalize empty key") } if v.IntotoObj.Content == nil { return nil, errors.New("missing content") } if v.IntotoObj.Content.Hash == nil { return nil, errors.New("missing envelope hash") } // PayloadHash is not present for old entries pk, err := v.keyObj.CanonicalValue() if err != nil { return nil, err } pkb := strfmt.Base64(pk) canonicalEntry := models.IntotoV001Schema{ PublicKey: &pkb, Content: &models.IntotoV001SchemaContent{ Hash: 
&models.IntotoV001SchemaContentHash{ Algorithm: v.IntotoObj.Content.Hash.Algorithm, Value: v.IntotoObj.Content.Hash.Value, }, }, } // Set PayloadHash if present if v.IntotoObj.Content.PayloadHash != nil { canonicalEntry.Content.PayloadHash = &models.IntotoV001SchemaContentPayloadHash{ Algorithm: v.IntotoObj.Content.PayloadHash.Algorithm, Value: v.IntotoObj.Content.PayloadHash.Value, } } itObj := models.Intoto{} itObj.APIVersion = swag.String(APIVERSION) itObj.Spec = &canonicalEntry return json.Marshal(&itObj) } // validate performs cross-field validation for fields in object func (v *V001Entry) validate() error { // TODO handle multiple pk := v.keyObj.(*x509.PublicKey) // one of two cases must be true: // - ProposedEntry: client gives an envelope; (client provided hash/payloadhash are ignored as they are computed server-side) OR // - CommittedEntry: NO envelope and hash/payloadHash must be present if v.IntotoObj.Content.Envelope == "" { if v.IntotoObj.Content.Hash == nil { return fmt.Errorf("missing hash value for envelope") } else if err := v.IntotoObj.Content.Hash.Validate(strfmt.Default); err != nil { return fmt.Errorf("validation error on envelope hash: %w", err) } // PayloadHash is not present for old entries if v.IntotoObj.Content.PayloadHash != nil { if err := v.IntotoObj.Content.PayloadHash.Validate(strfmt.Default); err != nil { return fmt.Errorf("validation error on payload hash: %w", err) } } // if there is no envelope, and hash/payloadHash are valid, then there's nothing else to do here return nil } vfr, err := signature.LoadVerifier(pk.CryptoPubKey(), crypto.SHA256) if err != nil { return err } dsseVerifier := dsse_verifier.WrapVerifier(vfr) if err := dsseVerifier.VerifySignature(strings.NewReader(v.IntotoObj.Content.Envelope), nil); err != nil { return err } if err := json.Unmarshal([]byte(v.IntotoObj.Content.Envelope), &v.env); err != nil { return err } attBytes, err := base64.StdEncoding.DecodeString(v.env.Payload) if err != nil { return err } // 
validation logic complete without errors, hydrate local object attHash := sha256.Sum256(attBytes) v.IntotoObj.Content.PayloadHash = &models.IntotoV001SchemaContentPayloadHash{ Algorithm: swag.String(models.IntotoV001SchemaContentPayloadHashAlgorithmSha256), Value: swag.String(hex.EncodeToString(attHash[:])), } h := sha256.Sum256([]byte(v.IntotoObj.Content.Envelope)) v.IntotoObj.Content.Hash = &models.IntotoV001SchemaContentHash{ Algorithm: swag.String(models.IntotoV001SchemaContentHashAlgorithmSha256), Value: swag.String(hex.EncodeToString(h[:])), } return nil } // AttestationKey returns the digest of the attestation that was uploaded, to be used to lookup the attestation from storage func (v *V001Entry) AttestationKey() string { if v.IntotoObj.Content != nil && v.IntotoObj.Content.PayloadHash != nil { return fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value) } return "" } // AttestationKeyValue returns both the key and value to be persisted into attestation storage func (v *V001Entry) AttestationKeyValue() (string, []byte) { storageSize := base64.StdEncoding.DecodedLen(len(v.env.Payload)) if storageSize > viper.GetInt("max_attestation_size") { log.Logger.Infof("Skipping attestation storage, size %d is greater than max %d", storageSize, viper.GetInt("max_attestation_size")) return "", nil } attBytes, _ := base64.StdEncoding.DecodeString(v.env.Payload) return v.AttestationKey(), attBytes } func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { returnVal := models.Intoto{} var err error artifactBytes := props.ArtifactBytes if artifactBytes == nil { if props.ArtifactPath == nil { return nil, errors.New("path to artifact file must be specified") } if props.ArtifactPath.IsAbs() { return nil, errors.New("intoto envelopes cannot be fetched over HTTP(S)") } artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path)) if err != nil { 
return nil, err } } publicKeyBytes := props.PublicKeyBytes if len(publicKeyBytes) == 0 { if len(props.PublicKeyPaths) != 1 { return nil, errors.New("only one public key must be provided to verify signature") } keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path)) if err != nil { return nil, fmt.Errorf("error reading public key file: %w", err) } publicKeyBytes = append(publicKeyBytes, keyBytes) } else if len(publicKeyBytes) != 1 { return nil, errors.New("only one public key must be provided") } kb := strfmt.Base64(publicKeyBytes[0]) re := V001Entry{ IntotoObj: models.IntotoV001Schema{ Content: &models.IntotoV001SchemaContent{ Envelope: string(artifactBytes), }, PublicKey: &kb, }, } h := sha256.Sum256([]byte(re.IntotoObj.Content.Envelope)) re.IntotoObj.Content.Hash = &models.IntotoV001SchemaContentHash{ Algorithm: swag.String(models.IntotoV001SchemaContentHashAlgorithmSha256), Value: swag.String(hex.EncodeToString(h[:])), } returnVal.Spec = re.IntotoObj returnVal.APIVersion = swag.String(re.APIVersion()) return &returnVal, nil } func (v V001Entry) Verifiers() ([]pki.PublicKey, error) { if v.IntotoObj.PublicKey == nil { return nil, errors.New("intoto v0.0.1 entry not initialized") } key, err := x509.NewPublicKey(bytes.NewReader(*v.IntotoObj.PublicKey)) if err != nil { return nil, err } return []pki.PublicKey{key}, nil } func (v V001Entry) ArtifactHash() (string, error) { if v.IntotoObj.Content == nil || v.IntotoObj.Content.PayloadHash == nil || v.IntotoObj.Content.PayloadHash.Algorithm == nil || v.IntotoObj.Content.PayloadHash.Value == nil { return "", errors.New("hashedrekord v0.0.1 entry not initialized") } return strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value)), nil } func (v V001Entry) Insertable() (bool, error) { if v.IntotoObj.Content == nil { return false, errors.New("missing content property") } if len(v.IntotoObj.Content.Envelope) == 0 { return false, 
errors.New("missing envelope content") } if v.IntotoObj.PublicKey == nil || len(*v.IntotoObj.PublicKey) == 0 { return false, errors.New("missing publicKey content") } if v.keyObj == nil { return false, errors.New("failed to parse public key") } if v.env.Payload == "" || v.env.PayloadType == "" || len(v.env.Signatures) == 0 { return false, errors.New("invalid DSSE envelope") } return true, nil }
// // Copyright 2022 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package intoto import ( "bytes" "context" "crypto" "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "os" "path/filepath" "strings" "github.com/in-toto/in-toto-golang/in_toto" "github.com/secure-systems-lab/go-securesystemslib/dsse" "github.com/spf13/viper" "golang.org/x/exp/slices" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/rekor/pkg/log" "github.com/sigstore/rekor/pkg/pki" "github.com/sigstore/rekor/pkg/pki/x509" "github.com/sigstore/rekor/pkg/types" "github.com/sigstore/rekor/pkg/types/intoto" "github.com/sigstore/sigstore/pkg/signature" "github.com/sigstore/sigstore/pkg/signature/options" ) const ( APIVERSION = "0.0.2" ) func init() { if err := intoto.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { log.Logger.Panic(err) } } type V002Entry struct { IntotoObj models.IntotoV002Schema env dsse.Envelope } func (v V002Entry) APIVersion() string { return APIVERSION } func NewEntry() types.EntryImpl { return &V002Entry{} } func (v V002Entry) IndexKeys() ([]string, error) { var result []string if v.IntotoObj.Content == nil || v.IntotoObj.Content.Envelope == nil { log.Logger.Info("IntotoObj content or dsse envelope is nil") return result, nil } for _, sig := range v.IntotoObj.Content.Envelope.Signatures { if sig == nil || sig.PublicKey == nil { return 
result, errors.New("malformed or missing signature") } keyObj, err := x509.NewPublicKey(bytes.NewReader(*sig.PublicKey)) if err != nil { return result, err } canonKey, err := keyObj.CanonicalValue() if err != nil { return result, fmt.Errorf("could not canonicize key: %w", err) } keyHash := sha256.Sum256(canonKey) result = append(result, "sha256:"+hex.EncodeToString(keyHash[:])) result = append(result, keyObj.Subjects()...) } payloadKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value)) result = append(result, payloadKey) // since we can't deterministically calculate this server-side (due to public keys being added inline, and also canonicalization being potentially different), // we'll just skip adding this index key // hashkey := strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.Hash.Algorithm, *v.IntotoObj.Content.Hash.Value)) // result = append(result, hashkey) switch *v.IntotoObj.Content.Envelope.PayloadType { case in_toto.PayloadType: if v.IntotoObj.Content.Envelope.Payload == nil { log.Logger.Info("IntotoObj DSSE payload is empty") return result, nil } decodedPayload, err := base64.StdEncoding.DecodeString(string(v.IntotoObj.Content.Envelope.Payload)) if err != nil { return result, fmt.Errorf("could not decode envelope payload: %w", err) } statement, err := parseStatement(decodedPayload) if err != nil { return result, err } for _, s := range statement.Subject { for alg, ds := range s.Digest { result = append(result, alg+":"+ds) } } // Not all in-toto statements will contain a SLSA provenance predicate. // See https://github.com/in-toto/attestation/blob/main/spec/README.md#predicate // for other predicates. 
if predicate, err := parseSlsaPredicate(decodedPayload); err == nil { if predicate.Predicate.Materials != nil { for _, s := range predicate.Predicate.Materials { for alg, ds := range s.Digest { result = append(result, alg+":"+ds) } } } } default: log.Logger.Infof("Unknown in_toto DSSE envelope Type: %s", *v.IntotoObj.Content.Envelope.PayloadType) } return result, nil } func parseStatement(p []byte) (*in_toto.Statement, error) { ps := in_toto.Statement{} if err := json.Unmarshal(p, &ps); err != nil { return nil, err } return &ps, nil } func parseSlsaPredicate(p []byte) (*in_toto.ProvenanceStatement, error) { predicate := in_toto.ProvenanceStatement{} if err := json.Unmarshal(p, &predicate); err != nil { return nil, err } return &predicate, nil } func (v *V002Entry) Unmarshal(pe models.ProposedEntry) error { it, ok := pe.(*models.Intoto) if !ok { return errors.New("cannot unmarshal non Intoto v0.0.2 type") } var err error if err := types.DecodeEntry(it.Spec, &v.IntotoObj); err != nil { return err } // field validation if err := v.IntotoObj.Validate(strfmt.Default); err != nil { return err } if string(v.IntotoObj.Content.Envelope.Payload) == "" { return nil } env := &dsse.Envelope{ Payload: string(v.IntotoObj.Content.Envelope.Payload), PayloadType: *v.IntotoObj.Content.Envelope.PayloadType, } allPubKeyBytes := make([][]byte, 0) for i, sig := range v.IntotoObj.Content.Envelope.Signatures { if sig == nil { v.IntotoObj.Content.Envelope.Signatures = slices.Delete(v.IntotoObj.Content.Envelope.Signatures, i, i) continue } env.Signatures = append(env.Signatures, dsse.Signature{ KeyID: sig.Keyid, Sig: string(*sig.Sig), }) allPubKeyBytes = append(allPubKeyBytes, *sig.PublicKey) } if _, err := verifyEnvelope(allPubKeyBytes, env); err != nil { return err } v.env = *env decodedPayload, err := base64.StdEncoding.DecodeString(string(v.IntotoObj.Content.Envelope.Payload)) if err != nil { return fmt.Errorf("could not decode envelope payload: %w", err) } h := 
sha256.Sum256(decodedPayload) v.IntotoObj.Content.PayloadHash = &models.IntotoV002SchemaContentPayloadHash{ Algorithm: swag.String(models.IntotoV002SchemaContentPayloadHashAlgorithmSha256), Value: swag.String(hex.EncodeToString(h[:])), } return nil } func (v *V002Entry) Canonicalize(_ context.Context) ([]byte, error) { if err := v.IntotoObj.Validate(strfmt.Default); err != nil { return nil, err } if v.IntotoObj.Content.Hash == nil { return nil, errors.New("missing envelope digest") } if err := v.IntotoObj.Content.Hash.Validate(strfmt.Default); err != nil { return nil, fmt.Errorf("error validating envelope digest: %w", err) } if v.IntotoObj.Content.PayloadHash == nil { return nil, errors.New("missing payload digest") } if err := v.IntotoObj.Content.PayloadHash.Validate(strfmt.Default); err != nil { return nil, fmt.Errorf("error validating payload digest: %w", err) } if len(v.IntotoObj.Content.Envelope.Signatures) == 0 { return nil, errors.New("missing signatures") } canonicalEntry := models.IntotoV002Schema{ Content: &models.IntotoV002SchemaContent{ Envelope: &models.IntotoV002SchemaContentEnvelope{ PayloadType: v.IntotoObj.Content.Envelope.PayloadType, Signatures: v.IntotoObj.Content.Envelope.Signatures, }, Hash: v.IntotoObj.Content.Hash, PayloadHash: v.IntotoObj.Content.PayloadHash, }, } itObj := models.Intoto{} itObj.APIVersion = swag.String(APIVERSION) itObj.Spec = &canonicalEntry return json.Marshal(&itObj) } // AttestationKey returns the digest of the attestation that was uploaded, to be used to lookup the attestation from storage func (v *V002Entry) AttestationKey() string { if v.IntotoObj.Content != nil && v.IntotoObj.Content.PayloadHash != nil { return fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value) } return "" } // AttestationKeyValue returns both the key and value to be persisted into attestation storage func (v *V002Entry) AttestationKeyValue() (string, []byte) { storageSize := 
base64.StdEncoding.DecodedLen(len(v.env.Payload)) if storageSize > viper.GetInt("max_attestation_size") { log.Logger.Infof("Skipping attestation storage, size %d is greater than max %d", storageSize, viper.GetInt("max_attestation_size")) return "", nil } attBytes, err := base64.StdEncoding.DecodeString(v.env.Payload) if err != nil { log.Logger.Infof("could not decode envelope payload: %w", err) return "", nil } return v.AttestationKey(), attBytes } type verifier struct { s signature.Signer v signature.Verifier } func (v *verifier) KeyID() (string, error) { return "", nil } func (v *verifier) Public() crypto.PublicKey { // the dsse library uses this to generate a key ID if the KeyID function returns an empty string // as well for the AcceptedKey return value. Unfortunately since key ids can be arbitrary, we don't // know how to generate a matching id for the key id on the envelope's signature... // dsse verify will skip verifiers whose key id doesn't match the signature's key id, unless it fails // to generate one from the public key... 
so we trick it by returning nil ¯\_(ツ)_/¯ return nil } func (v *verifier) Sign(_ context.Context, data []byte) (sig []byte, err error) { if v.s == nil { return nil, errors.New("nil signer") } sig, err = v.s.SignMessage(bytes.NewReader(data), options.WithCryptoSignerOpts(crypto.SHA256)) if err != nil { return nil, err } return sig, nil } func (v *verifier) Verify(_ context.Context, data, sig []byte) error { if v.v == nil { return errors.New("nil verifier") } return v.v.VerifySignature(bytes.NewReader(sig), bytes.NewReader(data)) } func (v V002Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { returnVal := models.Intoto{} re := V002Entry{ IntotoObj: models.IntotoV002Schema{ Content: &models.IntotoV002SchemaContent{ Envelope: &models.IntotoV002SchemaContentEnvelope{}, }, }} var err error artifactBytes := props.ArtifactBytes if artifactBytes == nil { if props.ArtifactPath == nil { return nil, errors.New("path to artifact file must be specified") } if props.ArtifactPath.IsAbs() { return nil, errors.New("intoto envelopes cannot be fetched over HTTP(S)") } artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path)) if err != nil { return nil, err } } env := dsse.Envelope{} if err := json.Unmarshal(artifactBytes, &env); err != nil { return nil, fmt.Errorf("payload must be a valid dsse envelope: %w", err) } allPubKeyBytes := make([][]byte, 0) if len(props.PublicKeyBytes) > 0 { allPubKeyBytes = append(allPubKeyBytes, props.PublicKeyBytes...) 
} if len(props.PublicKeyPaths) > 0 { for _, path := range props.PublicKeyPaths { if path.IsAbs() { return nil, errors.New("dsse public keys cannot be fetched over HTTP(S)") } publicKeyBytes, err := os.ReadFile(filepath.Clean(path.Path)) if err != nil { return nil, fmt.Errorf("error reading public key file: %w", err) } allPubKeyBytes = append(allPubKeyBytes, publicKeyBytes) } } keysBySig, err := verifyEnvelope(allPubKeyBytes, &env) if err != nil { return nil, err } b64 := strfmt.Base64([]byte(env.Payload)) re.IntotoObj.Content.Envelope.Payload = b64 re.IntotoObj.Content.Envelope.PayloadType = &env.PayloadType for _, sig := range env.Signatures { key, ok := keysBySig[sig.Sig] if !ok { return nil, errors.New("all signatures must have a key that verifies it") } canonKey, err := key.CanonicalValue() if err != nil { return nil, fmt.Errorf("could not canonicize key: %w", err) } keyBytes := strfmt.Base64(canonKey) sigBytes := strfmt.Base64([]byte(sig.Sig)) re.IntotoObj.Content.Envelope.Signatures = append(re.IntotoObj.Content.Envelope.Signatures, &models.IntotoV002SchemaContentEnvelopeSignaturesItems0{ Keyid: sig.KeyID, Sig: &sigBytes, PublicKey: &keyBytes, }) } h := sha256.Sum256([]byte(artifactBytes)) re.IntotoObj.Content.Hash = &models.IntotoV002SchemaContentHash{ Algorithm: swag.String(models.IntotoV001SchemaContentHashAlgorithmSha256), Value: swag.String(hex.EncodeToString(h[:])), } returnVal.Spec = re.IntotoObj returnVal.APIVersion = swag.String(re.APIVersion()) return &returnVal, nil } // verifyEnvelope takes in an array of possible key bytes and attempts to parse them as x509 public keys. // it then uses these to verify the envelope and makes sure that every signature on the envelope is verified. // it returns a map of verifiers indexed by the signature the verifier corresponds to. 
func verifyEnvelope(allPubKeyBytes [][]byte, env *dsse.Envelope) (map[string]*x509.PublicKey, error) { // generate a fake id for these keys so we can get back to the key bytes and match them to their corresponding signature verifierBySig := make(map[string]*x509.PublicKey) allSigs := make(map[string]struct{}) for _, sig := range env.Signatures { allSigs[sig.Sig] = struct{}{} } for _, pubKeyBytes := range allPubKeyBytes { key, err := x509.NewPublicKey(bytes.NewReader(pubKeyBytes)) if err != nil { return nil, fmt.Errorf("could not parse public key as x509: %w", err) } vfr, err := signature.LoadVerifier(key.CryptoPubKey(), crypto.SHA256) if err != nil { return nil, fmt.Errorf("could not load verifier: %w", err) } dsseVfr, err := dsse.NewEnvelopeVerifier(&verifier{ v: vfr, }) if err != nil { return nil, fmt.Errorf("could not use public key as a dsse verifier: %w", err) } accepted, err := dsseVfr.Verify(context.Background(), env) if err != nil { return nil, fmt.Errorf("could not verify envelope: %w", err) } for _, accept := range accepted { delete(allSigs, accept.Sig.Sig) verifierBySig[accept.Sig.Sig] = key } } if len(allSigs) > 0 { return nil, errors.New("all signatures must have a key that verifies it") } return verifierBySig, nil } func (v V002Entry) Verifiers() ([]pki.PublicKey, error) { if v.IntotoObj.Content == nil || v.IntotoObj.Content.Envelope == nil { return nil, errors.New("intoto v0.0.2 entry not initialized") } sigs := v.IntotoObj.Content.Envelope.Signatures if len(sigs) == 0 { return nil, errors.New("no signatures found on intoto entry") } var keys []pki.PublicKey for _, s := range v.IntotoObj.Content.Envelope.Signatures { key, err := x509.NewPublicKey(bytes.NewReader(*s.PublicKey)) if err != nil { return nil, err } keys = append(keys, key) } return keys, nil } func (v V002Entry) ArtifactHash() (string, error) { if v.IntotoObj.Content == nil || v.IntotoObj.Content.PayloadHash == nil || v.IntotoObj.Content.PayloadHash.Algorithm == nil || 
v.IntotoObj.Content.PayloadHash.Value == nil { return "", errors.New("intoto v0.0.2 entry not initialized") } return strings.ToLower(fmt.Sprintf("%s:%s", *v.IntotoObj.Content.PayloadHash.Algorithm, *v.IntotoObj.Content.PayloadHash.Value)), nil } func (v V002Entry) Insertable() (bool, error) { if v.IntotoObj.Content == nil { return false, errors.New("missing content property") } if v.IntotoObj.Content.Envelope == nil { return false, errors.New("missing envelope property") } if len(v.IntotoObj.Content.Envelope.Payload) == 0 { return false, errors.New("missing envelope content") } if v.IntotoObj.Content.Envelope.PayloadType == nil || len(*v.IntotoObj.Content.Envelope.PayloadType) == 0 { return false, errors.New("missing payloadType content") } if len(v.IntotoObj.Content.Envelope.Signatures) == 0 { return false, errors.New("missing signatures content") } for _, sig := range v.IntotoObj.Content.Envelope.Signatures { if sig == nil { return false, errors.New("missing signature entry") } if sig.Sig == nil || len(*sig.Sig) == 0 { return false, errors.New("missing signature content") } if sig.PublicKey == nil || len(*sig.PublicKey) == 0 { return false, errors.New("missing publicKey content") } } if v.env.Payload == "" || v.env.PayloadType == "" || len(v.env.Signatures) == 0 { return false, errors.New("invalid DSSE envelope") } return true, nil }
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package jar

import (
	"archive/zip"
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path"
	"path/filepath"
	"strings"

	"github.com/sigstore/rekor/pkg/log"
	"github.com/sigstore/rekor/pkg/pki"
	"github.com/sigstore/rekor/pkg/pki/pkcs7"
	"github.com/sigstore/rekor/pkg/pki/x509"
	"github.com/sigstore/rekor/pkg/types"
	"github.com/sigstore/rekor/pkg/types/jar"
	"github.com/sigstore/rekor/pkg/util"

	"github.com/asaskevich/govalidator"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	jarutils "github.com/sassoftware/relic/lib/signjar"
	"github.com/sigstore/rekor/pkg/generated/models"
	"github.com/spf13/viper"
)

const (
	APIVERSION = "0.0.1"
)

func init() {
	if err := jar.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil {
		log.Logger.Panic(err)
	}
}

// V001Entry is the v0.0.1 implementation of a signed JAR entry; the PKCS7
// signature and public key are embedded inside the archive itself.
type V001Entry struct {
	JARModel models.JarV001Schema
}

// APIVersion returns the schema version supported by this entry implementation.
func (v V001Entry) APIVersion() string {
	return APIVERSION
}

// NewEntry returns an empty v0.0.1 JAR entry.
func NewEntry() types.EntryImpl {
	return &V001Entry{}
}

// IndexKeys returns the searchable keys for the entry: the digest of the
// canonicalized PKCS7 signature and (when present) the archive digest.
func (v *V001Entry) IndexKeys() ([]string, error) {
	var result []string

	keyObj, err := pkcs7.NewSignature(bytes.NewReader(v.JARModel.Signature.Content))
	if err != nil {
		return nil, err
	}

	key, err := keyObj.CanonicalValue()
	if err != nil {
		return nil, err
	}

	keyHash := sha256.Sum256(key)
	result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:])))

	if v.JARModel.Archive.Hash != nil {
		hashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.JARModel.Archive.Hash.Algorithm, *v.JARModel.Archive.Hash.Value))
		result = append(result, hashKey)
	}

	return result, nil
}

// Unmarshal decodes a proposed entry into the receiver and runs schema and
// cross-field validation.
func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error {
	jar, ok := pe.(*models.Jar)
	if !ok {
		return errors.New("cannot unmarshal non JAR v0.0.1 type")
	}

	if err := types.DecodeEntry(jar.Spec, &v.JARModel); err != nil {
		return err
	}

	// field validation
	if err := v.JARModel.Validate(strfmt.Default); err != nil {
		return err
	}

	return v.validate()
}

// fetchExternalEntities checks the archive digest, bounds-checks and verifies
// the JAR's embedded signature, and returns the parsed public key and
// signature objects extracted from the archive.
func (v *V001Entry) fetchExternalEntities(_ context.Context) (*pkcs7.PublicKey, *pkcs7.Signature, error) {
	if err := v.validate(); err != nil {
		return nil, nil, &types.InputValidationError{Err: err}
	}

	oldSHA := ""
	if v.JARModel.Archive.Hash != nil && v.JARModel.Archive.Hash.Value != nil {
		oldSHA = swag.StringValue(v.JARModel.Archive.Hash.Value)
	}

	dataReadCloser := bytes.NewReader(v.JARModel.Archive.Content)

	// hash and buffer the archive in one pass; the buffer is re-read as a zip
	hasher := sha256.New()
	b := &bytes.Buffer{}

	n, err := io.Copy(io.MultiWriter(hasher, b), dataReadCloser)
	if err != nil {
		return nil, nil, err
	}

	computedSHA := hex.EncodeToString(hasher.Sum(nil))
	if oldSHA != "" && computedSHA != oldSHA {
		return nil, nil, &types.InputValidationError{Err: fmt.Errorf("SHA mismatch: %s != %s", computedSHA, oldSHA)}
	}

	zipReader, err := zip.NewReader(bytes.NewReader(b.Bytes()), n)
	if err != nil {
		return nil, nil, &types.InputValidationError{Err: err}
	}

	// Checking that uncompressed metadata files are within acceptable bounds before reading into memory.
	// Checks match those performed by the relic library in the jarutils.Verify method below. For example,
	// the META-INF/MANIFEST.MF is read into memory by the relic lib, but a META-INF/LICENSE file is not.
	for _, f := range zipReader.File {
		dir, name := path.Split(strings.ToUpper(f.Name))
		if dir != "META-INF/" || name == "" || strings.LastIndex(name, ".") < 0 {
			continue
		}
		if f.UncompressedSize64 > viper.GetUint64("max_jar_metadata_size") && viper.GetUint64("max_jar_metadata_size") > 0 {
			return nil, nil, &types.InputValidationError{Err: fmt.Errorf("uncompressed jar metadata of size %d exceeds max allowed size %d", f.UncompressedSize64, viper.GetUint64("max_jar_metadata_size"))}
		}
	}

	// this ensures that the JAR is signed and the signature verifies, as
	// well as checks that the hashes in the signed manifest are all valid
	jarObjs, err := jarutils.Verify(zipReader, false)
	if err != nil {
		return nil, nil, &types.InputValidationError{Err: err}
	}
	switch len(jarObjs) {
	case 0:
		return nil, nil, &types.InputValidationError{Err: errors.New("no signatures detected in JAR archive")}
	case 1:
	default:
		return nil, nil, &types.InputValidationError{Err: errors.New("multiple signatures detected in JAR; unable to process")}
	}

	// we need to find and extract the PKCS7 bundle from the JAR file manually
	sigPKCS7, err := extractPKCS7SignatureFromJAR(zipReader)
	if err != nil {
		return nil, nil, &types.InputValidationError{Err: err}
	}

	keyObj, err := pkcs7.NewPublicKey(bytes.NewReader(sigPKCS7))
	if err != nil {
		return nil, nil, &types.InputValidationError{Err: err}
	}

	sigObj, err := pkcs7.NewSignature(bytes.NewReader(sigPKCS7))
	if err != nil {
		return nil, nil, &types.InputValidationError{Err: err}
	}

	// verification succeeded; record the computed digest if the caller did
	// not supply one
	if oldSHA == "" {
		v.JARModel.Archive.Hash = &models.JarV001SchemaArchiveHash{
			Algorithm: swag.String(models.JarV001SchemaArchiveHashAlgorithmSha256),
			Value:     swag.String(computedSHA),
		}
	}

	return keyObj, sigObj, nil
}

// Canonicalize returns the canonical JSON form of the entry: canonicalized
// key and signature plus the archive digest (archive bytes are excluded).
func (v *V001Entry) Canonicalize(ctx context.Context) ([]byte, error) {
	keyObj, sigObj, err := v.fetchExternalEntities(ctx)
	if err != nil {
		return nil, err
	}

	// need to canonicalize key content
	keyContent, err := keyObj.CanonicalValue()
	if err != nil {
		return nil, err
	}
	sigContent, err := sigObj.CanonicalValue()
	if err != nil {
		return nil, err
	}

	canonicalEntry := models.JarV001Schema{
		Signature: &models.JarV001SchemaSignature{
			PublicKey: &models.JarV001SchemaSignaturePublicKey{
				Content: (*strfmt.Base64)(&keyContent),
			},
			Content: sigContent,
		},
		Archive: &models.JarV001SchemaArchive{
			Hash: &models.JarV001SchemaArchiveHash{
				Algorithm: v.JARModel.Archive.Hash.Algorithm,
				Value:     v.JARModel.Archive.Hash.Value,
			},
		},
	}
	// archive content is not set deliberately

	v.JARModel = canonicalEntry

	// wrap in valid object with kind and apiVersion set
	jar := models.Jar{}
	jar.APIVersion = swag.String(APIVERSION)
	jar.Spec = &canonicalEntry

	return json.Marshal(&jar)
}

// validate performs cross-field validation for fields in object
func (v *V001Entry) validate() error {
	archive := v.JARModel.Archive
	if archive == nil {
		return errors.New("missing package")
	}

	// if the signature isn't present, then we need content to extract
	if v.JARModel.Signature == nil || v.JARModel.Signature.Content == nil {
		if len(archive.Content) == 0 {
			return errors.New("'content' must be specified for package")
		}
	}

	hash := archive.Hash
	if hash != nil {
		if !govalidator.IsHash(swag.StringValue(hash.Value), swag.StringValue(hash.Algorithm)) {
			return errors.New("invalid value for hash")
		}
	}

	return nil
}

// extractPKCS7SignatureFromJAR extracts the first signature file from the JAR and returns it
func extractPKCS7SignatureFromJAR(inz *zip.Reader) ([]byte, error) {
	for _, f := range inz.File {
		dir, name := path.Split(strings.ToUpper(f.Name))
		if dir != "META-INF/" || name == "" {
			continue
		}
		i := strings.LastIndex(name, ".")
		if i < 0 {
			continue
		}
		fileExt := name[i:]
		// signature block files per the JAR spec: *.RSA, *.DSA, *.EC, SIG-*
		if fileExt == ".RSA" || fileExt == ".DSA" || fileExt == ".EC" || strings.HasPrefix(name, "SIG-") {
			fileReader, err := f.Open()
			if err != nil {
				return nil, err
			}
			contents, err := io.ReadAll(fileReader)
			if err != nil {
				return nil, err
			}
			if err = fileReader.Close(); err != nil {
				return nil, err
			}
			return contents, nil
		}
	}
	return nil, errors.New("unable to locate signature in JAR file")
}

// CreateFromArtifactProperties builds a proposed JAR entry from archive bytes,
// a local file, or a URL; the signature is extracted from the archive itself.
func (v *V001Entry) CreateFromArtifactProperties(ctx context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) {
	returnVal := models.Jar{}
	re := V001Entry{}

	// we will need only the artifact; public-key & signature are embedded in JAR
	re.JARModel = models.JarV001Schema{}
	re.JARModel.Archive = &models.JarV001SchemaArchive{}

	var err error
	artifactBytes := props.ArtifactBytes
	if artifactBytes == nil {
		var artifactReader io.ReadCloser
		if props.ArtifactPath == nil {
			return nil, errors.New("path to artifact file must be specified")
		}
		if props.ArtifactPath.IsAbs() {
			artifactReader, err = util.FileOrURLReadCloser(ctx, props.ArtifactPath.String(), nil)
			if err != nil {
				return nil, fmt.Errorf("error reading JAR file: %w", err)
			}
		} else {
			artifactReader, err = os.Open(filepath.Clean(props.ArtifactPath.Path))
			if err != nil {
				return nil, fmt.Errorf("error opening JAR file: %w", err)
			}
		}
		artifactBytes, err = io.ReadAll(artifactReader)
		if err != nil {
			return nil, fmt.Errorf("error reading JAR file: %w", err)
		}
	}
	re.JARModel.Archive.Content = (strfmt.Base64)(artifactBytes)

	if err := re.validate(); err != nil {
		return nil, err
	}

	// verifies the embedded signature before proposing the entry
	if _, _, err := re.fetchExternalEntities(ctx); err != nil {
		return nil, fmt.Errorf("error retrieving external entities: %w", err)
	}

	returnVal.APIVersion = swag.String(re.APIVersion())
	returnVal.Spec = re.JARModel

	return &returnVal, nil
}

// Verifiers returns the canonicalized public key (a PEM certificate, hence
// parsed via the x509 package) stored on the entry.
func (v V001Entry) Verifiers() ([]pki.PublicKey, error) {
	if v.JARModel.Signature == nil || v.JARModel.Signature.PublicKey == nil || v.JARModel.Signature.PublicKey.Content == nil {
		return nil, errors.New("jar v0.0.1 entry not initialized")
	}
	key, err := x509.NewPublicKey(bytes.NewReader(*v.JARModel.Signature.PublicKey.Content))
	if err != nil {
		return nil, err
	}
	return []pki.PublicKey{key}, nil
}

// ArtifactHash returns the lowercased "alg:digest" of the archive.
func (v V001Entry) ArtifactHash() (string, error) {
	if v.JARModel.Archive == nil || v.JARModel.Archive.Hash == nil || v.JARModel.Archive.Hash.Value == nil || v.JARModel.Archive.Hash.Algorithm == nil {
		return "", errors.New("jar v0.0.1 entry not initialized")
	}
	return strings.ToLower(fmt.Sprintf("%s:%s", *v.JARModel.Archive.Hash.Algorithm, *v.JARModel.Archive.Hash.Value)), nil
}

// Insertable reports whether the entry carries the archive bytes needed for
// insertion (everything else is derived from the archive).
func (v V001Entry) Insertable() (bool, error) {
	if v.JARModel.Archive == nil {
		return false, errors.New("missing archive property")
	}
	if len(v.JARModel.Archive.Content) == 0 {
		return false, errors.New("missing archive content")
	}
	return true, nil
}
//
// Copyright 2021 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package rekord

import (
	"bytes"
	"context"
	"crypto/sha256"
	"encoding/hex"
	"encoding/json"
	"errors"
	"fmt"
	"io"
	"os"
	"path/filepath"
	"strings"

	"github.com/asaskevich/govalidator"
	"github.com/go-openapi/strfmt"
	"github.com/go-openapi/swag"
	"golang.org/x/sync/errgroup"

	"github.com/sigstore/rekor/pkg/generated/models"
	"github.com/sigstore/rekor/pkg/log"
	"github.com/sigstore/rekor/pkg/pki"
	"github.com/sigstore/rekor/pkg/pki/minisign"
	"github.com/sigstore/rekor/pkg/pki/pgp"
	"github.com/sigstore/rekor/pkg/pki/ssh"
	"github.com/sigstore/rekor/pkg/pki/x509"
	"github.com/sigstore/rekor/pkg/types"
	"github.com/sigstore/rekor/pkg/types/rekord"
	"github.com/sigstore/rekor/pkg/util"
)

const (
	APIVERSION = "0.0.1"
)

func init() {
	if err := rekord.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil {
		log.Logger.Panic(err)
	}
}

// V001Entry is the v0.0.1 implementation of a rekord entry: an artifact plus
// a detached signature and public key in one of several PKI formats.
type V001Entry struct {
	RekordObj models.RekordV001Schema
}

// APIVersion returns the schema version supported by this entry implementation.
func (v V001Entry) APIVersion() string {
	return APIVERSION
}

// NewEntry returns an empty v0.0.1 rekord entry.
func NewEntry() types.EntryImpl {
	return &V001Entry{}
}

// IndexKeys returns the searchable keys for the entry: the digest and
// subjects of the public key, and (when present) the data digest.
func (v V001Entry) IndexKeys() ([]string, error) {
	var result []string

	af, err := pki.NewArtifactFactory(pki.Format(*v.RekordObj.Signature.Format))
	if err != nil {
		return nil, err
	}
	keyObj, err := af.NewPublicKey(bytes.NewReader(*v.RekordObj.Signature.PublicKey.Content))
	if err != nil {
		return nil, err
	}

	key, err := keyObj.CanonicalValue()
	if err != nil {
		// canonicalization failure only drops the key-hash index entry; the
		// subjects and data hash are still indexed below
		log.Logger.Error(err)
	} else {
		keyHash := sha256.Sum256(key)
		result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:])))
	}

	result = append(result, keyObj.Subjects()...)

	if v.RekordObj.Data.Hash != nil {
		hashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.RekordObj.Data.Hash.Algorithm, *v.RekordObj.Data.Hash.Value))
		result = append(result, hashKey)
	}

	return result, nil
}

// Unmarshal decodes a proposed entry into the receiver and runs schema and
// cross-field validation.
func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error {
	rekord, ok := pe.(*models.Rekord)
	if !ok {
		return errors.New("cannot unmarshal non Rekord v0.0.1 type")
	}

	if err := types.DecodeEntry(rekord.Spec, &v.RekordObj); err != nil {
		return err
	}

	// field validation
	if err := v.RekordObj.Validate(strfmt.Default); err != nil {
		return err
	}

	// cross field validation
	return v.validate()
}

// fetchExternalEntities concurrently hashes the data content, parses the
// signature and public key, and verifies the signature over the data. The
// data is streamed once through two pipes: one feeding the hasher, one
// feeding signature verification.
func (v *V001Entry) fetchExternalEntities(ctx context.Context) (pki.PublicKey, pki.Signature, error) {
	g, ctx := errgroup.WithContext(ctx)

	af, err := pki.NewArtifactFactory(pki.Format(*v.RekordObj.Signature.Format))
	if err != nil {
		return nil, nil, err
	}

	hashR, hashW := io.Pipe()
	sigR, sigW := io.Pipe()
	defer hashR.Close()
	defer sigR.Close()

	// closePipesOnError tears down all pipes so no goroutine blocks forever
	closePipesOnError := types.PipeCloser(hashR, hashW, sigR, sigW)

	oldSHA := ""
	if v.RekordObj.Data.Hash != nil && v.RekordObj.Data.Hash.Value != nil {
		oldSHA = swag.StringValue(v.RekordObj.Data.Hash.Value)
	}

	// producer: fan the data content out to both consumers
	g.Go(func() error {
		defer hashW.Close()
		defer sigW.Close()

		dataReadCloser := bytes.NewReader(v.RekordObj.Data.Content)

		/* #nosec G110 */
		if _, err := io.Copy(io.MultiWriter(hashW, sigW), dataReadCloser); err != nil {
			return closePipesOnError(err)
		}
		return nil
	})

	hashResult := make(chan string)

	// consumer: hash the data and check it against any caller-supplied digest
	g.Go(func() error {
		defer close(hashResult)
		hasher := sha256.New()

		if _, err := io.Copy(hasher, hashR); err != nil {
			return closePipesOnError(err)
		}

		computedSHA := hex.EncodeToString(hasher.Sum(nil))
		if oldSHA != "" && computedSHA != oldSHA {
			return closePipesOnError(&types.InputValidationError{Err: fmt.Errorf("SHA mismatch: %s != %s", computedSHA, oldSHA)})
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case hashResult <- computedSHA:
			return nil
		}
	})

	sigResult := make(chan pki.Signature)

	// parse the detached signature in the declared PKI format
	g.Go(func() error {
		defer close(sigResult)

		sigReadCloser := bytes.NewReader(*v.RekordObj.Signature.Content)

		signature, err := af.NewSignature(sigReadCloser)
		if err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case sigResult <- signature:
			return nil
		}
	})

	keyResult := make(chan pki.PublicKey)

	// parse the public key in the declared PKI format
	g.Go(func() error {
		defer close(keyResult)

		keyReadCloser := bytes.NewReader(*v.RekordObj.Signature.PublicKey.Content)

		key, err := af.NewPublicKey(keyReadCloser)
		if err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		case keyResult <- key:
			return nil
		}
	})

	var (
		keyObj pki.PublicKey
		sigObj pki.Signature
	)

	// consumer: verify the signature over the second copy of the data stream
	g.Go(func() error {
		keyObj, sigObj = <-keyResult, <-sigResult

		if keyObj == nil || sigObj == nil {
			return closePipesOnError(errors.New("failed to read signature or public key"))
		}

		var err error
		if err = sigObj.Verify(sigR, keyObj); err != nil {
			return closePipesOnError(&types.InputValidationError{Err: err})
		}

		select {
		case <-ctx.Done():
			return ctx.Err()
		default:
			return nil
		}
	})

	computedSHA := <-hashResult

	if err := g.Wait(); err != nil {
		return nil, nil, err
	}

	// if we get here, all goroutines succeeded without error
	if oldSHA == "" {
		v.RekordObj.Data.Hash = &models.RekordV001SchemaDataHash{}
		v.RekordObj.Data.Hash.Algorithm = swag.String(models.RekordV001SchemaDataHashAlgorithmSha256)
		v.RekordObj.Data.Hash.Value = swag.String(computedSHA)
	}

	return keyObj, sigObj, nil
}

// Canonicalize returns the canonical JSON form of the entry: canonicalized
// signature and key plus the data digest (data bytes are excluded).
func (v *V001Entry) Canonicalize(ctx context.Context) ([]byte, error) {
	keyObj, sigObj, err := v.fetchExternalEntities(ctx)
	if err != nil {
		return nil, err
	}

	canonicalEntry := models.RekordV001Schema{}

	// need to canonicalize signature & key content
	canonicalEntry.Signature = &models.RekordV001SchemaSignature{}
	// signature URL (if known) is not set deliberately
	canonicalEntry.Signature.Format = v.RekordObj.Signature.Format

	var sigContent []byte
	sigContent, err = sigObj.CanonicalValue()
	if err != nil {
		return nil, err
	}
	canonicalEntry.Signature.Content = (*strfmt.Base64)(&sigContent)

	var pubKeyContent []byte
	canonicalEntry.Signature.PublicKey = &models.RekordV001SchemaSignaturePublicKey{}
	pubKeyContent, err = keyObj.CanonicalValue()
	if err != nil {
		return nil, err
	}
	canonicalEntry.Signature.PublicKey.Content = (*strfmt.Base64)(&pubKeyContent)

	canonicalEntry.Data = &models.RekordV001SchemaData{}
	canonicalEntry.Data.Hash = v.RekordObj.Data.Hash
	// data content is not set deliberately

	// wrap in valid object with kind and apiVersion set
	rekordObj := models.Rekord{}
	rekordObj.APIVersion = swag.String(APIVERSION)
	rekordObj.Spec = &canonicalEntry

	v.RekordObj = canonicalEntry

	bytes, err := json.Marshal(&rekordObj)
	if err != nil {
		return nil, err
	}
	return bytes, nil
}

// validate performs cross-field validation for fields in object
func (v V001Entry) validate() error {
	sig := v.RekordObj.Signature
	if v.RekordObj.Signature == nil {
		return errors.New("missing signature")
	}
	if sig.Content == nil || len(*sig.Content) == 0 {
		return errors.New("'content' must be specified for signature")
	}

	key := sig.PublicKey
	if key == nil {
		return errors.New("missing public key")
	}
	if key.Content == nil || len(*key.Content) == 0 {
		return errors.New("'content' must be specified for publicKey")
	}

	data := v.RekordObj.Data
	if data == nil {
		return errors.New("missing data")
	}

	hash := data.Hash
	if hash != nil {
		if !govalidator.IsHash(swag.StringValue(hash.Value), swag.StringValue(hash.Algorithm)) {
			return errors.New("invalid value for hash")
		}
	} else if len(data.Content) == 0 {
		return errors.New("'content' must be specified for data")
	}

	return nil
}

// CreateFromArtifactProperties builds a proposed rekord entry from an
// artifact, a detached signature, and exactly one public key, then verifies
// the signature before proposing the entry.
func (v V001Entry) CreateFromArtifactProperties(ctx context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) {
	returnVal := models.Rekord{}
	re := V001Entry{}

	// we will need artifact, public-key, signature
	re.RekordObj.Data = &models.RekordV001SchemaData{}

	var err error
	artifactBytes := props.ArtifactBytes
	if len(artifactBytes) == 0 {
		var artifactReader io.ReadCloser
		if props.ArtifactPath == nil {
			return nil, errors.New("path to artifact file must be specified")
		}
		if props.ArtifactPath.IsAbs() {
			artifactReader, err = util.FileOrURLReadCloser(ctx, props.ArtifactPath.String(), nil)
			if err != nil {
				return nil, fmt.Errorf("error reading artifact file: %w", err)
			}
		} else {
			artifactReader, err = os.Open(filepath.Clean(props.ArtifactPath.Path))
			if err != nil {
				return nil, fmt.Errorf("error opening artifact file: %w", err)
			}
		}
		artifactBytes, err = io.ReadAll(artifactReader)
		if err != nil {
			return nil, fmt.Errorf("error reading artifact file: %w", err)
		}
	}
	re.RekordObj.Data.Content = strfmt.Base64(artifactBytes)

	re.RekordObj.Signature = &models.RekordV001SchemaSignature{}
	switch props.PKIFormat {
	case "pgp":
		re.RekordObj.Signature.Format = swag.String(models.RekordV001SchemaSignatureFormatPgp)
	case "minisign":
		re.RekordObj.Signature.Format = swag.String(models.RekordV001SchemaSignatureFormatMinisign)
	case "x509":
		re.RekordObj.Signature.Format = swag.String(models.RekordV001SchemaSignatureFormatX509)
	case "ssh":
		re.RekordObj.Signature.Format = swag.String(models.RekordV001SchemaSignatureFormatSSH)
	default:
		return nil, fmt.Errorf("unexpected format of public key: %s", props.PKIFormat)
	}
	sigBytes := props.SignatureBytes
	if len(sigBytes) == 0 {
		if props.SignaturePath == nil {
			return nil, errors.New("a detached signature must be provided")
		}
		sigBytes, err = os.ReadFile(filepath.Clean(props.SignaturePath.Path))
		if err != nil {
			return nil, fmt.Errorf("error reading signature file: %w", err)
		}
		// NOTE(review): both branches perform the identical assignment below;
		// looks like it could be hoisted after the if/else — confirm and simplify.
		re.RekordObj.Signature.Content = (*strfmt.Base64)(&sigBytes)
	} else {
		re.RekordObj.Signature.Content = (*strfmt.Base64)(&sigBytes)
	}

	re.RekordObj.Signature.PublicKey = &models.RekordV001SchemaSignaturePublicKey{}
	publicKeyBytes := props.PublicKeyBytes
	if len(publicKeyBytes) == 0 {
		if len(props.PublicKeyPaths) != 1 {
			return nil, errors.New("only one public key must be provided to verify detached signature")
		}
		keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path))
		if err != nil {
			return nil, fmt.Errorf("error reading public key file: %w", err)
		}
		publicKeyBytes = append(publicKeyBytes, keyBytes)
	} else if len(publicKeyBytes) != 1 {
		return nil, errors.New("only one public key must be provided")
	}

	re.RekordObj.Signature.PublicKey.Content = (*strfmt.Base64)(&publicKeyBytes[0])

	if err := re.validate(); err != nil {
		return nil, err
	}

	// verifies the signature over the artifact before proposing the entry
	if _, _, err := re.fetchExternalEntities(ctx); err != nil {
		return nil, fmt.Errorf("error retrieving external entities: %w", err)
	}

	returnVal.APIVersion = swag.String(re.APIVersion())
	returnVal.Spec = re.RekordObj

	return &returnVal, nil
}

// Verifiers returns the entry's public key parsed per its declared format.
func (v V001Entry) Verifiers() ([]pki.PublicKey, error) {
	if v.RekordObj.Signature == nil || v.RekordObj.Signature.PublicKey == nil || v.RekordObj.Signature.PublicKey.Content == nil {
		return nil, errors.New("rekord v0.0.1 entry not initialized")
	}

	var key pki.PublicKey
	var err error

	switch f := *v.RekordObj.Signature.Format; f {
	case "x509":
		key, err = x509.NewPublicKey(bytes.NewReader(*v.RekordObj.Signature.PublicKey.Content))
	case "ssh":
		key, err = ssh.NewPublicKey(bytes.NewReader(*v.RekordObj.Signature.PublicKey.Content))
	case "pgp":
		key, err = pgp.NewPublicKey(bytes.NewReader(*v.RekordObj.Signature.PublicKey.Content))
	case "minisign":
		key, err = minisign.NewPublicKey(bytes.NewReader(*v.RekordObj.Signature.PublicKey.Content))
	default:
		return nil, fmt.Errorf("unexpected format of public key: %s", f)
	}
	if err != nil {
		return nil, err
	}
	return []pki.PublicKey{key}, nil
}

// ArtifactHash returns the lowercased "alg:digest" of the data content.
func (v V001Entry) ArtifactHash() (string, error) {
	if v.RekordObj.Data == nil || v.RekordObj.Data.Hash == nil || v.RekordObj.Data.Hash.Value == nil || v.RekordObj.Data.Hash.Algorithm == nil {
		return "", errors.New("rekord v0.0.1 entry not initialized")
	}
	return 
strings.ToLower(fmt.Sprintf("%s:%s", *v.RekordObj.Data.Hash.Algorithm, *v.RekordObj.Data.Hash.Value)), nil } func (v V001Entry) Insertable() (bool, error) { if v.RekordObj.Signature == nil { return false, errors.New("missing signature property") } if v.RekordObj.Signature.Content == nil || len(*v.RekordObj.Signature.Content) == 0 { return false, errors.New("missing signature content") } if v.RekordObj.Signature.PublicKey == nil { return false, errors.New("missing publicKey property") } if v.RekordObj.Signature.PublicKey.Content == nil || len(*v.RekordObj.Signature.PublicKey.Content) == 0 { return false, errors.New("missing publicKey content") } if v.RekordObj.Signature.Format == nil || len(*v.RekordObj.Signature.Format) == 0 { return false, errors.New("missing signature format") } if v.RekordObj.Data == nil { return false, errors.New("missing data property") } if len(v.RekordObj.Data.Content) == 0 { return false, errors.New("missing data content") } return true, nil }
// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rfc3161 import ( "context" "crypto/sha256" "encoding/asn1" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "os" "path/filepath" "strings" "github.com/sigstore/rekor/pkg/pki" "github.com/sigstore/rekor/pkg/types/rfc3161" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "github.com/sassoftware/relic/lib/pkcs9" "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/rekor/pkg/log" "github.com/sigstore/rekor/pkg/types" ) const ( APIVERSION = "0.0.1" ) func init() { if err := rfc3161.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { log.Logger.Panic(err) } } type V001Entry struct { Rfc3161Obj models.Rfc3161V001Schema tsrContent *strfmt.Base64 } func (v V001Entry) APIVersion() string { return APIVERSION } func NewEntry() types.EntryImpl { return &V001Entry{} } func NewEntryFromBytes(timestamp []byte) models.ProposedEntry { b64 := strfmt.Base64(timestamp) re := V001Entry{ Rfc3161Obj: models.Rfc3161V001Schema{ Tsr: &models.Rfc3161V001SchemaTsr{ Content: &b64, }, }, } return &models.Rfc3161{ Spec: re.Rfc3161Obj, APIVersion: swag.String(re.APIVersion()), } } func (v V001Entry) IndexKeys() ([]string, error) { var result []string str := v.Rfc3161Obj.Tsr.Content.String() tb, err := base64.StdEncoding.DecodeString(str) if err != nil { return nil, err } h := sha256.Sum256(tb) hx := hex.EncodeToString(h[:]) payloadKey := "sha256:" + 
hx result = append(result, payloadKey) return result, nil } func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { rfc3161Resp, ok := pe.(*models.Rfc3161) if !ok { return errors.New("cannot unmarshal non Rfc3161 v0.0.1 type") } if err := types.DecodeEntry(rfc3161Resp.Spec, &v.Rfc3161Obj); err != nil { return err } // field validation if err := v.Rfc3161Obj.Validate(strfmt.Default); err != nil { return err } if err := v.validate(); err != nil { return err } v.tsrContent = v.Rfc3161Obj.Tsr.Content return nil } func (v *V001Entry) Canonicalize(_ context.Context) ([]byte, error) { if v.tsrContent == nil { return nil, &types.InputValidationError{Err: errors.New("tsr content must be set before canonicalizing")} } canonicalEntry := models.Rfc3161V001Schema{ Tsr: &models.Rfc3161V001SchemaTsr{ Content: v.tsrContent, }, } // wrap in valid object with kind and apiVersion set ref3161Obj := models.Rfc3161{} ref3161Obj.APIVersion = swag.String(APIVERSION) ref3161Obj.Spec = &canonicalEntry return json.Marshal(&ref3161Obj) } // validate performs cross-field validation for fields in object func (v V001Entry) validate() error { data := v.Rfc3161Obj.Tsr if data == nil { return errors.New("missing tsr data") } content := *data.Content if len(content) == 0 { return errors.New("'content' must be specified for data") } b, err := base64.StdEncoding.DecodeString(content.String()) if err != nil { return err } if len(b) > (10 * 1024) { return fmt.Errorf("tsr exceeds maximum allowed size (10kB)") } var tsr pkcs9.TimeStampResp _, err = asn1.Unmarshal(b, &tsr) if err != nil { return err } if tsr.Status.Status != pkcs9.StatusGranted && tsr.Status.Status != pkcs9.StatusGrantedWithMods { return fmt.Errorf("tsr status not granted: %v", tsr.Status.Status) } if !tsr.TimeStampToken.ContentType.Equal(asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 7, 2}) { return fmt.Errorf("tsr wrong content type: %v", tsr.TimeStampToken.ContentType) } _, err = tsr.TimeStampToken.Content.Verify(nil, false) if err 
!= nil { return fmt.Errorf("tsr verification error: %w", err) } return nil } func (v V001Entry) CreateFromArtifactProperties(_ context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { returnVal := models.Rfc3161{} var err error artifactBytes := props.ArtifactBytes if artifactBytes == nil { if props.ArtifactPath == nil { return nil, errors.New("path to artifact file must be specified") } if props.ArtifactPath.IsAbs() { return nil, errors.New("RFC3161 timestamps cannot be fetched over HTTP(S)") } artifactBytes, err = os.ReadFile(filepath.Clean(props.ArtifactPath.Path)) if err != nil { return nil, fmt.Errorf("error reading artifact file: %w", err) } } b64 := strfmt.Base64(artifactBytes) re := V001Entry{ Rfc3161Obj: models.Rfc3161V001Schema{ Tsr: &models.Rfc3161V001SchemaTsr{ Content: &b64, }, }, } returnVal.Spec = re.Rfc3161Obj returnVal.APIVersion = swag.String(re.APIVersion()) return &returnVal, nil } func (v V001Entry) Verifiers() ([]pki.PublicKey, error) { return nil, errors.New("Verifiers() does not support rfc3161 entry type") } func (v V001Entry) ArtifactHash() (string, error) { if v.Rfc3161Obj.Tsr == nil || v.Rfc3161Obj.Tsr.Content == nil { return "", errors.New("rfc3161 v0.0.1 entry not initialized") } tsrDecoded, err := base64.StdEncoding.DecodeString(v.Rfc3161Obj.Tsr.Content.String()) if err != nil { return "", err } h := sha256.Sum256(tsrDecoded) return strings.ToLower(fmt.Sprintf("sha256:%s", hex.EncodeToString(h[:]))), nil } func (v V001Entry) Insertable() (bool, error) { if v.Rfc3161Obj.Tsr == nil { return false, errors.New("missing tsr property") } if v.Rfc3161Obj.Tsr.Content == nil || len(*v.Rfc3161Obj.Tsr.Content) == 0 { return false, errors.New("missing tsr content") } if v.tsrContent == nil || len(*v.tsrContent) == 0 { return false, errors.New("timestamp response has not been parsed") } return true, nil }
// // Copyright 2021 The Sigstore Authors. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. package rpm import ( "bytes" "context" "crypto/sha256" "encoding/hex" "encoding/json" "errors" "fmt" "io" "os" "path/filepath" "strconv" "strings" "github.com/asaskevich/govalidator" rpmutils "github.com/cavaliercoder/go-rpm" "github.com/go-openapi/strfmt" "github.com/go-openapi/swag" "golang.org/x/sync/errgroup" "github.com/sigstore/rekor/pkg/generated/models" "github.com/sigstore/rekor/pkg/log" "github.com/sigstore/rekor/pkg/pki" "github.com/sigstore/rekor/pkg/pki/pgp" "github.com/sigstore/rekor/pkg/types" "github.com/sigstore/rekor/pkg/types/rpm" "github.com/sigstore/rekor/pkg/util" ) const ( APIVERSION = "0.0.1" ) func init() { if err := rpm.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { log.Logger.Panic(err) } } type V001Entry struct { RPMModel models.RpmV001Schema } func (v V001Entry) APIVersion() string { return APIVERSION } func NewEntry() types.EntryImpl { return &V001Entry{} } func (v V001Entry) IndexKeys() ([]string, error) { var result []string keyObj, err := pgp.NewPublicKey(bytes.NewReader(*v.RPMModel.PublicKey.Content)) if err != nil { return nil, err } key, err := keyObj.CanonicalValue() if err != nil { return nil, err } keyHash := sha256.Sum256(key) result = append(result, strings.ToLower(hex.EncodeToString(keyHash[:]))) result = append(result, keyObj.Subjects()...) 
if v.RPMModel.Package.Hash != nil { hashKey := strings.ToLower(fmt.Sprintf("%s:%s", *v.RPMModel.Package.Hash.Algorithm, *v.RPMModel.Package.Hash.Value)) result = append(result, hashKey) } return result, nil } func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { rpm, ok := pe.(*models.Rpm) if !ok { return errors.New("cannot unmarshal non RPM v0.0.1 type") } if err := types.DecodeEntry(rpm.Spec, &v.RPMModel); err != nil { return err } // field validation if err := v.RPMModel.Validate(strfmt.Default); err != nil { return err } return v.validate() } func (v *V001Entry) fetchExternalEntities(ctx context.Context) (*pgp.PublicKey, *rpmutils.PackageFile, error) { if err := v.validate(); err != nil { return nil, nil, &types.InputValidationError{Err: err} } g, ctx := errgroup.WithContext(ctx) hashR, hashW := io.Pipe() sigR, sigW := io.Pipe() rpmR, rpmW := io.Pipe() defer hashR.Close() defer sigR.Close() defer rpmR.Close() closePipesOnError := types.PipeCloser(hashR, hashW, sigR, sigW, rpmR, rpmW) oldSHA := "" if v.RPMModel.Package.Hash != nil && v.RPMModel.Package.Hash.Value != nil { oldSHA = swag.StringValue(v.RPMModel.Package.Hash.Value) } g.Go(func() error { defer hashW.Close() defer sigW.Close() defer rpmW.Close() dataReadCloser := bytes.NewReader(v.RPMModel.Package.Content) /* #nosec G110 */ if _, err := io.Copy(io.MultiWriter(hashW, sigW, rpmW), dataReadCloser); err != nil { return closePipesOnError(err) } return nil }) hashResult := make(chan string) g.Go(func() error { defer close(hashResult) hasher := sha256.New() if _, err := io.Copy(hasher, hashR); err != nil { return closePipesOnError(err) } computedSHA := hex.EncodeToString(hasher.Sum(nil)) if oldSHA != "" && computedSHA != oldSHA { return closePipesOnError(&types.InputValidationError{Err: fmt.Errorf("SHA mismatch: %s != %s", computedSHA, oldSHA)}) } select { case <-ctx.Done(): return ctx.Err() case hashResult <- computedSHA: return nil } }) var keyObj *pgp.PublicKey g.Go(func() error { keyReadCloser 
:= bytes.NewReader(*v.RPMModel.PublicKey.Content) var err error keyObj, err = pgp.NewPublicKey(keyReadCloser) if err != nil { return closePipesOnError(&types.InputValidationError{Err: err}) } keyring, err := keyObj.KeyRing() if err != nil { return closePipesOnError(&types.InputValidationError{Err: err}) } if _, err := rpmutils.GPGCheck(sigR, keyring); err != nil { return closePipesOnError(&types.InputValidationError{Err: err}) } select { case <-ctx.Done(): return ctx.Err() default: return nil } }) var rpmObj *rpmutils.PackageFile g.Go(func() error { var err error rpmObj, err = rpmutils.ReadPackageFile(rpmR) if err != nil { return closePipesOnError(&types.InputValidationError{Err: err}) } // ReadPackageFile does not drain the entire reader so we need to discard the rest if _, err = io.Copy(io.Discard, rpmR); err != nil { return closePipesOnError(err) } select { case <-ctx.Done(): return ctx.Err() default: return nil } }) computedSHA := <-hashResult if err := g.Wait(); err != nil { return nil, nil, err } // if we get here, all goroutines succeeded without error if oldSHA == "" { v.RPMModel.Package.Hash = &models.RpmV001SchemaPackageHash{} v.RPMModel.Package.Hash.Algorithm = swag.String(models.RpmV001SchemaPackageHashAlgorithmSha256) v.RPMModel.Package.Hash.Value = swag.String(computedSHA) } return keyObj, rpmObj, nil } func (v *V001Entry) Canonicalize(ctx context.Context) ([]byte, error) { keyObj, rpmObj, err := v.fetchExternalEntities(ctx) if err != nil { return nil, err } canonicalEntry := models.RpmV001Schema{} // need to canonicalize key content var pubKeyContent []byte canonicalEntry.PublicKey = &models.RpmV001SchemaPublicKey{} pubKeyContent, err = keyObj.CanonicalValue() if err != nil { return nil, err } canonicalEntry.PublicKey.Content = (*strfmt.Base64)(&pubKeyContent) canonicalEntry.Package = &models.RpmV001SchemaPackage{} canonicalEntry.Package.Hash = &models.RpmV001SchemaPackageHash{} canonicalEntry.Package.Hash.Algorithm = 
v.RPMModel.Package.Hash.Algorithm canonicalEntry.Package.Hash.Value = v.RPMModel.Package.Hash.Value // data content is not set deliberately // set NEVRA headers canonicalEntry.Package.Headers = make(map[string]string) canonicalEntry.Package.Headers["Name"] = rpmObj.Name() canonicalEntry.Package.Headers["Epoch"] = strconv.Itoa(rpmObj.Epoch()) canonicalEntry.Package.Headers["Version"] = rpmObj.Version() canonicalEntry.Package.Headers["Release"] = rpmObj.Release() canonicalEntry.Package.Headers["Architecture"] = rpmObj.Architecture() if md5sum := rpmObj.GetBytes(0, 1004); md5sum != nil { canonicalEntry.Package.Headers["RPMSIGTAG_MD5"] = hex.EncodeToString(md5sum) } if sha1sum := rpmObj.GetBytes(0, 1012); sha1sum != nil { canonicalEntry.Package.Headers["RPMSIGTAG_SHA1"] = hex.EncodeToString(sha1sum) } if sha256sum := rpmObj.GetBytes(0, 1016); sha256sum != nil { canonicalEntry.Package.Headers["RPMSIGTAG_SHA256"] = hex.EncodeToString(sha256sum) } // wrap in valid object with kind and apiVersion set rpm := models.Rpm{} rpm.APIVersion = swag.String(APIVERSION) rpm.Spec = &canonicalEntry return json.Marshal(&rpm) } // validate performs cross-field validation for fields in object func (v V001Entry) validate() error { key := v.RPMModel.PublicKey if key == nil { return errors.New("missing public key") } if key.Content == nil || len(*key.Content) == 0 { return errors.New("'content' must be specified for publicKey") } pkg := v.RPMModel.Package if pkg == nil { return errors.New("missing package") } hash := pkg.Hash if hash != nil { if !govalidator.IsHash(swag.StringValue(hash.Value), swag.StringValue(hash.Algorithm)) { return errors.New("invalid value for hash") } } else if len(pkg.Content) == 0 { return errors.New("'content' must be specified for package") } return nil } func (v V001Entry) CreateFromArtifactProperties(ctx context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { returnVal := models.Rpm{} re := V001Entry{} // we will need artifact, 
public-key, signature re.RPMModel = models.RpmV001Schema{} re.RPMModel.Package = &models.RpmV001SchemaPackage{} var err error artifactBytes := props.ArtifactBytes if artifactBytes == nil { var artifactReader io.ReadCloser if props.ArtifactPath == nil { return nil, errors.New("path to artifact file must be specified") } if props.ArtifactPath.IsAbs() { artifactReader, err = util.FileOrURLReadCloser(ctx, props.ArtifactPath.String(), nil) if err != nil { return nil, fmt.Errorf("error reading RPM file: %w", err) } } else { artifactReader, err = os.Open(filepath.Clean(props.ArtifactPath.Path)) if err != nil { return nil, fmt.Errorf("error opening RPM file: %w", err) } } artifactBytes, err = io.ReadAll(artifactReader) if err != nil { return nil, fmt.Errorf("error reading RPM file: %w", err) } } re.RPMModel.Package.Content = strfmt.Base64(artifactBytes) re.RPMModel.PublicKey = &models.RpmV001SchemaPublicKey{} publicKeyBytes := props.PublicKeyBytes if len(publicKeyBytes) == 0 { if len(props.PublicKeyPaths) != 1 { return nil, errors.New("only one public key must be provided to verify RPM signature") } keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path)) if err != nil { return nil, fmt.Errorf("error reading public key file: %w", err) } publicKeyBytes = append(publicKeyBytes, keyBytes) } else if len(publicKeyBytes) != 1 { return nil, errors.New("only one public key must be provided") } re.RPMModel.PublicKey.Content = (*strfmt.Base64)(&publicKeyBytes[0]) if err := re.validate(); err != nil { return nil, err } if _, _, err := re.fetchExternalEntities(context.Background()); err != nil { return nil, fmt.Errorf("error retrieving external entities: %w", err) } returnVal.APIVersion = swag.String(re.APIVersion()) returnVal.Spec = re.RPMModel return &returnVal, nil } func (v V001Entry) Verifiers() ([]pki.PublicKey, error) { if v.RPMModel.PublicKey == nil || v.RPMModel.PublicKey.Content == nil { return nil, errors.New("rpm v0.0.1 entry not initialized") } key, err 
:= pgp.NewPublicKey(bytes.NewReader(*v.RPMModel.PublicKey.Content)) if err != nil { return nil, err } return []pki.PublicKey{key}, nil } func (v V001Entry) ArtifactHash() (string, error) { if v.RPMModel.Package == nil || v.RPMModel.Package.Hash == nil || v.RPMModel.Package.Hash.Value == nil || v.RPMModel.Package.Hash.Algorithm == nil { return "", errors.New("rpm v0.0.1 entry not initialized") } return strings.ToLower(fmt.Sprintf("%s:%s", *v.RPMModel.Package.Hash.Algorithm, *v.RPMModel.Package.Hash.Value)), nil } func (v V001Entry) Insertable() (bool, error) { if v.RPMModel.PublicKey == nil { return false, errors.New("missing publicKey property") } if v.RPMModel.PublicKey.Content == nil || len(*v.RPMModel.PublicKey.Content) == 0 { return false, errors.New("missing publicKey content") } if v.RPMModel.Package == nil { return false, errors.New("missing package property") } if len(v.RPMModel.Package.Content) == 0 { return false, errors.New("missing package content") } return true, nil }
/* Copyright © 2021 The Sigstore Authors. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ package tuf import ( "bytes" "context" "crypto/sha256" "encoding/base64" "encoding/hex" "encoding/json" "errors" "fmt" "io" "os" "path/filepath" "strconv" "strings" "time" "github.com/theupdateframework/go-tuf/data" // This will support deprecated ECDSA hex-encoded keys in TUF metadata. // Will be removed when sigstore migrates entirely off hex-encoded. _ "github.com/theupdateframework/go-tuf/pkg/deprecated/set_ecdsa" "golang.org/x/sync/errgroup" "github.com/sigstore/rekor/pkg/log" "github.com/sigstore/rekor/pkg/types" "github.com/sigstore/rekor/pkg/types/tuf" "github.com/sigstore/rekor/pkg/util" "github.com/go-openapi/strfmt" "github.com/sigstore/rekor/pkg/pki" ptuf "github.com/sigstore/rekor/pkg/pki/tuf" "github.com/go-openapi/swag" "github.com/sigstore/rekor/pkg/generated/models" ) const ( APIVERSION = "0.0.1" ) func init() { if err := tuf.VersionMap.SetEntryFactory(APIVERSION, NewEntry); err != nil { log.Logger.Panic(err) } } type BaseSigned struct { Type string `json:"_type"` Expires time.Time `json:"expires"` Version int `json:"version"` } type V001Entry struct { TufObj models.TUFV001Schema } func (v V001Entry) APIVersion() string { return APIVERSION } func NewEntry() types.EntryImpl { return &V001Entry{} } func (v V001Entry) IndexKeys() ([]string, error) { var result []string keyBytes, err := v.parseRootContent() if err != nil { return nil, err } sigBytes, err := v.parseMetadataContent() if err != nil { 
return nil, err } key, err := ptuf.NewPublicKey(bytes.NewReader(keyBytes)) if err != nil { return nil, err } sig, err := ptuf.NewSignature(bytes.NewReader(sigBytes)) if err != nil { return nil, err } // Index metadata hash, type, and version. metadata, err := sig.CanonicalValue() if err != nil { return nil, err } metadataHash := sha256.Sum256(metadata) result = append(result, strings.ToLower(hex.EncodeToString(metadataHash[:]))) result = append(result, sig.Role) result = append(result, strconv.Itoa(sig.Version)) // Index root.json hash. root, err := key.CanonicalValue() if err != nil { log.Logger.Error(err) } else { rootHash := sha256.Sum256(root) result = append(result, strings.ToLower(hex.EncodeToString(rootHash[:]))) } // TODO: Index individual key IDs return result, nil } func (v *V001Entry) Unmarshal(pe models.ProposedEntry) error { tuf, ok := pe.(*models.TUF) if !ok { return errors.New("cannot unmarshal non tuf v0.0.1 type") } if err := types.DecodeEntry(tuf.Spec, &v.TufObj); err != nil { return err } // field validation if err := v.TufObj.Validate(strfmt.Default); err != nil { return err } // cross field validation return v.Validate() } func (v *V001Entry) fetchExternalEntities(ctx context.Context) (pki.PublicKey, pki.Signature, error) { g, ctx := errgroup.WithContext(ctx) metaR, metaW := io.Pipe() rootR, rootW := io.Pipe() defer metaR.Close() defer rootR.Close() closePipesOnError := types.PipeCloser(metaR, metaW, rootR, rootW) // verify artifact signature sigResult := make(chan pki.Signature) g.Go(func() error { defer close(sigResult) var contentBytes []byte if v.TufObj.Metadata.Content != nil { var err error contentBytes, err = v.parseMetadataContent() if err != nil { return closePipesOnError(err) } } sigReadCloser := bytes.NewReader(contentBytes) signature, err := ptuf.NewSignature(sigReadCloser) if err != nil { return closePipesOnError(&types.InputValidationError{Err: err}) } select { case <-ctx.Done(): return ctx.Err() case sigResult <- signature: 
return nil } }) keyResult := make(chan pki.PublicKey) g.Go(func() error { defer close(keyResult) var contentBytes []byte if v.TufObj.Root.Content != nil { var err error contentBytes, err = v.parseRootContent() if err != nil { return closePipesOnError(err) } } keyReadCloser := bytes.NewReader(contentBytes) key, err := ptuf.NewPublicKey(keyReadCloser) if err != nil { return closePipesOnError(&types.InputValidationError{Err: err}) } select { case <-ctx.Done(): return ctx.Err() case keyResult <- key: return nil } }) var ( keyObj pki.PublicKey sigObj pki.Signature ) // the sigObj contains the signed content. g.Go(func() error { keyObj, sigObj = <-keyResult, <-sigResult if keyObj == nil || sigObj == nil { return closePipesOnError(errors.New("failed to read signature or public key")) } var err error if err = sigObj.Verify(nil, keyObj); err != nil { return closePipesOnError(&types.InputValidationError{Err: err}) } select { case <-ctx.Done(): return ctx.Err() default: return nil } }) if err := g.Wait(); err != nil { return nil, nil, err } return keyObj, sigObj, nil } func (v *V001Entry) Canonicalize(ctx context.Context) ([]byte, error) { key, sig, err := v.fetchExternalEntities(ctx) if err != nil { return nil, err } canonicalEntry := models.TUFV001Schema{} canonicalEntry.SpecVersion, err = key.(*ptuf.PublicKey).SpecVersion() if err != nil { return nil, err } // need to canonicalize manifest (canonicalize JSON) canonicalEntry.Root = &models.TUFV001SchemaRoot{} canonicalEntry.Root.Content, err = key.CanonicalValue() if err != nil { return nil, err } canonicalEntry.Metadata = &models.TUFV001SchemaMetadata{} canonicalEntry.Metadata.Content, err = sig.CanonicalValue() if err != nil { return nil, err } // wrap in valid object with kind and apiVersion set tuf := models.TUF{} tuf.APIVersion = swag.String(APIVERSION) tuf.Spec = &canonicalEntry return json.Marshal(&tuf) } // Validate performs cross-field validation for fields in object // FIXME: we can probably export 
ValidateMetablock on in-toto.go func (v V001Entry) Validate() error { root := v.TufObj.Root if root == nil { return errors.New("missing root") } if root.Content == nil { return errors.New("root must be specified") } tufManifest := v.TufObj.Metadata if tufManifest == nil { return errors.New("missing TUF metadata") } if tufManifest.Content == nil { return errors.New("TUF metadata must be specified") } return nil } func (v V001Entry) CreateFromArtifactProperties(ctx context.Context, props types.ArtifactProperties) (models.ProposedEntry, error) { // This will do only syntactic checks of the metablock, not signature verification. // Signature verification occurs in FetchExternalEntries() returnVal := models.TUF{} re := V001Entry{} // we will need the manifest and root var err error artifactBytes := props.ArtifactBytes re.TufObj.Metadata = &models.TUFV001SchemaMetadata{} if artifactBytes == nil { var artifactReader io.ReadCloser if props.ArtifactPath == nil { return nil, errors.New("path to artifact file must be specified") } if props.ArtifactPath.IsAbs() { artifactReader, err = util.FileOrURLReadCloser(ctx, props.ArtifactPath.String(), nil) if err != nil { return nil, fmt.Errorf("error reading RPM file: %w", err) } } else { artifactReader, err = os.Open(filepath.Clean(props.ArtifactPath.Path)) if err != nil { return nil, fmt.Errorf("error opening RPM file: %w", err) } } artifactBytes, err = io.ReadAll(artifactReader) if err != nil { return nil, fmt.Errorf("error reading RPM file: %w", err) } } s := &data.Signed{} if err := json.Unmarshal(artifactBytes, s); err != nil { return nil, err } re.TufObj.Metadata.Content = s rootBytes := props.PublicKeyBytes re.TufObj.Root = &models.TUFV001SchemaRoot{} if len(rootBytes) == 0 { if len(props.PublicKeyPaths) != 1 { return nil, errors.New("only one path to root file must be specified") } keyBytes, err := os.ReadFile(filepath.Clean(props.PublicKeyPaths[0].Path)) if err != nil { return nil, fmt.Errorf("error reading root file: %w", 
err) } rootBytes = append(rootBytes, keyBytes) } else if len(rootBytes) != 1 { return nil, errors.New("only one root key must be provided") } root := &data.Signed{} if err := json.Unmarshal(rootBytes[0], root); err != nil { return nil, err } re.TufObj.Root.Content = root if err := re.Validate(); err != nil { return nil, err } if _, _, err := re.fetchExternalEntities(ctx); err != nil { return nil, fmt.Errorf("error retrieving external entities: %w", err) } returnVal.APIVersion = swag.String(re.APIVersion()) returnVal.Spec = re.TufObj return &returnVal, nil } func (v V001Entry) Verifiers() ([]pki.PublicKey, error) { if v.TufObj.Root == nil { return nil, errors.New("tuf v0.0.1 entry not initialized") } keyBytes, err := v.parseRootContent() if err != nil { return nil, err } key, err := ptuf.NewPublicKey(bytes.NewReader(keyBytes)) if err != nil { return nil, err } return []pki.PublicKey{key}, nil } func (v V001Entry) ArtifactHash() (string, error) { if v.TufObj.Metadata == nil || v.TufObj.Metadata.Content == nil { return "", errors.New("tuf v0.0.1 entry not initialized") } sigBytes, err := v.parseMetadataContent() if err != nil { return "", err } sig, err := ptuf.NewSignature(bytes.NewReader(sigBytes)) if err != nil { return "", err } metadata, err := sig.CanonicalValue() if err != nil { return "", err } metadataHash := sha256.Sum256(metadata) return strings.ToLower(fmt.Sprintf("sha256:%s", hex.EncodeToString(metadataHash[:]))), nil } func (v V001Entry) Insertable() (bool, error) { if v.TufObj.Metadata == nil { return false, errors.New("missing metadata property") } if v.TufObj.Metadata.Content == nil { return false, errors.New("missing metadata content") } if v.TufObj.Root == nil { return false, errors.New("missing root property") } if v.TufObj.Root.Content == nil { return false, errors.New("missing root content") } return true, nil } func (v V001Entry) parseRootContent() ([]byte, error) { var keyBytes []byte // Root.Content can either be a base64-encoded string or 
object switch v := v.TufObj.Root.Content.(type) { case string: b, err := base64.StdEncoding.DecodeString(v) if err != nil { return nil, fmt.Errorf("base64 decoding TUF root content: %w", err) } keyBytes = b default: var err error keyBytes, err = json.Marshal(v) if err != nil { return nil, err } } return keyBytes, nil } func (v V001Entry) parseMetadataContent() ([]byte, error) { var sigBytes []byte // Metadata.Content can either be a base64-encoded string or object switch v := v.TufObj.Metadata.Content.(type) { case string: b, err := base64.StdEncoding.DecodeString(v) if err != nil { return nil, fmt.Errorf("base64 decoding TUF metadata content: %w", err) } sigBytes = b default: var err error sigBytes, err = json.Marshal(v) if err != nil { return nil, err } } return sigBytes, nil }