// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundle
import (
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"os"
"strings"
"github.com/secure-systems-lab/go-securesystemslib/dsse"
protobundle "github.com/sigstore/protobuf-specs/gen/pb-go/bundle/v1"
protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
protodsse "github.com/sigstore/protobuf-specs/gen/pb-go/dsse"
"golang.org/x/mod/semver"
"google.golang.org/protobuf/encoding/protojson"
"github.com/sigstore/sigstore-go/pkg/tlog"
"github.com/sigstore/sigstore-go/pkg/verify"
)
// ErrValidation is the root sentinel for all bundle validation failures.
// Every derived error below wraps it via %w, so callers can match any of
// them with errors.Is(err, ErrValidation).
var ErrValidation = errors.New("validation error")
var ErrUnsupportedMediaType = fmt.Errorf("%w: unsupported media type", ErrValidation)
var ErrEmptyBundle = fmt.Errorf("%w: empty protobuf bundle", ErrValidation)
var ErrMissingVerificationMaterial = fmt.Errorf("%w: missing verification material", ErrValidation)
var ErrMissingBundleContent = fmt.Errorf("%w: missing bundle content", ErrValidation)
// ErrUnimplemented deliberately does not chain to ErrValidation.
var ErrUnimplemented = errors.New("unimplemented")
// ErrInvalidAttestation and its children also chain back to ErrValidation.
var ErrInvalidAttestation = fmt.Errorf("%w: invalid attestation", ErrValidation)
var ErrMissingEnvelope = fmt.Errorf("%w: missing valid envelope", ErrInvalidAttestation)
var ErrDecodingJSON = fmt.Errorf("%w: decoding json", ErrInvalidAttestation)
var ErrDecodingB64 = fmt.Errorf("%w: decoding base64", ErrInvalidAttestation)
// mediaTypeBase is the prefix shared by every sigstore bundle media type.
const mediaTypeBase = "application/vnd.dev.sigstore.bundle"
// ErrValidationError wraps err so that the result matches ErrValidation
// via errors.Is while preserving the original error in the chain.
func ErrValidationError(err error) error {
	return fmt.Errorf("%w: %w", ErrValidation, err)
}
// Bundle wraps a protobuf Bundle message and records whether its
// transparency log entries carried an inclusion promise and/or an
// inclusion proof. The two flags are populated as a side effect of
// TlogEntries (which validate calls).
type Bundle struct {
	*protobundle.Bundle
	hasInclusionPromise bool
	hasInclusionProof   bool
}
// NewBundle wraps pbundle and validates it, returning an error if the
// bundle violates the invariants of its media-type version.
func NewBundle(pbundle *protobundle.Bundle) (*Bundle, error) {
	// The inclusion flags default to false and are filled in by validate
	// via TlogEntries.
	b := &Bundle{Bundle: pbundle}
	if err := b.validate(); err != nil {
		return nil, err
	}
	return b, nil
}
// Deprecated: use Bundle instead.
type ProtobufBundle = Bundle
// Deprecated: use NewBundle instead.
func NewProtobufBundle(b *protobundle.Bundle) (*ProtobufBundle, error) {
	return NewBundle(b)
}
// validate enforces the invariants tied to the bundle's media-type version:
// v0.1 requires an inclusion promise on tlog entries, v0.2+ requires an
// inclusion proof, v0.3+ forbids X.509 certificate chains (single
// certificate only), and v0.4+ is rejected as not yet supported. Calling
// TlogEntries here also populates b.hasInclusionPromise/b.hasInclusionProof,
// which the version checks below rely on.
func (b *Bundle) validate() error {
	bundleVersion, err := b.Version()
	if err != nil {
		return fmt.Errorf("error getting bundle version: %w", err)
	}
	// if bundle version is < 0.1, return error
	if semver.Compare(bundleVersion, "v0.1") < 0 {
		return fmt.Errorf("%w: bundle version %s is not supported", ErrUnsupportedMediaType, bundleVersion)
	}
	// fetch tlog entries, as next check needs to check them for inclusion proof/promise
	entries, err := b.TlogEntries()
	if err != nil {
		return err
	}
	// if bundle version == v0.1, require inclusion promise
	if semver.Compare(bundleVersion, "v0.1") == 0 {
		if len(entries) > 0 && !b.hasInclusionPromise {
			return errors.New("inclusion promises missing in bundle (required for bundle v0.1)")
		}
	} else {
		// if bundle version >= v0.2, require inclusion proof
		if len(entries) > 0 && !b.hasInclusionProof {
			return errors.New("inclusion proof missing in bundle (required for bundle v0.2)")
		}
	}
	// if bundle version >= v0.3, require verification material to not be X.509 certificate chain (only single certificate is allowed)
	if semver.Compare(bundleVersion, "v0.3") >= 0 {
		certs := b.VerificationMaterial.GetX509CertificateChain()
		if certs != nil {
			return errors.New("verification material cannot be X.509 certificate chain (for bundle v0.3)")
		}
	}
	// if bundle version is >= v0.4, return error as this version is not supported
	if semver.Compare(bundleVersion, "v0.4") >= 0 {
		return fmt.Errorf("%w: bundle version %s is not yet supported", ErrUnsupportedMediaType, bundleVersion)
	}
	err = validateBundle(b.Bundle)
	if err != nil {
		return fmt.Errorf("invalid bundle: %w", err)
	}
	return nil
}
// MediaTypeString returns a mediatype string for the specified bundle version.
// Versions 0.1 and 0.2 use the legacy ";version=" parameter form; later
// versions embed the version in the media type itself.
// The function returns an error if the resulting string does not validate.
func MediaTypeString(version string) (string, error) {
	if version == "" {
		return "", fmt.Errorf("unable to build media type string, no version defined")
	}
	// Normalize once; the original code re-trimmed an already-trimmed string.
	version = strings.TrimPrefix(version, "v")
	mtString := fmt.Sprintf("%s.v%s+json", mediaTypeBase, version)
	if version == "0.1" || version == "0.2" {
		mtString = fmt.Sprintf("%s+json;version=%s", mediaTypeBase, version)
	}
	// Round-trip through getBundleVersion to guarantee the result parses.
	if _, err := getBundleVersion(mtString); err != nil {
		return "", fmt.Errorf("unable to build mediatype: %w", err)
	}
	return mtString, nil
}
// Version returns the bundle's semantic version string (e.g. "v0.1"),
// derived from its media type.
func (b *Bundle) Version() (string, error) {
	return getBundleVersion(b.MediaType)
}
// getBundleVersion maps a bundle media type to its "vX.Y" version string.
// It accepts both the legacy parameter form ("...+json;version=0.N") and
// the newer embedded form ("....vX.Y+json"); anything else yields
// ErrUnsupportedMediaType.
func getBundleVersion(mediaType string) (string, error) {
	// Legacy fixed media types (versions 0.1 through 0.3).
	switch mediaType {
	case mediaTypeBase + "+json;version=0.1":
		return "v0.1", nil
	case mediaTypeBase + "+json;version=0.2":
		return "v0.2", nil
	case mediaTypeBase + "+json;version=0.3":
		return "v0.3", nil
	}
	// Newer form embeds the version: application/vnd.dev.sigstore.bundle.vX.Y+json
	if !strings.HasPrefix(mediaType, mediaTypeBase+".v") || !strings.HasSuffix(mediaType, "+json") {
		return "", fmt.Errorf("%w: %s", ErrUnsupportedMediaType, mediaType)
	}
	version := strings.TrimSuffix(strings.TrimPrefix(mediaType, mediaTypeBase+"."), "+json")
	if !semver.IsValid(version) {
		return "", fmt.Errorf("%w: invalid bundle version: %s", ErrUnsupportedMediaType, version)
	}
	return version, nil
}
// validateBundle performs structural checks on the raw protobuf bundle:
// it must be non-nil, carry either a DSSE envelope or a message signature,
// and hold verification material of a recognized kind.
func validateBundle(b *protobundle.Bundle) error {
	if b == nil {
		return ErrEmptyBundle
	}
	if b.Content == nil {
		return ErrMissingBundleContent
	}
	// Content must be one of the two supported oneof variants.
	switch b.Content.(type) {
	case *protobundle.Bundle_DsseEnvelope:
	case *protobundle.Bundle_MessageSignature:
	default:
		return fmt.Errorf("invalid bundle content: bundle content must be either a message signature or dsse envelope")
	}
	vm := b.VerificationMaterial
	if vm == nil || vm.Content == nil {
		return ErrMissingVerificationMaterial
	}
	// Verification material must likewise be a recognized oneof variant.
	switch vm.Content.(type) {
	case *protobundle.VerificationMaterial_PublicKey:
	case *protobundle.VerificationMaterial_Certificate:
	case *protobundle.VerificationMaterial_X509CertificateChain:
	default:
		return fmt.Errorf("invalid verification material content: verification material must be one of public key, x509 certificate and x509 certificate chain")
	}
	return nil
}
// LoadJSONFromPath reads a protojson-encoded bundle from the file at path,
// decodes and validates it.
func LoadJSONFromPath(path string) (*Bundle, error) {
	contents, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	var bundle Bundle
	bundle.Bundle = new(protobundle.Bundle)
	if err := bundle.UnmarshalJSON(contents); err != nil {
		return nil, err
	}
	return &bundle, nil
}
// MarshalJSON encodes the wrapped protobuf bundle using protojson.
func (b *Bundle) MarshalJSON() ([]byte, error) {
	return protojson.Marshal(b.Bundle)
}
// UnmarshalJSON decodes protojson-encoded data into a fresh protobuf
// bundle and validates the result, replacing any previously wrapped bundle.
func (b *Bundle) UnmarshalJSON(data []byte) error {
	b.Bundle = new(protobundle.Bundle)
	if err := protojson.Unmarshal(data, b.Bundle); err != nil {
		return err
	}
	return b.validate()
}
// VerificationContent returns the bundle's verification material as a
// verify.VerificationContent: a Certificate for X.509-based material (for
// a chain, only the leaf/first certificate is used) or a PublicKey hint
// for key-based material. Returns ErrMissingVerificationMaterial when the
// material is absent or structurally empty.
func (b *Bundle) VerificationContent() (verify.VerificationContent, error) {
	if b.VerificationMaterial == nil {
		return nil, ErrMissingVerificationMaterial
	}
	switch content := b.VerificationMaterial.GetContent().(type) {
	case *protobundle.VerificationMaterial_X509CertificateChain:
		if content.X509CertificateChain == nil {
			return nil, ErrMissingVerificationMaterial
		}
		certs := content.X509CertificateChain.GetCertificates()
		if len(certs) == 0 || certs[0].RawBytes == nil {
			return nil, ErrMissingVerificationMaterial
		}
		// Only the leaf (first) certificate is wrapped; intermediates are
		// resolved against trusted material during verification.
		cert, err := certificateFromDER(certs[0].RawBytes)
		if err != nil {
			return nil, err
		}
		return cert, nil
	case *protobundle.VerificationMaterial_Certificate:
		if content.Certificate == nil || content.Certificate.RawBytes == nil {
			return nil, ErrMissingVerificationMaterial
		}
		cert, err := certificateFromDER(content.Certificate.RawBytes)
		if err != nil {
			return nil, err
		}
		return cert, nil
	case *protobundle.VerificationMaterial_PublicKey:
		if content.PublicKey == nil {
			return nil, ErrMissingVerificationMaterial
		}
		return &PublicKey{hint: content.PublicKey.Hint}, nil
	default:
		return nil, ErrMissingVerificationMaterial
	}
}

// certificateFromDER parses DER-encoded certificate bytes into a
// Certificate wrapper, mapping parse failures to ErrValidation.
func certificateFromDER(raw []byte) (*Certificate, error) {
	parsed, err := x509.ParseCertificate(raw)
	if err != nil {
		return nil, ErrValidationError(err)
	}
	return &Certificate{certificate: parsed}, nil
}
// HasInclusionPromise reports whether any parsed tlog entry carried an
// inclusion promise. The flag is only meaningful after TlogEntries (or
// validate, which calls it) has run.
func (b *Bundle) HasInclusionPromise() bool {
	return b.hasInclusionPromise
}
// HasInclusionProof reports whether any parsed tlog entry carried an
// inclusion proof. The flag is only meaningful after TlogEntries (or
// validate, which calls it) has run.
func (b *Bundle) HasInclusionProof() bool {
	return b.hasInclusionProof
}
// TlogEntries parses the transparency log entries from the bundle's
// verification material. As a side effect it sets b.hasInclusionPromise
// and b.hasInclusionProof when any entry carries the corresponding
// evidence (see HasInclusionPromise/HasInclusionProof). Returns nil, nil
// when the bundle has no verification material.
func (b *Bundle) TlogEntries() ([]*tlog.Entry, error) {
	if b.VerificationMaterial == nil {
		return nil, nil
	}
	tlogEntries := make([]*tlog.Entry, len(b.VerificationMaterial.TlogEntries))
	var err error
	for i, entry := range b.VerificationMaterial.TlogEntries {
		tlogEntries[i], err = tlog.ParseTransparencyLogEntry(entry)
		if err != nil {
			// Parse failures surface as validation errors.
			return nil, ErrValidationError(err)
		}
		if tlogEntries[i].HasInclusionPromise() {
			b.hasInclusionPromise = true
		}
		if tlogEntries[i].HasInclusionProof() {
			b.hasInclusionProof = true
		}
	}
	return tlogEntries, nil
}
// SignatureContent returns the bundle's signature as either a parsed DSSE
// envelope or a raw message signature. Returns
// ErrMissingVerificationMaterial when the content is absent or malformed.
func (b *Bundle) SignatureContent() (verify.SignatureContent, error) {
	switch content := b.Content.(type) { //nolint:gocritic
	case *protobundle.Bundle_DsseEnvelope:
		env, err := parseEnvelope(content.DsseEnvelope)
		if err != nil {
			return nil, err
		}
		return env, nil
	case *protobundle.Bundle_MessageSignature:
		ms := content.MessageSignature
		if ms == nil || ms.MessageDigest == nil {
			return nil, ErrMissingVerificationMaterial
		}
		// Translate the protobuf enum into its string name (e.g. SHA2_256).
		algorithm := protocommon.HashAlgorithm_name[int32(ms.MessageDigest.Algorithm)]
		return NewMessageSignature(ms.MessageDigest.Digest, algorithm, ms.Signature), nil
	}
	return nil, ErrMissingVerificationMaterial
}
// Envelope returns the bundle's DSSE envelope, or
// ErrMissingVerificationMaterial when the bundle holds a message
// signature (or no content) instead.
func (b *Bundle) Envelope() (*Envelope, error) {
	if content, ok := b.Content.(*protobundle.Bundle_DsseEnvelope); ok {
		return parseEnvelope(content.DsseEnvelope)
	}
	return nil, ErrMissingVerificationMaterial
}
// Timestamps returns the raw RFC 3161 signed timestamps attached to the
// bundle's verification material. The result is an empty (non-nil) slice
// when no timestamp data is present; an error is returned only when the
// verification material itself is missing.
func (b *Bundle) Timestamps() ([][]byte, error) {
	if b.VerificationMaterial == nil {
		return nil, ErrMissingVerificationMaterial
	}
	tsData := b.VerificationMaterial.TimestampVerificationData
	if tsData == nil {
		return [][]byte{}, nil
	}
	signedTimestamps := make([][]byte, 0, len(tsData.Rfc3161Timestamps))
	for _, ts := range tsData.Rfc3161Timestamps {
		signedTimestamps = append(signedTimestamps, ts.SignedTimestamp)
	}
	return signedTimestamps, nil
}
// MinVersion returns true if the bundle version is greater than or equal
// to the expected version. expectVersion may be given with or without a
// leading "v". Returns false when the bundle's version cannot be parsed.
func (b *Bundle) MinVersion(expectVersion string) bool {
	ver, err := b.Version()
	if err != nil {
		return false
	}
	want := expectVersion
	if !strings.HasPrefix(want, "v") {
		// semver.Compare requires the canonical "v" prefix.
		want = "v" + want
	}
	return semver.Compare(ver, want) >= 0
}
// parseEnvelope converts a protobuf DSSE envelope into the go-securesystemslib
// representation, base64-encoding the payload and each signature as that
// library expects. Returns ErrMissingEnvelope for a nil envelope, nil
// payload, or nil signature entry.
func parseEnvelope(input *protodsse.Envelope) (*Envelope, error) {
	if input == nil {
		return nil, ErrMissingEnvelope
	}
	payload := input.GetPayload()
	if payload == nil {
		return nil, ErrMissingEnvelope
	}
	sigs := input.GetSignatures()
	out := &dsse.Envelope{
		Payload:     base64.StdEncoding.EncodeToString(payload),
		PayloadType: input.GetPayloadType(),
		Signatures:  make([]dsse.Signature, len(sigs)),
	}
	for i, sig := range sigs {
		if sig == nil {
			return nil, ErrMissingEnvelope
		}
		out.Signatures[i] = dsse.Signature{
			KeyID: sig.GetKeyid(),
			Sig:   base64.StdEncoding.EncodeToString(sig.GetSig()),
		}
	}
	return &Envelope{Envelope: out}, nil
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundle
import (
"encoding/base64"
in_toto "github.com/in-toto/attestation/go/v1"
"github.com/secure-systems-lab/go-securesystemslib/dsse"
"github.com/sigstore/sigstore-go/pkg/verify"
"google.golang.org/protobuf/encoding/protojson"
)
// IntotoMediaType is the DSSE payload type for in-toto statements.
const IntotoMediaType = "application/vnd.in-toto+json"
// MessageSignature holds a raw signature over a message along with the
// message's digest and the name of the digest algorithm used.
type MessageSignature struct {
	digest []byte
	digestAlgorithm string
	signature []byte
}
// Digest returns the digest of the signed message.
func (m *MessageSignature) Digest() []byte {
	return m.digest
}
// DigestAlgorithm returns the name of the digest algorithm.
func (m *MessageSignature) DigestAlgorithm() string {
	return m.digestAlgorithm
}
// NewMessageSignature constructs a MessageSignature from its parts.
func NewMessageSignature(digest []byte, digestAlgorithm string, signature []byte) *MessageSignature {
	return &MessageSignature{
		digest: digest,
		digestAlgorithm: digestAlgorithm,
		signature: signature,
	}
}
// Envelope wraps a DSSE envelope so it can serve as signature content
// for verification.
type Envelope struct {
	*dsse.Envelope
}
// Statement decodes the envelope's base64 payload as an in-toto Statement.
// Returns ErrUnsupportedMediaType when the payload type is not in-toto,
// ErrDecodingB64 on base64 failure, or ErrDecodingJSON on protojson failure.
func (e *Envelope) Statement() (*in_toto.Statement, error) {
	if e.PayloadType != IntotoMediaType {
		return nil, ErrUnsupportedMediaType
	}
	raw, err := e.DecodeB64Payload()
	if err != nil {
		return nil, ErrDecodingB64
	}
	statement := &in_toto.Statement{}
	if err := protojson.Unmarshal(raw, statement); err != nil {
		return nil, ErrDecodingJSON
	}
	return statement, nil
}
// EnvelopeContent returns the envelope itself as verify.EnvelopeContent.
func (e *Envelope) EnvelopeContent() verify.EnvelopeContent {
	return e
}
// RawEnvelope exposes the underlying DSSE envelope.
func (e *Envelope) RawEnvelope() *dsse.Envelope {
	return e.Envelope
}
// EnvelopeContent returns nil: a message signature has no DSSE envelope.
func (m *MessageSignature) EnvelopeContent() verify.EnvelopeContent {
	return nil
}
// MessageSignatureContent returns nil: an envelope is not a message signature.
func (e *Envelope) MessageSignatureContent() verify.MessageSignatureContent {
	return nil
}
// MessageSignatureContent returns the message signature itself.
func (m *MessageSignature) MessageSignatureContent() verify.MessageSignatureContent {
	return m
}
// Signature returns the raw signature bytes.
func (m *MessageSignature) Signature() []byte {
	return m.signature
}
// Signature returns the decoded bytes of the envelope's first signature.
// Returns an empty slice when there are no signatures or the base64
// decoding fails (callers treat both as "no usable signature").
func (e *Envelope) Signature() []byte {
	if len(e.Signatures) == 0 {
		return []byte{}
	}
	decoded, err := base64.StdEncoding.DecodeString(e.Signatures[0].Sig)
	if err != nil {
		return []byte{}
	}
	return decoded
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bundle
import (
"crypto"
"crypto/x509"
"time"
"github.com/sigstore/sigstore-go/pkg/root"
"github.com/sigstore/sigstore-go/pkg/verify"
)
// Certificate wraps an x509 certificate used as verification content.
type Certificate struct {
	certificate *x509.Certificate
}
// NewCertificate wraps cert in a Certificate.
func NewCertificate(cert *x509.Certificate) *Certificate {
	return &Certificate{certificate: cert}
}
// PublicKey identifies a public key by hint only; the key material itself
// is resolved from trusted material at verification time.
type PublicKey struct {
	hint string
}
// Hint returns the key's hint identifier.
func (pk PublicKey) Hint() string {
	return pk.hint
}
// CompareKey reports whether key is an *x509.Certificate equal to the
// wrapped certificate. The trusted material argument is unused here.
func (c *Certificate) CompareKey(key any, _ root.TrustedMaterial) bool {
	other, ok := key.(*x509.Certificate)
	return ok && c.certificate.Equal(other)
}
// ValidAtTime reports whether t falls within the certificate's
// NotBefore/NotAfter validity window (endpoints inclusive). The trusted
// material argument is unused here.
func (c *Certificate) ValidAtTime(t time.Time, _ root.TrustedMaterial) bool {
	if t.Before(c.certificate.NotBefore) {
		return false
	}
	return !t.After(c.certificate.NotAfter)
}
// Certificate returns the wrapped x509 certificate.
func (c *Certificate) Certificate() *x509.Certificate {
	return c.certificate
}
// PublicKey returns nil: certificate-based content carries no separate
// public key provider.
func (c *Certificate) PublicKey() verify.PublicKeyProvider {
	return nil
}
// CompareKey looks up the verifier registered under this key's hint in tm
// and reports whether key equals that verifier's public key. Returns false
// when the hint is unknown, the verifier's key is unavailable, or key does
// not expose an Equal(crypto.PublicKey) method.
func (pk *PublicKey) CompareKey(key any, tm root.TrustedMaterial) bool {
	verifier, err := tm.PublicKeyVerifier(pk.hint)
	if err != nil {
		return false
	}
	pubKey, err := verifier.PublicKey()
	if err != nil {
		return false
	}
	equaler, ok := key.(interface{ Equal(x crypto.PublicKey) bool })
	if !ok {
		return false
	}
	return equaler.Equal(pubKey)
}
// ValidAtTime reports whether the verifier registered under this key's
// hint considers t within its validity period; false when the hint is
// unknown.
func (pk *PublicKey) ValidAtTime(t time.Time, tm root.TrustedMaterial) bool {
	if verifier, err := tm.PublicKeyVerifier(pk.hint); err == nil {
		return verifier.ValidAtTime(t)
	}
	return false
}
// Certificate returns nil: key-based content has no certificate.
func (pk *PublicKey) Certificate() *x509.Certificate {
	return nil
}
// PublicKey returns the key itself as a verify.PublicKeyProvider.
func (pk *PublicKey) PublicKey() verify.PublicKeyProvider {
	return pk
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// This file is a verbatim copy of https://github.com/sigstore/fulcio/blob/3707d80bb25330bc7ffbd9702fb401cd643e36fa/pkg/certificate/extensions.go ,
// EXCEPT:
// - the parseExtensions func has been renamed ParseExtensions
package certificate
import (
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"fmt"
)
// OIDs for the Fulcio custom x509 extensions, all under the Sigstore
// private enterprise number arc 1.3.6.1.4.1.57264.1. The first six are
// the deprecated raw-string (v1) extensions; the rest are DER-encoded.
var (
	// Deprecated: Use OIDIssuerV2
	OIDIssuer = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1}
	// Deprecated: Use OIDBuildTrigger
	OIDGitHubWorkflowTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 2}
	// Deprecated: Use OIDSourceRepositoryDigest
	OIDGitHubWorkflowSHA = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 3}
	// Deprecated: Use OIDBuildConfigURI or OIDBuildConfigDigest
	OIDGitHubWorkflowName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 4}
	// Deprecated: Use SourceRepositoryURI
	OIDGitHubWorkflowRepository = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 5}
	// Deprecated: Use OIDSourceRepositoryRef
	OIDGitHubWorkflowRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 6}
	OIDOtherName = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 7}
	OIDIssuerV2 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 8}
	// CI extensions
	OIDBuildSignerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 9}
	OIDBuildSignerDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 10}
	OIDRunnerEnvironment = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 11}
	OIDSourceRepositoryURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 12}
	OIDSourceRepositoryDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 13}
	OIDSourceRepositoryRef = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 14}
	OIDSourceRepositoryIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 15}
	OIDSourceRepositoryOwnerURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 16}
	OIDSourceRepositoryOwnerIdentifier = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 17}
	OIDBuildConfigURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 18}
	OIDBuildConfigDigest = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 19}
	OIDBuildTrigger = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 20}
	OIDRunInvocationURI = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 21}
	OIDSourceRepositoryVisibilityAtSigning = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 22}
)
// Extensions contains all custom x509 extensions defined by Fulcio
type Extensions struct {
	// NB: New extensions must be added here and documented
	// at docs/oidc-info.md
	// The OIDC issuer. Should match `iss` claim of ID token or, in the case of
	// a federated login like Dex it should match the issuer URL of the
	// upstream issuer. If the issuer is not set, the extensions are invalid
	// and will fail to render.
	Issuer string `json:"issuer,omitempty"` // OID 1.3.6.1.4.1.57264.1.8 and 1.3.6.1.4.1.57264.1.1 (Deprecated)
	// Deprecated
	// Triggering event of the Github Workflow. Matches the `event_name` claim of ID
	// tokens from Github Actions
	GithubWorkflowTrigger string `json:"githubWorkflowTrigger,omitempty"` // OID 1.3.6.1.4.1.57264.1.2
	// Deprecated
	// SHA of git commit being built in Github Actions. Matches the `sha` claim of ID
	// tokens from Github Actions
	GithubWorkflowSHA string `json:"githubWorkflowSHA,omitempty"` //nolint:tagliatelle // OID 1.3.6.1.4.1.57264.1.3
	// Deprecated
	// Name of Github Actions Workflow. Matches the `workflow` claim of the ID
	// tokens from Github Actions
	GithubWorkflowName string `json:"githubWorkflowName,omitempty"` // OID 1.3.6.1.4.1.57264.1.4
	// Deprecated
	// Repository of the Github Actions Workflow. Matches the `repository` claim of the ID
	// tokens from Github Actions
	GithubWorkflowRepository string `json:"githubWorkflowRepository,omitempty"` // OID 1.3.6.1.4.1.57264.1.5
	// Deprecated
	// Git Ref of the Github Actions Workflow. Matches the `ref` claim of the ID tokens
	// from Github Actions
	GithubWorkflowRef string `json:"githubWorkflowRef,omitempty"` // 1.3.6.1.4.1.57264.1.6
	// Reference to specific build instructions that are responsible for signing.
	BuildSignerURI string `json:"buildSignerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.9
	// Immutable reference to the specific version of the build instructions that is responsible for signing.
	BuildSignerDigest string `json:"buildSignerDigest,omitempty"` // 1.3.6.1.4.1.57264.1.10
	// Specifies whether the build took place in platform-hosted cloud infrastructure or customer/self-hosted infrastructure.
	RunnerEnvironment string `json:"runnerEnvironment,omitempty"` // 1.3.6.1.4.1.57264.1.11
	// Source repository URL that the build was based on.
	SourceRepositoryURI string `json:"sourceRepositoryURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.12
	// Immutable reference to a specific version of the source code that the build was based upon.
	SourceRepositoryDigest string `json:"sourceRepositoryDigest,omitempty"` // 1.3.6.1.4.1.57264.1.13
	// Source Repository Ref that the build run was based upon.
	SourceRepositoryRef string `json:"sourceRepositoryRef,omitempty"` // 1.3.6.1.4.1.57264.1.14
	// Immutable identifier for the source repository the workflow was based upon.
	SourceRepositoryIdentifier string `json:"sourceRepositoryIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.15
	// Source repository owner URL of the owner of the source repository that the build was based on.
	SourceRepositoryOwnerURI string `json:"sourceRepositoryOwnerURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.16
	// Immutable identifier for the owner of the source repository that the workflow was based upon.
	SourceRepositoryOwnerIdentifier string `json:"sourceRepositoryOwnerIdentifier,omitempty"` // 1.3.6.1.4.1.57264.1.17
	// Build Config URL to the top-level/initiating build instructions.
	BuildConfigURI string `json:"buildConfigURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.18
	// Immutable reference to the specific version of the top-level/initiating build instructions.
	BuildConfigDigest string `json:"buildConfigDigest,omitempty"` // 1.3.6.1.4.1.57264.1.19
	// Event or action that initiated the build.
	BuildTrigger string `json:"buildTrigger,omitempty"` // 1.3.6.1.4.1.57264.1.20
	// Run Invocation URL to uniquely identify the build execution.
	RunInvocationURI string `json:"runInvocationURI,omitempty"` //nolint:tagliatelle // 1.3.6.1.4.1.57264.1.21
	// Source repository visibility at the time of signing the certificate.
	SourceRepositoryVisibilityAtSigning string `json:"sourceRepositoryVisibilityAtSigning,omitempty"` // 1.3.6.1.4.1.57264.1.22
}
// ParseExtensions maps the Fulcio custom x509 extensions in ext onto an
// Extensions struct. The deprecated (v1) extensions carry raw string
// values; all newer extensions are DER-encoded strings decoded via
// ParseDERString. Unrecognized OIDs are ignored.
func ParseExtensions(ext []pkix.Extension) (Extensions, error) {
	out := Extensions{}
	for _, e := range ext {
		switch {
		// BEGIN: Deprecated
		case e.Id.Equal(OIDIssuer):
			out.Issuer = string(e.Value)
		case e.Id.Equal(OIDGitHubWorkflowTrigger):
			out.GithubWorkflowTrigger = string(e.Value)
		case e.Id.Equal(OIDGitHubWorkflowSHA):
			out.GithubWorkflowSHA = string(e.Value)
		case e.Id.Equal(OIDGitHubWorkflowName):
			out.GithubWorkflowName = string(e.Value)
		case e.Id.Equal(OIDGitHubWorkflowRepository):
			out.GithubWorkflowRepository = string(e.Value)
		case e.Id.Equal(OIDGitHubWorkflowRef):
			out.GithubWorkflowRef = string(e.Value)
		// END: Deprecated
		case e.Id.Equal(OIDIssuerV2):
			if err := ParseDERString(e.Value, &out.Issuer); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDBuildSignerURI):
			if err := ParseDERString(e.Value, &out.BuildSignerURI); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDBuildSignerDigest):
			if err := ParseDERString(e.Value, &out.BuildSignerDigest); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDRunnerEnvironment):
			if err := ParseDERString(e.Value, &out.RunnerEnvironment); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryURI):
			if err := ParseDERString(e.Value, &out.SourceRepositoryURI); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryDigest):
			if err := ParseDERString(e.Value, &out.SourceRepositoryDigest); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryRef):
			if err := ParseDERString(e.Value, &out.SourceRepositoryRef); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryIdentifier):
			if err := ParseDERString(e.Value, &out.SourceRepositoryIdentifier); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryOwnerURI):
			if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerURI); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryOwnerIdentifier):
			if err := ParseDERString(e.Value, &out.SourceRepositoryOwnerIdentifier); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDBuildConfigURI):
			if err := ParseDERString(e.Value, &out.BuildConfigURI); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDBuildConfigDigest):
			if err := ParseDERString(e.Value, &out.BuildConfigDigest); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDBuildTrigger):
			if err := ParseDERString(e.Value, &out.BuildTrigger); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDRunInvocationURI):
			if err := ParseDERString(e.Value, &out.RunInvocationURI); err != nil {
				return Extensions{}, err
			}
		case e.Id.Equal(OIDSourceRepositoryVisibilityAtSigning):
			if err := ParseDERString(e.Value, &out.SourceRepositoryVisibilityAtSigning); err != nil {
				return Extensions{}, err
			}
		}
	}
	// We only ever return nil, but leaving error in place so that we can add
	// more complex parsing of fields in a backwards compatible way if needed.
	return out, nil
}
// ParseDERString decodes a DER-encoded string and puts the value in parsedVal.
// Returns an error if the unmarshalling fails or if there are trailing bytes in the encoding.
func ParseDERString(val []byte, parsedVal *string) error {
	rest, err := asn1.Unmarshal(val, parsedVal)
	if err != nil {
		return fmt.Errorf("unexpected error unmarshalling DER-encoded string: %w", err)
	}
	// Data after a complete DER value indicates a malformed extension.
	if len(rest) != 0 {
		return errors.New("unexpected trailing bytes in DER-encoded string")
	}
	return nil
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package certificate
import (
"crypto/x509"
"errors"
"fmt"
"reflect"
"github.com/sigstore/sigstore/pkg/cryptoutils"
)
// Summary condenses a Fulcio certificate into its issuer, subject
// alternative name, and embedded Fulcio extensions.
type Summary struct {
	CertificateIssuer string `json:"certificateIssuer"`
	SubjectAlternativeName string `json:"subjectAlternativeName"`
	Extensions
}
// ErrCompareExtensions reports the first extension field whose actual
// value did not match the expected value during comparison.
type ErrCompareExtensions struct {
	field    string
	expected string
	actual   string
}

// Error formats the mismatch as a human-readable message.
func (e *ErrCompareExtensions) Error() string {
	return fmt.Sprintf(`expected %s to be "%s", got "%s"`, e.field, e.expected, e.actual)
}
// SummarizeCertificate extracts the issuer, subject alternative name, and
// Fulcio extensions from cert into a Summary. SANs are preferred in the
// order: URI, email address, OtherName; an error is returned when none is
// present.
func SummarizeCertificate(cert *x509.Certificate) (Summary, error) {
	extensions, err := ParseExtensions(cert.Extensions)
	if err != nil {
		return Summary{}, err
	}
	var san string
	if len(cert.URIs) > 0 {
		san = cert.URIs[0].String()
	} else if len(cert.EmailAddresses) > 0 {
		san = cert.EmailAddresses[0]
	}
	if san == "" {
		// Best-effort OtherName lookup; absence is handled below.
		san, _ = cryptoutils.UnmarshalOtherNameSAN(cert.Extensions)
	}
	if san == "" {
		return Summary{}, errors.New("no Subject Alternative Name found")
	}
	return Summary{
		CertificateIssuer:      cert.Issuer.String(),
		SubjectAlternativeName: san,
		Extensions:             extensions,
	}, nil
}
// CompareExtensions compares two Extensions structs and returns an error
// if any field set in expectedExt does not equal the corresponding field
// in actualExt. Empty fields in expectedExt are ignored.
func CompareExtensions(expectedExt, actualExt Extensions) error {
	expVal := reflect.ValueOf(expectedExt)
	actVal := reflect.ValueOf(actualExt)
	for _, field := range reflect.VisibleFields(expVal.Type()) {
		expField := expVal.FieldByName(field.Name)
		// Unset expectations are not checked.
		if !expField.IsValid() || expField.IsZero() {
			continue
		}
		actField := actVal.FieldByName(field.Name)
		if !actField.IsValid() {
			continue
		}
		if expField.Interface() != actField.Interface() {
			return &ErrCompareExtensions{
				field:    field.Name,
				expected: fmt.Sprintf("%v", expField.Interface()),
				actual:   fmt.Sprintf("%v", actField.Interface()),
			}
		}
	}
	return nil
}
// Copyright 2024 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package root
import (
"crypto/x509"
"errors"
"time"
)
type CertificateAuthority interface {
Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error)
}
type FulcioCertificateAuthority struct {
Root *x509.Certificate
Intermediates []*x509.Certificate
ValidityPeriodStart time.Time
ValidityPeriodEnd time.Time
URI string
}
var _ CertificateAuthority = &FulcioCertificateAuthority{}
func (ca *FulcioCertificateAuthority) Verify(cert *x509.Certificate, observerTimestamp time.Time) ([][]*x509.Certificate, error) {
if !ca.ValidityPeriodStart.IsZero() && observerTimestamp.Before(ca.ValidityPeriodStart) {
return nil, errors.New("certificate is not valid yet")
}
if !ca.ValidityPeriodEnd.IsZero() && observerTimestamp.After(ca.ValidityPeriodEnd) {
return nil, errors.New("certificate is no longer valid")
}
rootCertPool := x509.NewCertPool()
rootCertPool.AddCert(ca.Root)
intermediateCertPool := x509.NewCertPool()
for _, cert := range ca.Intermediates {
intermediateCertPool.AddCert(cert)
}
// From spec:
// > ## Certificate
// > For a signature with a given certificate to be considered valid, it must have a timestamp while every certificate in the chain up to the root is valid (the so-called “hybrid model” of certificate verification per Braun et al. (2013)).
opts := x509.VerifyOptions{
CurrentTime: observerTimestamp,
Roots: rootCertPool,
Intermediates: intermediateCertPool,
KeyUsages: []x509.ExtKeyUsage{
x509.ExtKeyUsageCodeSigning,
},
}
return cert.Verify(opts)
}
// Copyright 2024 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package root
import (
"fmt"
"math/rand"
"os"
"slices"
"time"
v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1"
"github.com/sigstore/sigstore-go/pkg/tuf"
"google.golang.org/protobuf/encoding/protojson"
"google.golang.org/protobuf/types/known/timestamppb"
)
// SigningConfigMediaType02 is the only signing-config media type this package accepts.
const SigningConfigMediaType02 = "application/vnd.dev.sigstore.signingconfig.v0.2+json"

// SigningConfig wraps the protobuf SigningConfig, which lists the service
// endpoints (Fulcio CAs, OIDC providers, Rekor logs, TSAs) a signer should use.
type SigningConfig struct {
	signingConfig *prototrustroot.SigningConfig
}
// Service is a native representation of a prototrustroot.Service: one endpoint
// with its major API version, validity window, and operator name. Zero
// validity times mean the bound is unset.
type Service struct {
	URL                 string
	MajorAPIVersion     uint32
	ValidityPeriodStart time.Time
	ValidityPeriodEnd   time.Time
	Operator            string
}

// ServiceConfiguration holds the selection criteria (ALL, ANY, or EXACT with a
// count) used when picking services from a candidate list.
type ServiceConfiguration struct {
	Selector prototrustroot.ServiceSelector
	Count    uint32
}
// NewService converts a protobuf Service into its native representation.
// Absent validity bounds are left as zero times.
func NewService(s *prototrustroot.Service) Service {
	svc := Service{
		URL:             s.GetUrl(),
		MajorAPIVersion: s.GetMajorApiVersion(),
		Operator:        s.GetOperator(),
	}
	validFor := s.GetValidFor()
	if ts := validFor.GetStart(); ts != nil {
		svc.ValidityPeriodStart = ts.AsTime()
	}
	if ts := validFor.GetEnd(); ts != nil {
		svc.ValidityPeriodEnd = ts.AsTime()
	}
	return svc
}
// SelectService returns which service endpoint should be used based on supported API versions
// and current time. It will select the first service with the highest API version that matches
// the criteria. Services should be sorted from newest to oldest validity period start time, to
// minimize how far clients need to search to find a matching service.
func SelectService(services []Service, supportedAPIVersions []uint32, currentTime time.Time) (Service, error) {
	if len(supportedAPIVersions) == 0 {
		return Service{}, fmt.Errorf("no supported API versions")
	}
	// Highest supported API version first.
	versions := make([]uint32, len(supportedAPIVersions))
	copy(versions, supportedAPIVersions)
	slices.Sort(versions)
	slices.Reverse(versions)
	// Newest validity period start first.
	byNewest := make([]Service, len(services))
	copy(byNewest, services)
	slices.SortFunc(byNewest, func(a, b Service) int {
		return a.ValidityPeriodStart.Compare(b.ValidityPeriodStart)
	})
	slices.Reverse(byNewest)
	// Return the first currently-valid service on the highest version that has one.
	for _, version := range versions {
		for _, svc := range byNewest {
			if svc.MajorAPIVersion == version && svc.ValidAtTime(currentTime) {
				return svc, nil
			}
		}
	}
	return Service{}, fmt.Errorf("no matching service found for API versions %v and current time %v", supportedAPIVersions, currentTime)
}
// SelectServices returns which service endpoints should be used based on supported API versions
// and current time. It will use the configuration's selector to pick a set of services.
// ALL will return all service endpoints, ANY will return a random endpoint, and
// EXACT will return a random selection of a specified number of endpoints.
// It will select services from the highest supported API versions and will not select
// services from different API versions. It will select distinct service operators, selecting
// at most one service per operator.
func SelectServices(services []Service, config ServiceConfiguration, supportedAPIVersions []uint32, currentTime time.Time) ([]Service, error) {
	if len(supportedAPIVersions) == 0 {
		return nil, fmt.Errorf("no supported API versions")
	}
	// Highest supported API version first.
	versions := make([]uint32, len(supportedAPIVersions))
	copy(versions, supportedAPIVersions)
	slices.Sort(versions)
	slices.Reverse(versions)
	// Newest validity period start first, so the first match per operator is the newest.
	byNewest := make([]Service, len(services))
	copy(byNewest, services)
	slices.SortFunc(byNewest, func(a, b Service) int {
		return a.ValidityPeriodStart.Compare(b.ValidityPeriodStart)
	})
	slices.Reverse(byNewest)
	seenOperators := make(map[string]bool)
	var candidates []Service
	for _, version := range versions {
		for _, svc := range byNewest {
			if svc.MajorAPIVersion != version || !svc.ValidAtTime(currentTime) {
				continue
			}
			// At most one (the newest) service per operator.
			if !seenOperators[svc.Operator] {
				seenOperators[svc.Operator] = true
				candidates = append(candidates, svc)
			}
		}
		// Stop at the first (highest) version that yielded candidates.
		if len(candidates) != 0 {
			break
		}
	}
	if len(candidates) == 0 {
		return nil, fmt.Errorf("no matching services found for API versions %v and current time %v", supportedAPIVersions, currentTime)
	}
	// Apply the configured selector to the candidate set.
	switch config.Selector {
	case prototrustroot.ServiceSelector_ALL:
		return candidates, nil
	case prototrustroot.ServiceSelector_ANY:
		pick := rand.Intn(len(candidates)) // #nosec G404
		return []Service{candidates[pick]}, nil
	case prototrustroot.ServiceSelector_EXACT:
		chosen, err := selectExact(candidates, config.Count)
		if err != nil {
			return nil, err
		}
		return chosen, nil
	default:
		return nil, fmt.Errorf("invalid service selector")
	}
}
func selectExact[T any](slice []T, count uint32) ([]T, error) {
if count == 0 {
return nil, fmt.Errorf("service selector count must be greater than 0")
}
if int(count) > len(slice) {
return nil, fmt.Errorf("service selector count %d must be less than or equal to the slice length %d", count, len(slice))
}
sliceCopy := make([]T, len(slice))
copy(sliceCopy, slice)
var result []T
for range count {
i := rand.Intn(len(sliceCopy)) // #nosec G404
result = append(result, sliceCopy[i])
// Remove element from slice
sliceCopy[i], sliceCopy[len(sliceCopy)-1] = sliceCopy[len(sliceCopy)-1], sliceCopy[i]
sliceCopy = sliceCopy[:len(sliceCopy)-1]
}
return result, nil
}
// mapFunc applies fn to every element of ts and returns the results in order.
func mapFunc[T, V any](ts []T, fn func(T) V) []V {
	out := make([]V, len(ts))
	for idx := range ts {
		out[idx] = fn(ts[idx])
	}
	return out
}
// ValidAtTime reports whether t falls within the service's validity period.
// A zero start or end bound is treated as unbounded on that side.
func (s Service) ValidAtTime(t time.Time) bool {
	started := s.ValidityPeriodStart.IsZero() || !t.Before(s.ValidityPeriodStart)
	notEnded := s.ValidityPeriodEnd.IsZero() || !t.After(s.ValidityPeriodEnd)
	return started && notEnded
}
// ToServiceProtobuf converts the Service to its protobuf representation.
//
// NOTE(review): Start is always populated, even when ValidityPeriodStart is the
// zero time, while End is only set when non-zero. This is asymmetric with
// NewService, which treats an absent Start as the zero time — confirm whether
// a zero start should also be omitted.
func (s Service) ToServiceProtobuf() *prototrustroot.Service {
	tr := &v1.TimeRange{
		Start: timestamppb.New(s.ValidityPeriodStart),
	}
	if !s.ValidityPeriodEnd.IsZero() {
		tr.End = timestamppb.New(s.ValidityPeriodEnd)
	}
	return &prototrustroot.Service{
		Url:             s.URL,
		MajorApiVersion: s.MajorAPIVersion,
		ValidFor:        tr,
		Operator:        s.Operator,
	}
}
// ToConfigProtobuf converts the ServiceConfiguration to its protobuf representation.
func (sc ServiceConfiguration) ToConfigProtobuf() *prototrustroot.ServiceConfiguration {
	return &prototrustroot.ServiceConfiguration{
		Selector: sc.Selector,
		Count:    sc.Count,
	}
}
// FulcioCertificateAuthorityURLs returns the Fulcio certificate authority
// services listed in the signing config.
func (sc *SigningConfig) FulcioCertificateAuthorityURLs() []Service {
	var out []Service
	for _, svc := range sc.signingConfig.GetCaUrls() {
		out = append(out, NewService(svc))
	}
	return out
}

// OIDCProviderURLs returns the OIDC provider services listed in the signing config.
func (sc *SigningConfig) OIDCProviderURLs() []Service {
	var out []Service
	for _, svc := range sc.signingConfig.GetOidcUrls() {
		out = append(out, NewService(svc))
	}
	return out
}

// RekorLogURLs returns the Rekor transparency log services listed in the signing config.
func (sc *SigningConfig) RekorLogURLs() []Service {
	var out []Service
	for _, svc := range sc.signingConfig.GetRekorTlogUrls() {
		out = append(out, NewService(svc))
	}
	return out
}
// RekorLogURLsConfig returns the selection criteria for Rekor transparency logs.
// Uses the nil-safe protobuf getters so a signing config without a Rekor tlog
// config yields the zero ServiceConfiguration instead of a nil-pointer panic
// (the previous direct field accesses panicked when GetRekorTlogConfig()
// returned nil).
func (sc *SigningConfig) RekorLogURLsConfig() ServiceConfiguration {
	c := sc.signingConfig.GetRekorTlogConfig()
	return ServiceConfiguration{
		Selector: c.GetSelector(),
		Count:    c.GetCount(),
	}
}
// TimestampAuthorityURLs returns the timestamp authority services listed in
// the signing config.
func (sc *SigningConfig) TimestampAuthorityURLs() []Service {
	var out []Service
	for _, svc := range sc.signingConfig.GetTsaUrls() {
		out = append(out, NewService(svc))
	}
	return out
}
// TimestampAuthorityURLsConfig returns the selection criteria for timestamp
// authorities. Uses the nil-safe protobuf getters so a signing config without
// a TSA config yields the zero ServiceConfiguration instead of a nil-pointer
// panic (the previous direct field accesses panicked when GetTsaConfig()
// returned nil).
func (sc *SigningConfig) TimestampAuthorityURLsConfig() ServiceConfiguration {
	c := sc.signingConfig.GetTsaConfig()
	return ServiceConfiguration{
		Selector: c.GetSelector(),
		Count:    c.GetCount(),
	}
}
// WithFulcioCertificateAuthorityURLs replaces the signing config's Fulcio CA
// services and returns the receiver for chaining.
func (sc *SigningConfig) WithFulcioCertificateAuthorityURLs(fulcioURLs ...Service) *SigningConfig {
	var pb []*prototrustroot.Service
	for _, svc := range fulcioURLs {
		pb = append(pb, svc.ToServiceProtobuf())
	}
	sc.signingConfig.CaUrls = pb
	return sc
}

// AddFulcioCertificateAuthorityURLs appends Fulcio CA services to the signing
// config and returns the receiver for chaining.
func (sc *SigningConfig) AddFulcioCertificateAuthorityURLs(fulcioURLs ...Service) *SigningConfig {
	for _, svc := range fulcioURLs {
		sc.signingConfig.CaUrls = append(sc.signingConfig.CaUrls, svc.ToServiceProtobuf())
	}
	return sc
}

// WithOIDCProviderURLs replaces the signing config's OIDC provider services
// and returns the receiver for chaining.
func (sc *SigningConfig) WithOIDCProviderURLs(oidcURLs ...Service) *SigningConfig {
	var pb []*prototrustroot.Service
	for _, svc := range oidcURLs {
		pb = append(pb, svc.ToServiceProtobuf())
	}
	sc.signingConfig.OidcUrls = pb
	return sc
}

// AddOIDCProviderURLs appends OIDC provider services to the signing config
// and returns the receiver for chaining.
func (sc *SigningConfig) AddOIDCProviderURLs(oidcURLs ...Service) *SigningConfig {
	for _, svc := range oidcURLs {
		sc.signingConfig.OidcUrls = append(sc.signingConfig.OidcUrls, svc.ToServiceProtobuf())
	}
	return sc
}

// WithRekorLogURLs replaces the signing config's Rekor log services and
// returns the receiver for chaining.
func (sc *SigningConfig) WithRekorLogURLs(logURLs ...Service) *SigningConfig {
	var pb []*prototrustroot.Service
	for _, svc := range logURLs {
		pb = append(pb, svc.ToServiceProtobuf())
	}
	sc.signingConfig.RekorTlogUrls = pb
	return sc
}

// AddRekorLogURLs appends Rekor log services to the signing config and
// returns the receiver for chaining.
func (sc *SigningConfig) AddRekorLogURLs(logURLs ...Service) *SigningConfig {
	for _, svc := range logURLs {
		sc.signingConfig.RekorTlogUrls = append(sc.signingConfig.RekorTlogUrls, svc.ToServiceProtobuf())
	}
	return sc
}
// WithRekorTlogConfig sets the Rekor log selection criteria and returns the
// receiver for chaining. It replaces the whole config message so it also works
// when the existing RekorTlogConfig is nil — the previous per-field writes
// dereferenced a possibly-nil pointer and panicked in that case.
func (sc *SigningConfig) WithRekorTlogConfig(selector prototrustroot.ServiceSelector, count uint32) *SigningConfig {
	sc.signingConfig.RekorTlogConfig = &prototrustroot.ServiceConfiguration{
		Selector: selector,
		Count:    count,
	}
	return sc
}
// WithTimestampAuthorityURLs replaces the signing config's TSA services and
// returns the receiver for chaining.
func (sc *SigningConfig) WithTimestampAuthorityURLs(tsaURLs ...Service) *SigningConfig {
	var pb []*prototrustroot.Service
	for _, svc := range tsaURLs {
		pb = append(pb, svc.ToServiceProtobuf())
	}
	sc.signingConfig.TsaUrls = pb
	return sc
}

// AddTimestampAuthorityURLs appends TSA services to the signing config and
// returns the receiver for chaining.
func (sc *SigningConfig) AddTimestampAuthorityURLs(tsaURLs ...Service) *SigningConfig {
	for _, svc := range tsaURLs {
		sc.signingConfig.TsaUrls = append(sc.signingConfig.TsaUrls, svc.ToServiceProtobuf())
	}
	return sc
}
// WithTsaConfig sets the TSA selection criteria and returns the receiver for
// chaining. It replaces the whole config message so it also works when the
// existing TsaConfig is nil — the previous per-field writes dereferenced a
// possibly-nil pointer and panicked in that case.
func (sc *SigningConfig) WithTsaConfig(selector prototrustroot.ServiceSelector, count uint32) *SigningConfig {
	sc.signingConfig.TsaConfig = &prototrustroot.ServiceConfiguration{
		Selector: selector,
		Count:    count,
	}
	return sc
}
// String renders the signing config's services and media type for debugging.
func (sc SigningConfig) String() string {
	return fmt.Sprintf("{CA: %v, OIDC: %v, RekorLogs: %v, TSAs: %v, MediaType: %s}",
		sc.FulcioCertificateAuthorityURLs(),
		sc.OIDCProviderURLs(),
		sc.RekorLogURLs(),
		sc.TimestampAuthorityURLs(),
		SigningConfigMediaType02)
}

// MarshalJSON encodes the underlying protobuf signing config with protojson.
func (sc SigningConfig) MarshalJSON() ([]byte, error) {
	return protojson.Marshal(sc.signingConfig)
}
// NewSigningConfig initializes a SigningConfig object from a mediaType string, Fulcio certificate
// authority URLs, OIDC provider URLs, Rekor transparency log URLs, timestamp authorities URLs,
// selection criteria for Rekor logs and TSAs.
// Only SigningConfigMediaType02 is accepted as the media type.
func NewSigningConfig(mediaType string,
	fulcioCertificateAuthorities []Service,
	oidcProviders []Service,
	rekorLogs []Service,
	rekorLogsConfig ServiceConfiguration,
	timestampAuthorities []Service,
	timestampAuthoritiesConfig ServiceConfiguration) (*SigningConfig, error) {
	if mediaType != SigningConfigMediaType02 {
		return nil, fmt.Errorf("unsupported SigningConfig media type, must be: %s", SigningConfigMediaType02)
	}
	sc := &SigningConfig{
		signingConfig: &prototrustroot.SigningConfig{
			MediaType:       mediaType,
			CaUrls:          mapFunc(fulcioCertificateAuthorities, Service.ToServiceProtobuf),
			OidcUrls:        mapFunc(oidcProviders, Service.ToServiceProtobuf),
			RekorTlogUrls:   mapFunc(rekorLogs, Service.ToServiceProtobuf),
			RekorTlogConfig: rekorLogsConfig.ToConfigProtobuf(),
			TsaUrls:         mapFunc(timestampAuthorities, Service.ToServiceProtobuf),
			TsaConfig:       timestampAuthoritiesConfig.ToConfigProtobuf(),
		},
	}
	return sc, nil
}
// NewSigningConfigFromProtobuf returns a Sigstore signing configuration.
func NewSigningConfigFromProtobuf(sc *prototrustroot.SigningConfig) (*SigningConfig, error) {
	if mediaType := sc.GetMediaType(); mediaType != SigningConfigMediaType02 {
		return nil, fmt.Errorf("unsupported SigningConfig media type: %s", mediaType)
	}
	return &SigningConfig{signingConfig: sc}, nil
}
// NewSigningConfigFromPath returns a Sigstore signing configuration from a file.
func NewSigningConfigFromPath(path string) (*SigningConfig, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, err
	}
	return NewSigningConfigFromJSON(data)
}
// NewSigningConfigFromJSON returns a Sigstore signing configuration from JSON.
func NewSigningConfigFromJSON(rootJSON []byte) (*SigningConfig, error) {
	pb, err := NewSigningConfigProtobuf(rootJSON)
	if err != nil {
		return nil, err
	}
	return NewSigningConfigFromProtobuf(pb)
}
// NewSigningConfigProtobuf returns a Sigstore signing configuration as a protobuf.
func NewSigningConfigProtobuf(scJSON []byte) (*prototrustroot.SigningConfig, error) {
	pb := &prototrustroot.SigningConfig{}
	if err := protojson.Unmarshal(scJSON, pb); err != nil {
		return nil, err
	}
	return pb, nil
}
// FetchSigningConfig fetches the public-good Sigstore signing configuration from TUF.
func FetchSigningConfig() (*SigningConfig, error) {
	return FetchSigningConfigWithOptions(tuf.DefaultOptions())
}

// FetchSigningConfigWithOptions fetches the public-good Sigstore signing configuration with the given options from TUF.
func FetchSigningConfigWithOptions(opts *tuf.Options) (*SigningConfig, error) {
	client, err := tuf.New(opts)
	if err != nil {
		return nil, err
	}
	return GetSigningConfig(client)
}
// GetSigningConfig fetches the public-good Sigstore signing configuration target from TUF.
func GetSigningConfig(c *tuf.Client) (*SigningConfig, error) {
	raw, err := c.GetTarget("signing_config.v0.2.json")
	if err != nil {
		return nil, err
	}
	return NewSigningConfigFromJSON(raw)
}
// Copyright 2024 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package root
import (
"bytes"
"crypto/x509"
"errors"
"time"
tsaverification "github.com/sigstore/timestamp-authority/v2/pkg/verification"
)
// Timestamp is a verified point in time attested by a timestamping authority.
type Timestamp struct {
	Time time.Time
	// URI of the authority that produced the timestamp.
	URI string
}

// TimestampingAuthority verifies a signed timestamp over the given signature
// bytes and returns the attested time.
type TimestampingAuthority interface {
	Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error)
}

// SigstoreTimestampingAuthority is a TimestampingAuthority backed by an X.509
// certificate chain and an optional validity window (zero bounds mean
// unbounded).
type SigstoreTimestampingAuthority struct {
	Root                *x509.Certificate
	Intermediates       []*x509.Certificate
	Leaf                *x509.Certificate
	ValidityPeriodStart time.Time
	ValidityPeriodEnd   time.Time
	URI                 string
}

// Compile-time interface check.
var _ TimestampingAuthority = &SigstoreTimestampingAuthority{}
// Verify validates signedTimestamp against this authority's certificate chain
// and checks it covers signatureBytes. The attested time must also fall within
// the authority's validity window (zero bounds mean unbounded).
func (tsa *SigstoreTimestampingAuthority) Verify(signedTimestamp []byte, signatureBytes []byte) (*Timestamp, error) {
	if tsa.Root == nil {
		// Build "timestamping authority <uri> root certificate is nil",
		// omitting the URI segment when it is unset.
		var tsaURIDisplay string
		if tsa.URI != "" {
			tsaURIDisplay = tsa.URI + " "
		}
		return nil, errors.New("timestamping authority " + tsaURIDisplay + "root certificate is nil")
	}
	trustedRootVerificationOptions := tsaverification.VerifyOpts{
		Roots:          []*x509.Certificate{tsa.Root},
		Intermediates:  tsa.Intermediates,
		TSACertificate: tsa.Leaf,
	}
	// Ensure timestamp responses are from trusted sources
	timestamp, err := tsaverification.VerifyTimestampResponse(signedTimestamp, bytes.NewReader(signatureBytes), trustedRootVerificationOptions)
	if err != nil {
		return nil, err
	}
	if !tsa.ValidityPeriodStart.IsZero() && timestamp.Time.Before(tsa.ValidityPeriodStart) {
		return nil, errors.New("timestamp is before the validity period start")
	}
	if !tsa.ValidityPeriodEnd.IsZero() && timestamp.Time.After(tsa.ValidityPeriodEnd) {
		return nil, errors.New("timestamp is after the validity period end")
	}
	// All verification above succeeded, so return the verified timestamp.
	return &Timestamp{Time: timestamp.Time, URI: tsa.URI}, nil
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package root
import (
"fmt"
"time"
"github.com/sigstore/sigstore/pkg/signature"
)
// TrustedMaterial provides the trust anchors used during verification:
// timestamping authorities, Fulcio certificate authorities, Rekor and CT log
// verifiers (keyed by log key ID), and public key verifiers (keyed by key ID).
type TrustedMaterial interface {
	TimestampingAuthorities() []TimestampingAuthority
	FulcioCertificateAuthorities() []CertificateAuthority
	RekorLogs() map[string]*TransparencyLog
	CTLogs() map[string]*TransparencyLog
	PublicKeyVerifier(string) (TimeConstrainedVerifier, error)
}
// BaseTrustedMaterial is an empty default implementation of TrustedMaterial,
// meant for embedding so concrete types only override the methods they support.
type BaseTrustedMaterial struct{}

// TimestampingAuthorities returns an empty list of timestamping authorities.
func (b *BaseTrustedMaterial) TimestampingAuthorities() []TimestampingAuthority {
	return []TimestampingAuthority{}
}

// FulcioCertificateAuthorities returns an empty list of certificate authorities.
func (b *BaseTrustedMaterial) FulcioCertificateAuthorities() []CertificateAuthority {
	return []CertificateAuthority{}
}

// RekorLogs returns an empty map of Rekor transparency logs.
func (b *BaseTrustedMaterial) RekorLogs() map[string]*TransparencyLog {
	return map[string]*TransparencyLog{}
}

// CTLogs returns an empty map of certificate transparency logs.
func (b *BaseTrustedMaterial) CTLogs() map[string]*TransparencyLog {
	return map[string]*TransparencyLog{}
}

// PublicKeyVerifier always fails; embedding types override this to supply keys.
func (b *BaseTrustedMaterial) PublicKeyVerifier(_ string) (TimeConstrainedVerifier, error) {
	return nil, fmt.Errorf("public key verifier not found")
}

// TrustedMaterialCollection aggregates several TrustedMaterial sources into one.
type TrustedMaterialCollection []TrustedMaterial

// Ensure types implement interfaces
var _ TrustedMaterial = &BaseTrustedMaterial{}
var _ TrustedMaterial = TrustedMaterialCollection{}
// PublicKeyVerifier returns the verifier from the first member of the
// collection that can supply one for keyID, or an error if none can.
func (tmc TrustedMaterialCollection) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) {
	for _, material := range tmc {
		if verifier, err := material.PublicKeyVerifier(keyID); err == nil {
			return verifier, nil
		}
	}
	return nil, fmt.Errorf("public key verifier not found for keyID: %s", keyID)
}
// TimestampingAuthorities concatenates the timestamping authorities of every
// member of the collection.
func (tmc TrustedMaterialCollection) TimestampingAuthorities() []TimestampingAuthority {
	var all []TimestampingAuthority
	for _, material := range tmc {
		all = append(all, material.TimestampingAuthorities()...)
	}
	return all
}

// FulcioCertificateAuthorities concatenates the certificate authorities of
// every member of the collection.
func (tmc TrustedMaterialCollection) FulcioCertificateAuthorities() []CertificateAuthority {
	var all []CertificateAuthority
	for _, material := range tmc {
		all = append(all, material.FulcioCertificateAuthorities()...)
	}
	return all
}
// RekorLogs merges the Rekor log maps of every member; on duplicate key IDs,
// later members overwrite earlier ones.
func (tmc TrustedMaterialCollection) RekorLogs() map[string]*TransparencyLog {
	merged := make(map[string]*TransparencyLog)
	for _, material := range tmc {
		for keyID, verifier := range material.RekorLogs() {
			merged[keyID] = verifier
		}
	}
	return merged
}

// CTLogs merges the CT log maps of every member; on duplicate key IDs, later
// members overwrite earlier ones.
func (tmc TrustedMaterialCollection) CTLogs() map[string]*TransparencyLog {
	merged := make(map[string]*TransparencyLog)
	for _, material := range tmc {
		for keyID, verifier := range material.CTLogs() {
			merged[keyID] = verifier
		}
	}
	return merged
}
// ValidityPeriodChecker reports whether something is valid at a given time.
type ValidityPeriodChecker interface {
	ValidAtTime(time.Time) bool
}

// TimeConstrainedVerifier is a signature verifier with a validity period.
type TimeConstrainedVerifier interface {
	ValidityPeriodChecker
	signature.Verifier
}

// TrustedPublicKeyMaterial is a TrustedMaterial that only supplies public key
// verifiers, resolved through a caller-provided lookup function.
type TrustedPublicKeyMaterial struct {
	BaseTrustedMaterial
	// publicKeyVerifier maps a key ID to its verifier.
	publicKeyVerifier func(string) (TimeConstrainedVerifier, error)
}

// PublicKeyVerifier resolves keyID through the configured lookup function.
func (tr *TrustedPublicKeyMaterial) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) {
	return tr.publicKeyVerifier(keyID)
}

// NewTrustedPublicKeyMaterial constructs a TrustedPublicKeyMaterial from a
// keyID-to-verifier lookup function.
func NewTrustedPublicKeyMaterial(publicKeyVerifier func(string) (TimeConstrainedVerifier, error)) *TrustedPublicKeyMaterial {
	return &TrustedPublicKeyMaterial{
		publicKeyVerifier: publicKeyVerifier,
	}
}
// ExpiringKey is a TimeConstrainedVerifier with a static validity period.
type ExpiringKey struct {
	signature.Verifier
	// Zero values mean the corresponding bound is unset (unbounded).
	validityPeriodStart time.Time
	validityPeriodEnd   time.Time
}

// Compile-time interface check.
var _ TimeConstrainedVerifier = &ExpiringKey{}
// ValidAtTime returns true if the key is valid at the given time. If the
// validity period start time is not set, the key is considered valid for all
// times before the end time. Likewise, if the validity period end time is not
// set, the key is considered valid for all times after the start time.
func (k *ExpiringKey) ValidAtTime(t time.Time) bool {
	switch {
	case !k.validityPeriodStart.IsZero() && t.Before(k.validityPeriodStart):
		return false
	case !k.validityPeriodEnd.IsZero() && t.After(k.validityPeriodEnd):
		return false
	default:
		return true
	}
}
// NewExpiringKey returns a new ExpiringKey with the given validity period
func NewExpiringKey(verifier signature.Verifier, validityPeriodStart, validityPeriodEnd time.Time) *ExpiringKey {
	key := &ExpiringKey{Verifier: verifier}
	key.validityPeriodStart = validityPeriodStart
	key.validityPeriodEnd = validityPeriodEnd
	return key
}

// NewTrustedPublicKeyMaterialFromMapping returns a TrustedPublicKeyMaterial from a map of key IDs to
// ExpiringKeys.
func NewTrustedPublicKeyMaterialFromMapping(trustedPublicKeys map[string]*ExpiringKey) *TrustedPublicKeyMaterial {
	lookup := func(keyID string) (TimeConstrainedVerifier, error) {
		key, found := trustedPublicKeys[keyID]
		if !found {
			return nil, fmt.Errorf("public key not found for keyID: %s", keyID)
		}
		return key, nil
	}
	return NewTrustedPublicKeyMaterial(lookup)
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package root
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rsa"
"crypto/x509"
"encoding/hex"
"fmt"
"log"
"os"
"sync"
"time"
protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1"
"github.com/sigstore/sigstore-go/pkg/tuf"
"google.golang.org/protobuf/encoding/protojson"
)
// TrustedRootMediaType01 is the only trusted-root media type this package accepts.
const TrustedRootMediaType01 = "application/vnd.dev.sigstore.trustedroot+json;version=0.1"

// TrustedRoot is a TrustedMaterial backed by a Sigstore trusted root document,
// holding the parsed certificate authorities, timestamping authorities, and
// transparency log verifiers.
type TrustedRoot struct {
	BaseTrustedMaterial
	trustedRoot             *prototrustroot.TrustedRoot
	rekorLogs               map[string]*TransparencyLog
	certificateAuthorities  []CertificateAuthority
	ctLogs                  map[string]*TransparencyLog
	timestampingAuthorities []TimestampingAuthority
}

// TransparencyLog describes one transparency log instance and the key material
// needed to verify entries from it.
type TransparencyLog struct {
	BaseURL             string
	ID                  []byte
	ValidityPeriodStart time.Time
	ValidityPeriodEnd   time.Time
	// This is the hash algorithm used by the Merkle tree
	HashFunc  crypto.Hash
	PublicKey crypto.PublicKey
	// The hash algorithm used during signature creation
	SignatureHashFunc crypto.Hash
}

const (
	// defaultTrustedRoot is the TUF target name for the trusted root document.
	defaultTrustedRoot = "trusted_root.json"
)
// TimestampingAuthorities returns the trusted root's timestamping authorities.
func (tr *TrustedRoot) TimestampingAuthorities() []TimestampingAuthority {
	return tr.timestampingAuthorities
}

// FulcioCertificateAuthorities returns the trusted root's Fulcio certificate authorities.
func (tr *TrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority {
	return tr.certificateAuthorities
}

// RekorLogs returns the trusted root's Rekor logs, keyed by hex-encoded log key ID.
func (tr *TrustedRoot) RekorLogs() map[string]*TransparencyLog {
	return tr.rekorLogs
}

// CTLogs returns the trusted root's CT logs, keyed by hex-encoded log key ID.
func (tr *TrustedRoot) CTLogs() map[string]*TransparencyLog {
	return tr.ctLogs
}

// MarshalJSON serializes the trusted root as protojson, first rebuilding the
// protobuf representation from the parsed fields.
func (tr *TrustedRoot) MarshalJSON() ([]byte, error) {
	err := tr.constructProtoTrustRoot()
	if err != nil {
		return nil, fmt.Errorf("failed constructing protobuf TrustRoot representation: %w", err)
	}
	return protojson.Marshal(tr.trustedRoot)
}
// NewTrustedRootFromProtobuf parses a protobuf TrustedRoot into its native
// representation, validating the media type and every embedded log, CA, and
// TSA entry.
func NewTrustedRootFromProtobuf(protobufTrustedRoot *prototrustroot.TrustedRoot) (trustedRoot *TrustedRoot, err error) {
	if mediaType := protobufTrustedRoot.GetMediaType(); mediaType != TrustedRootMediaType01 {
		return nil, fmt.Errorf("unsupported TrustedRoot media type: %s", mediaType)
	}
	tr := &TrustedRoot{trustedRoot: protobufTrustedRoot}
	if tr.rekorLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetTlogs()); err != nil {
		return nil, err
	}
	if tr.certificateAuthorities, err = ParseCertificateAuthorities(protobufTrustedRoot.GetCertificateAuthorities()); err != nil {
		return nil, err
	}
	if tr.timestampingAuthorities, err = ParseTimestampingAuthorities(protobufTrustedRoot.GetTimestampAuthorities()); err != nil {
		return nil, err
	}
	if tr.ctLogs, err = ParseTransparencyLogs(protobufTrustedRoot.GetCtlogs()); err != nil {
		return nil, err
	}
	return tr, nil
}
// ParseTransparencyLogs converts protobuf transparency log instances into a
// map keyed by the hex-encoded log key ID. Each entry must carry a log ID, a
// public key with raw bytes, a SHA2-256 Merkle tree hash algorithm, and a
// public key validity period with a start time.
//
// Fix: the Ed25519 branch previously reported "tlog public key is not RSA"
// (copy-pasted from the RSA branch); it now reports Ed25519.
func ParseTransparencyLogs(tlogs []*prototrustroot.TransparencyLogInstance) (transparencyLogs map[string]*TransparencyLog, err error) {
	transparencyLogs = make(map[string]*TransparencyLog)
	for _, tlog := range tlogs {
		// Only SHA2-256 Merkle trees are supported.
		if tlog.GetHashAlgorithm() != protocommon.HashAlgorithm_SHA2_256 {
			return nil, fmt.Errorf("unsupported tlog hash algorithm: %s", tlog.GetHashAlgorithm())
		}
		if tlog.GetLogId() == nil {
			return nil, fmt.Errorf("tlog missing log ID")
		}
		if tlog.GetLogId().GetKeyId() == nil {
			return nil, fmt.Errorf("tlog missing log ID key ID")
		}
		encodedKeyID := hex.EncodeToString(tlog.GetLogId().GetKeyId())
		if tlog.GetPublicKey() == nil {
			return nil, fmt.Errorf("tlog missing public key")
		}
		if tlog.GetPublicKey().GetRawBytes() == nil {
			return nil, fmt.Errorf("tlog missing public key raw bytes")
		}
		var hashFunc crypto.Hash
		switch tlog.GetHashAlgorithm() {
		case protocommon.HashAlgorithm_SHA2_256:
			hashFunc = crypto.SHA256
		default:
			return nil, fmt.Errorf("unsupported hash function for the tlog")
		}
		tlogEntry := &TransparencyLog{
			BaseURL:           tlog.GetBaseUrl(),
			ID:                tlog.GetLogId().GetKeyId(),
			HashFunc:          hashFunc,
			SignatureHashFunc: crypto.SHA256,
		}
		// Parse the public key according to its declared key details.
		switch tlog.GetPublicKey().GetKeyDetails() {
		case protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256,
			protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384,
			protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512:
			key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
			if err != nil {
				return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
					tlog.GetBaseUrl(),
					err,
				)
			}
			var ecKey *ecdsa.PublicKey
			var ok bool
			if ecKey, ok = key.(*ecdsa.PublicKey); !ok {
				return nil, fmt.Errorf("tlog public key is not ECDSA: %s", tlog.GetPublicKey().GetKeyDetails())
			}
			tlogEntry.PublicKey = ecKey
		// This key format has public key in PKIX RSA format and PKCS1#1v1.5 or RSASSA-PSS signature
		case protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256,
			protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256,
			protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256:
			key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
			if err != nil {
				return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
					tlog.GetBaseUrl(),
					err,
				)
			}
			var rsaKey *rsa.PublicKey
			var ok bool
			if rsaKey, ok = key.(*rsa.PublicKey); !ok {
				return nil, fmt.Errorf("tlog public key is not RSA: %s", tlog.GetPublicKey().GetKeyDetails())
			}
			tlogEntry.PublicKey = rsaKey
		case protocommon.PublicKeyDetails_PKIX_ED25519:
			key, err := x509.ParsePKIXPublicKey(tlog.GetPublicKey().GetRawBytes())
			if err != nil {
				return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
					tlog.GetBaseUrl(),
					err,
				)
			}
			var edKey ed25519.PublicKey
			var ok bool
			if edKey, ok = key.(ed25519.PublicKey); !ok {
				return nil, fmt.Errorf("tlog public key is not Ed25519: %s", tlog.GetPublicKey().GetKeyDetails())
			}
			tlogEntry.PublicKey = edKey
		// This key format is deprecated, but currently in use for Sigstore staging instance
		case protocommon.PublicKeyDetails_PKCS1_RSA_PKCS1V5: //nolint:staticcheck
			key, err := x509.ParsePKCS1PublicKey(tlog.GetPublicKey().GetRawBytes())
			if err != nil {
				return nil, fmt.Errorf("failed to parse public key for tlog: %s %w",
					tlog.GetBaseUrl(),
					err,
				)
			}
			tlogEntry.PublicKey = key
		default:
			return nil, fmt.Errorf("unsupported tlog public key type: %s", tlog.GetPublicKey().GetKeyDetails())
		}
		tlogEntry.SignatureHashFunc = getSignatureHashAlgo(tlogEntry.PublicKey)
		transparencyLogs[encodedKeyID] = tlogEntry
		// A validity period with a start time is required; the end is optional.
		if validFor := tlog.GetPublicKey().GetValidFor(); validFor != nil {
			if validFor.GetStart() != nil {
				transparencyLogs[encodedKeyID].ValidityPeriodStart = validFor.GetStart().AsTime()
			} else {
				return nil, fmt.Errorf("tlog missing public key validity period start time")
			}
			if validFor.GetEnd() != nil {
				transparencyLogs[encodedKeyID].ValidityPeriodEnd = validFor.GetEnd().AsTime()
			}
		} else {
			return nil, fmt.Errorf("tlog missing public key validity period")
		}
	}
	return transparencyLogs, nil
}
// ParseCertificateAuthorities converts each protobuf CertificateAuthority into
// a native FulcioCertificateAuthority, failing on the first invalid entry.
func ParseCertificateAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) (certificateAuthorities []CertificateAuthority, err error) {
	parsed := make([]CertificateAuthority, len(certAuthorities))
	for idx, ca := range certAuthorities {
		fulcioCA, err := ParseCertificateAuthority(ca)
		if err != nil {
			return nil, err
		}
		parsed[idx] = fulcioCA
	}
	return parsed, nil
}
// ParseCertificateAuthority converts a protobuf CertificateAuthority into a
// FulcioCertificateAuthority. The cert chain is expected to be ordered from
// leaf to root: every certificate except the last becomes an intermediate and
// the last becomes the root. The optional validity window is copied over; zero
// bounds remain unset.
//
// Cleanup: the redundant trailing URI re-assignment was removed — URI is
// already set in the struct literal.
func ParseCertificateAuthority(certAuthority *prototrustroot.CertificateAuthority) (*FulcioCertificateAuthority, error) {
	if certAuthority == nil {
		return nil, fmt.Errorf("CertificateAuthority is nil")
	}
	certChain := certAuthority.GetCertChain()
	if certChain == nil {
		return nil, fmt.Errorf("CertificateAuthority missing cert chain")
	}
	chainLen := len(certChain.GetCertificates())
	if chainLen < 1 {
		return nil, fmt.Errorf("CertificateAuthority cert chain is empty")
	}
	certificateAuthority := &FulcioCertificateAuthority{
		URI: certAuthority.Uri,
	}
	for i, cert := range certChain.GetCertificates() {
		parsedCert, err := x509.ParseCertificate(cert.RawBytes)
		if err != nil {
			return nil, fmt.Errorf("failed to parse certificate for %s %w",
				certAuthority.Uri,
				err,
			)
		}
		// All but the last certificate are intermediates; the last is the root.
		if i < chainLen-1 {
			certificateAuthority.Intermediates = append(certificateAuthority.Intermediates, parsedCert)
		} else {
			certificateAuthority.Root = parsedCert
		}
	}
	validFor := certAuthority.GetValidFor()
	if validFor != nil {
		start := validFor.GetStart()
		if start != nil {
			certificateAuthority.ValidityPeriodStart = start.AsTime()
		}
		end := validFor.GetEnd()
		if end != nil {
			certificateAuthority.ValidityPeriodEnd = end.AsTime()
		}
	}
	return certificateAuthority, nil
}
// ParseTimestampingAuthorities converts each protobuf CertificateAuthority
// into a TimestampingAuthority, failing on the first invalid entry.
func ParseTimestampingAuthorities(certAuthorities []*prototrustroot.CertificateAuthority) (timestampingAuthorities []TimestampingAuthority, err error) {
	parsed := make([]TimestampingAuthority, len(certAuthorities))
	for idx, ca := range certAuthorities {
		tsa, err := ParseTimestampingAuthority(ca)
		if err != nil {
			return nil, err
		}
		parsed[idx] = tsa
	}
	return parsed, nil
}
// ParseTimestampingAuthority converts a protobuf CertificateAuthority into a
// SigstoreTimestampingAuthority. The cert chain is expected to be ordered from
// leaf to root: a non-CA first certificate becomes the leaf, the last becomes
// the root, and everything in between an intermediate. The optional validity
// window is copied over; zero bounds remain unset.
//
// Cleanup: the redundant trailing URI re-assignment was removed — URI is
// already set in the struct literal.
func ParseTimestampingAuthority(certAuthority *prototrustroot.CertificateAuthority) (TimestampingAuthority, error) {
	if certAuthority == nil {
		return nil, fmt.Errorf("CertificateAuthority is nil")
	}
	certChain := certAuthority.GetCertChain()
	if certChain == nil {
		return nil, fmt.Errorf("CertificateAuthority missing cert chain")
	}
	chainLen := len(certChain.GetCertificates())
	if chainLen < 1 {
		return nil, fmt.Errorf("CertificateAuthority cert chain is empty")
	}
	timestampingAuthority := &SigstoreTimestampingAuthority{
		URI: certAuthority.Uri,
	}
	for i, cert := range certChain.GetCertificates() {
		parsedCert, err := x509.ParseCertificate(cert.RawBytes)
		if err != nil {
			return nil, fmt.Errorf("failed to parse certificate for %s %w",
				certAuthority.Uri,
				err,
			)
		}
		switch {
		case i == 0 && !parsedCert.IsCA:
			timestampingAuthority.Leaf = parsedCert
		case i < chainLen-1:
			timestampingAuthority.Intermediates = append(timestampingAuthority.Intermediates, parsedCert)
		case i == chainLen-1:
			timestampingAuthority.Root = parsedCert
		}
	}
	validFor := certAuthority.GetValidFor()
	if validFor != nil {
		start := validFor.GetStart()
		if start != nil {
			timestampingAuthority.ValidityPeriodStart = start.AsTime()
		}
		end := validFor.GetEnd()
		if end != nil {
			timestampingAuthority.ValidityPeriodEnd = end.AsTime()
		}
	}
	return timestampingAuthority, nil
}
// NewTrustedRootFromPath returns the Sigstore trusted root read from a file.
func NewTrustedRootFromPath(path string) (*TrustedRoot, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return nil, fmt.Errorf("failed to read trusted root %w",
			err,
		)
	}
	return NewTrustedRootFromJSON(data)
}
// NewTrustedRootFromJSON returns the Sigstore trusted root.
func NewTrustedRootFromJSON(rootJSON []byte) (*TrustedRoot, error) {
	pb, err := NewTrustedRootProtobuf(rootJSON)
	if err != nil {
		return nil, err
	}
	return NewTrustedRootFromProtobuf(pb)
}
// NewTrustedRootProtobuf returns the Sigstore trusted root as a protobuf.
func NewTrustedRootProtobuf(rootJSON []byte) (*prototrustroot.TrustedRoot, error) {
	pb := &prototrustroot.TrustedRoot{}
	if err := protojson.Unmarshal(rootJSON, pb); err != nil {
		return nil, fmt.Errorf("failed to proto-json unmarshal trusted root: %w", err)
	}
	return pb, nil
}
// NewTrustedRoot initializes a TrustedRoot object from a mediaType string, list of Fulcio
// certificate authorities, list of timestamp authorities and maps of ctlogs and rekor
// transparency log instances.
// mediaType must be TrustedRootMediaType01 ("application/vnd.dev.sigstore.trustedroot+json;version=0.1").
func NewTrustedRoot(mediaType string,
	certificateAuthorities []CertificateAuthority,
	certificateTransparencyLogs map[string]*TransparencyLog,
	timestampAuthorities []TimestampingAuthority,
	transparencyLogs map[string]*TransparencyLog) (*TrustedRoot, error) {
	// We assume one cert chain per target, with certificates already ordered
	// from leaf to root.
	if mediaType != TrustedRootMediaType01 {
		return nil, fmt.Errorf("unsupported TrustedRoot media type: %s, must be %s", mediaType, TrustedRootMediaType01)
	}
	return &TrustedRoot{
		certificateAuthorities:  certificateAuthorities,
		ctLogs:                  certificateTransparencyLogs,
		timestampingAuthorities: timestampAuthorities,
		rekorLogs:               transparencyLogs,
	}, nil
}
// FetchTrustedRoot fetches the Sigstore trusted root from TUF and returns it.
func FetchTrustedRoot() (*TrustedRoot, error) {
	opts := tuf.DefaultOptions()
	return FetchTrustedRootWithOptions(opts)
}
// FetchTrustedRootWithOptions fetches the trusted root from TUF with the given options and returns it.
func FetchTrustedRootWithOptions(opts *tuf.Options) (*TrustedRoot, error) {
	client, err := tuf.New(opts)
	if err != nil {
		// Fixed error format: separator before the wrapped error was missing.
		return nil, fmt.Errorf("failed to create TUF client: %w", err)
	}
	return GetTrustedRoot(client)
}
// GetTrustedRoot returns the trusted root fetched via the given TUF client.
func GetTrustedRoot(c *tuf.Client) (*TrustedRoot, error) {
	jsonBytes, err := c.GetTarget(defaultTrustedRoot)
	if err != nil {
		// Fixed error format: separator before the wrapped error was missing.
		return nil, fmt.Errorf("failed to get trusted root from TUF client: %w",
			err,
		)
	}
	return NewTrustedRootFromJSON(jsonBytes)
}
func getSignatureHashAlgo(pubKey crypto.PublicKey) crypto.Hash {
var h crypto.Hash
switch pk := pubKey.(type) {
case *rsa.PublicKey:
h = crypto.SHA256
case *ecdsa.PublicKey:
switch pk.Curve {
case elliptic.P256():
h = crypto.SHA256
case elliptic.P384():
h = crypto.SHA384
case elliptic.P521():
h = crypto.SHA512
default:
h = crypto.SHA256
}
case ed25519.PublicKey:
h = crypto.SHA512
default:
h = crypto.SHA256
}
return h
}
// LiveTrustedRoot is a wrapper around TrustedRoot that periodically
// refreshes the trusted root from TUF. This is needed for long-running
// processes to ensure that the trusted root does not expire.
type LiveTrustedRoot struct {
	// TrustedRoot is the most recently fetched root; it is swapped wholesale
	// on each successful refresh.
	*TrustedRoot
	// mu guards reads of TrustedRoot against concurrent refresh writes.
	mu sync.RWMutex
}
// NewLiveTrustedRoot returns a LiveTrustedRoot that will periodically
// refresh the trusted root from TUF.
func NewLiveTrustedRoot(opts *tuf.Options) (*LiveTrustedRoot, error) {
	target := defaultTrustedRoot
	return NewLiveTrustedRootFromTarget(opts, target)
}
// NewLiveTrustedRootFromTarget returns a LiveTrustedRoot that will
// periodically refresh the trusted root from TUF using the provided target.
func NewLiveTrustedRootFromTarget(opts *tuf.Options, target string) (*LiveTrustedRoot, error) {
	const refreshPeriod = 24 * time.Hour
	return NewLiveTrustedRootFromTargetWithPeriod(opts, target, refreshPeriod)
}
// NewLiveTrustedRootFromTargetWithPeriod returns a LiveTrustedRoot that
// performs a TUF refresh with the provided period, accessing the provided
// target.
//
// The refresh goroutine runs for the life of the process (the ticker is
// never stopped); a failed refresh is logged and retried on the next tick.
func NewLiveTrustedRootFromTargetWithPeriod(opts *tuf.Options, target string, rfPeriod time.Duration) (*LiveTrustedRoot, error) {
	client, err := tuf.New(opts)
	if err != nil {
		return nil, fmt.Errorf("failed to create TUF client: %w", err)
	}
	b, err := client.GetTarget(target)
	if err != nil {
		return nil, fmt.Errorf("failed to get target from TUF client: %w", err)
	}
	tr, err := NewTrustedRootFromJSON(b)
	if err != nil {
		return nil, err
	}
	ltr := &LiveTrustedRoot{
		TrustedRoot: tr,
		mu:          sync.RWMutex{},
	}
	ticker := time.NewTicker(rfPeriod)
	go func() {
		for range ticker.C {
			// Use goroutine-local variables so a failed refresh cannot
			// clobber the client/err captured from the enclosing scope, and
			// skip the rest of the iteration on any failure (the original
			// carried on with a possibly-nil client or nil bytes).
			client, err := tuf.New(opts)
			if err != nil {
				log.Printf("error creating TUF client: %v", err)
				continue
			}
			b, err := client.GetTarget(target)
			if err != nil {
				log.Printf("error fetching trusted root: %v", err)
				continue
			}
			newTr, err := NewTrustedRootFromJSON(b)
			if err != nil {
				log.Printf("error fetching trusted root: %v", err)
				continue
			}
			ltr.mu.Lock()
			ltr.TrustedRoot = newTr
			ltr.mu.Unlock()
			log.Printf("successfully refreshed the TUF root")
		}
	}()
	return ltr, nil
}
// TimestampingAuthorities returns the timestamping authorities of the
// current trusted root, holding the read lock for the duration of the call.
func (l *LiveTrustedRoot) TimestampingAuthorities() []TimestampingAuthority {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return l.TrustedRoot.TimestampingAuthorities()
}
// FulcioCertificateAuthorities returns the Fulcio certificate authorities of
// the current trusted root, holding the read lock for the duration of the call.
func (l *LiveTrustedRoot) FulcioCertificateAuthorities() []CertificateAuthority {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return l.TrustedRoot.FulcioCertificateAuthorities()
}
// RekorLogs returns the Rekor transparency logs of the current trusted root,
// holding the read lock for the duration of the call.
func (l *LiveTrustedRoot) RekorLogs() map[string]*TransparencyLog {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return l.TrustedRoot.RekorLogs()
}
// CTLogs returns the certificate transparency logs of the current trusted
// root, holding the read lock for the duration of the call.
func (l *LiveTrustedRoot) CTLogs() map[string]*TransparencyLog {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return l.TrustedRoot.CTLogs()
}
// PublicKeyVerifier looks up a time-constrained verifier by key ID in the
// current trusted root, holding the read lock for the duration of the call.
func (l *LiveTrustedRoot) PublicKeyVerifier(keyID string) (TimeConstrainedVerifier, error) {
	l.mu.RLock()
	defer l.mu.RUnlock()
	return l.TrustedRoot.PublicKeyVerifier(keyID)
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package root
import (
"crypto"
"crypto/ecdsa"
"crypto/ed25519"
"crypto/elliptic"
"crypto/rsa"
"crypto/x509"
"fmt"
"sort"
"time"
protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
prototrustroot "github.com/sigstore/protobuf-specs/gen/pb-go/trustroot/v1"
timestamppb "google.golang.org/protobuf/types/known/timestamppb"
)
// constructProtoTrustRoot populates tr.trustedRoot with the protobuf
// representation of this TrustedRoot, sorting each slice by validity start
// time so serialization is deterministic.
func (tr *TrustedRoot) constructProtoTrustRoot() error {
	tr.trustedRoot = &prototrustroot.TrustedRoot{}
	tr.trustedRoot.MediaType = TrustedRootMediaType01
	for logID, transparencyLog := range tr.rekorLogs {
		tlProto, err := transparencyLogToProtobufTL(transparencyLog)
		if err != nil {
			return fmt.Errorf("failed converting rekor log %s to protobuf: %w", logID, err)
		}
		tr.trustedRoot.Tlogs = append(tr.trustedRoot.Tlogs, tlProto)
	}
	// ensure stable sorting of the slice
	sortTlogSlice(tr.trustedRoot.Tlogs)
	for logID, ctLog := range tr.ctLogs {
		ctProto, err := transparencyLogToProtobufTL(ctLog)
		if err != nil {
			return fmt.Errorf("failed converting ctlog %s to protobuf: %w", logID, err)
		}
		tr.trustedRoot.Ctlogs = append(tr.trustedRoot.Ctlogs, ctProto)
	}
	// ensure stable sorting of the slice
	sortTlogSlice(tr.trustedRoot.Ctlogs)
	for _, ca := range tr.certificateAuthorities {
		// Checked assertion: the original unchecked assertion would panic on
		// any other CertificateAuthority implementation.
		fulcioCA, ok := ca.(*FulcioCertificateAuthority)
		if !ok {
			return fmt.Errorf("unsupported certificate authority type: %T", ca)
		}
		caProto, err := certificateAuthorityToProtobufCA(fulcioCA)
		if err != nil {
			return fmt.Errorf("failed converting fulcio cert chain to protobuf: %w", err)
		}
		tr.trustedRoot.CertificateAuthorities = append(tr.trustedRoot.CertificateAuthorities, caProto)
	}
	// ensure stable sorting of the slice
	sortCASlice(tr.trustedRoot.CertificateAuthorities)
	for _, ta := range tr.timestampingAuthorities {
		tsa, ok := ta.(*SigstoreTimestampingAuthority)
		if !ok {
			return fmt.Errorf("unsupported timestamping authority type: %T", ta)
		}
		caProto, err := timestampingAuthorityToProtobufCA(tsa)
		if err != nil {
			return fmt.Errorf("failed converting TSA cert chain to protobuf: %w", err)
		}
		tr.trustedRoot.TimestampAuthorities = append(tr.trustedRoot.TimestampAuthorities, caProto)
	}
	// ensure stable sorting of the slice
	sortCASlice(tr.trustedRoot.TimestampAuthorities)
	return nil
}
// sortCASlice sorts certificate authorities by validity start time so the
// serialized trusted root is deterministic. Entries without a start time sort
// as the Unix epoch. Uses the nil-safe protobuf getters: the original
// dereferenced ValidFor directly and would panic when ValidFor was nil.
func sortCASlice(slc []*prototrustroot.CertificateAuthority) {
	sort.Slice(slc, func(i, j int) bool {
		iTime := time.Unix(0, 0)
		jTime := time.Unix(0, 0)
		if start := slc[i].GetValidFor().GetStart(); start != nil {
			iTime = start.AsTime()
		}
		if start := slc[j].GetValidFor().GetStart(); start != nil {
			jTime = start.AsTime()
		}
		return iTime.Before(jTime)
	})
}
// sortTlogSlice sorts transparency log instances by their public key's
// validity start time so the serialized trusted root is deterministic.
// Entries without a start time sort as the Unix epoch. Uses the nil-safe
// protobuf getters: the original dereferenced PublicKey.ValidFor directly and
// would panic when either was nil.
func sortTlogSlice(slc []*prototrustroot.TransparencyLogInstance) {
	sort.Slice(slc, func(i, j int) bool {
		iTime := time.Unix(0, 0)
		jTime := time.Unix(0, 0)
		if start := slc[i].GetPublicKey().GetValidFor().GetStart(); start != nil {
			iTime = start.AsTime()
		}
		if start := slc[j].GetPublicKey().GetValidFor().GetStart(); start != nil {
			jTime = start.AsTime()
		}
		return iTime.Before(jTime)
	})
}
// certificateAuthorityToProtobufCA converts a FulcioCertificateAuthority into
// its protobuf representation. The emitted chain lists intermediates first,
// then the root. A missing ValidityPeriodEnd leaves ValidFor.End unset.
func certificateAuthorityToProtobufCA(ca *FulcioCertificateAuthority) (*prototrustroot.CertificateAuthority, error) {
	// Check the root before using it: the original read ca.Root.Subject
	// before performing this nil check.
	if ca.Root == nil {
		return nil, fmt.Errorf("root certificate is nil")
	}
	org := ""
	if len(ca.Root.Subject.Organization) > 0 {
		org = ca.Root.Subject.Organization[0]
	}
	var allCerts []*protocommon.X509Certificate
	for _, intermed := range ca.Intermediates {
		allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw})
	}
	allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw})
	caProto := prototrustroot.CertificateAuthority{
		Uri: ca.URI,
		Subject: &protocommon.DistinguishedName{
			Organization: org,
			CommonName:   ca.Root.Subject.CommonName,
		},
		ValidFor: &protocommon.TimeRange{
			Start: timestamppb.New(ca.ValidityPeriodStart),
		},
		CertChain: &protocommon.X509CertificateChain{
			Certificates: allCerts,
		},
	}
	if !ca.ValidityPeriodEnd.IsZero() {
		caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd)
	}
	return &caProto, nil
}
// timestampingAuthorityToProtobufCA converts a SigstoreTimestampingAuthority
// into its protobuf representation. The emitted chain lists the leaf (when
// present), then intermediates, then the root. A missing ValidityPeriodEnd
// leaves ValidFor.End unset.
func timestampingAuthorityToProtobufCA(ca *SigstoreTimestampingAuthority) (*prototrustroot.CertificateAuthority, error) {
	// Check the root before using it: the original read ca.Root.Subject
	// before performing this nil check.
	if ca.Root == nil {
		return nil, fmt.Errorf("root certificate is nil")
	}
	org := ""
	if len(ca.Root.Subject.Organization) > 0 {
		org = ca.Root.Subject.Organization[0]
	}
	var allCerts []*protocommon.X509Certificate
	if ca.Leaf != nil {
		allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Leaf.Raw})
	}
	for _, intermed := range ca.Intermediates {
		allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: intermed.Raw})
	}
	allCerts = append(allCerts, &protocommon.X509Certificate{RawBytes: ca.Root.Raw})
	caProto := prototrustroot.CertificateAuthority{
		Uri: ca.URI,
		Subject: &protocommon.DistinguishedName{
			Organization: org,
			CommonName:   ca.Root.Subject.CommonName,
		},
		ValidFor: &protocommon.TimeRange{
			Start: timestamppb.New(ca.ValidityPeriodStart),
		},
		CertChain: &protocommon.X509CertificateChain{
			Certificates: allCerts,
		},
	}
	if !ca.ValidityPeriodEnd.IsZero() {
		caProto.ValidFor.End = timestamppb.New(ca.ValidityPeriodEnd)
	}
	return &caProto, nil
}
// transparencyLogToProtobufTL converts a TransparencyLog into its protobuf
// TransparencyLogInstance representation.
func transparencyLogToProtobufTL(tl *TransparencyLog) (*prototrustroot.TransparencyLogInstance, error) {
	hashAlgo, err := hashAlgorithmToProtobufHashAlgorithm(tl.HashFunc)
	if err != nil {
		return nil, fmt.Errorf("failed converting hash algorithm to protobuf: %w", err)
	}
	pubKey, err := publicKeyToProtobufPublicKey(tl.PublicKey, tl.ValidityPeriodStart, tl.ValidityPeriodEnd)
	if err != nil {
		return nil, fmt.Errorf("failed converting public key to protobuf: %w", err)
	}
	return &prototrustroot.TransparencyLogInstance{
		BaseUrl:       tl.BaseURL,
		HashAlgorithm: hashAlgo,
		PublicKey:     pubKey,
		LogId:         &protocommon.LogId{KeyId: tl.ID},
	}, nil
}
// hashAlgorithmToProtobufHashAlgorithm maps a crypto.Hash onto the protobuf
// HashAlgorithm enum, erroring on anything outside SHA-2/SHA-3 256-512.
func hashAlgorithmToProtobufHashAlgorithm(hashAlgorithm crypto.Hash) (protocommon.HashAlgorithm, error) {
	var algo protocommon.HashAlgorithm
	switch hashAlgorithm {
	case crypto.SHA256:
		algo = protocommon.HashAlgorithm_SHA2_256
	case crypto.SHA384:
		algo = protocommon.HashAlgorithm_SHA2_384
	case crypto.SHA512:
		algo = protocommon.HashAlgorithm_SHA2_512
	case crypto.SHA3_256:
		algo = protocommon.HashAlgorithm_SHA3_256
	case crypto.SHA3_384:
		algo = protocommon.HashAlgorithm_SHA3_384
	default:
		return 0, fmt.Errorf("unsupported hash algorithm for Merkle tree: %v", hashAlgorithm)
	}
	return algo, nil
}
// publicKeyToProtobufPublicKey converts a public key plus its validity window
// into a protobuf PublicKey with PKIX-encoded raw bytes and the matching
// KeyDetails enum. A zero end time leaves ValidFor.End unset.
func publicKeyToProtobufPublicKey(publicKey crypto.PublicKey, start time.Time, end time.Time) (*protocommon.PublicKey, error) {
	pkd := protocommon.PublicKey{
		ValidFor: &protocommon.TimeRange{
			Start: timestamppb.New(start),
		},
	}
	if !end.IsZero() {
		pkd.ValidFor.End = timestamppb.New(end)
	}
	rawBytes, err := x509.MarshalPKIXPublicKey(publicKey)
	if err != nil {
		return nil, fmt.Errorf("failed marshalling public key: %w", err)
	}
	pkd.RawBytes = rawBytes
	switch p := publicKey.(type) {
	case *ecdsa.PublicKey:
		switch p.Curve {
		case elliptic.P256():
			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256
		case elliptic.P384():
			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P384_SHA_384
		case elliptic.P521():
			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ECDSA_P521_SHA_512
		default:
			return nil, fmt.Errorf("unsupported curve for ecdsa key: %T", p.Curve)
		}
	case *rsa.PublicKey:
		switch p.Size() * 8 {
		case 2048:
			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_2048_SHA256
		case 3072:
			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_3072_SHA256
		case 4096:
			pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_RSA_PKCS1V15_4096_SHA256
		default:
			// Report bits to match the values the switch compares against;
			// the original reported p.Size() (bytes) in a bits context.
			return nil, fmt.Errorf("unsupported public modulus for RSA key: %d bits", p.Size()*8)
		}
	case ed25519.PublicKey:
		pkd.KeyDetails = protocommon.PublicKeyDetails_PKIX_ED25519
	default:
		return nil, fmt.Errorf("unknown public key type: %T", p)
	}
	return &pkd, nil
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ca
import (
"bytes"
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"encoding/base64"
"encoding/hex"
"encoding/json"
"fmt"
"math/big"
"strings"
"time"
"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
"github.com/digitorus/timestamp"
"github.com/go-openapi/runtime"
"github.com/go-openapi/swag/conv"
"github.com/secure-systems-lab/go-securesystemslib/dsse"
v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
"github.com/sigstore/rekor/pkg/generated/models"
"github.com/sigstore/rekor/pkg/pki"
"github.com/sigstore/rekor/pkg/types"
"github.com/sigstore/rekor/pkg/types/hashedrekord"
"github.com/sigstore/rekor/pkg/types/intoto"
"github.com/sigstore/rekor/pkg/types/rekord"
"github.com/sigstore/rekor/pkg/util"
"github.com/sigstore/sigstore-go/pkg/bundle"
"github.com/sigstore/sigstore-go/pkg/root"
"github.com/sigstore/sigstore-go/pkg/tlog"
"github.com/sigstore/sigstore-go/pkg/verify"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"github.com/sigstore/sigstore/pkg/signature"
sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse"
tsx509 "github.com/sigstore/timestamp-authority/v2/pkg/x509"
)
// VirtualSigstore is an in-memory Sigstore deployment (Fulcio CA, TSA, and
// Rekor/CT log keys) used to mint test signing material.
type VirtualSigstore struct {
	fulcioCA              *root.FulcioCertificateAuthority // virtual Fulcio root + intermediate
	fulcioIntermediateKey *ecdsa.PrivateKey                // signs leaf certificates
	tsaCA                 *root.SigstoreTimestampingAuthority
	tsaLeafKey            *ecdsa.PrivateKey // signs RFC 3161 timestamp responses
	rekorKey              *ecdsa.PrivateKey // signs tlog payloads and checkpoints
	rekorRoot             []byte            // optional pre-existing leaf hash used as an inclusion-proof sibling
	ctlogKey              *ecdsa.PrivateKey
	publicKeyVerifier     map[string]root.TimeConstrainedVerifier
	// signingAlgorithmDetails selects key type and hash for leaf signing keys.
	signingAlgorithmDetails signature.AlgorithmDetails
}
// NewVirtualSigstoreWithSigningAlg constructs an in-memory Sigstore stack for
// tests, using the given key algorithm for leaf signing keys. The Fulcio CA
// and TSA share one root; the TSA additionally gets its own intermediate and
// a pre-issued leaf.
func NewVirtualSigstoreWithSigningAlg(signingKeyDetails v1.PublicKeyDetails) (*VirtualSigstore, error) {
	ss := &VirtualSigstore{
		fulcioCA: &root.FulcioCertificateAuthority{URI: "https://virtual.fulcio.sigstore.dev"},
		tsaCA:    &root.SigstoreTimestampingAuthority{URI: "https://virtual.tsa.sigstore.dev"},
	}
	rootCert, rootKey, err := GenerateRootCa()
	if err != nil {
		return nil, err
	}
	ss.fulcioCA.Root = rootCert
	ss.tsaCA.Root = rootCert
	// Check this error: the original silently discarded it with _.
	intermediateCert, intermediateKey, err := GenerateFulcioIntermediate(rootCert, rootKey)
	if err != nil {
		return nil, err
	}
	ss.fulcioCA.Intermediates = []*x509.Certificate{intermediateCert}
	ss.fulcioIntermediateKey = intermediateKey
	tsaIntermediateCert, tsaIntermediateKey, err := GenerateTSAIntermediate(rootCert, rootKey)
	if err != nil {
		return nil, err
	}
	ss.tsaCA.Intermediates = []*x509.Certificate{tsaIntermediateCert}
	tsaLeafKey, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	// Backdate the TSA leaf slightly so it is already valid at signing time.
	tsaLeafCert, err := GenerateTSALeafCert(time.Now().Add(-5*time.Minute), tsaLeafKey, tsaIntermediateCert, tsaIntermediateKey)
	if err != nil {
		return nil, err
	}
	ss.tsaCA.Leaf = tsaLeafCert
	ss.tsaLeafKey = tsaLeafKey
	ss.fulcioCA.ValidityPeriodStart = time.Now().Add(-5 * time.Hour)
	ss.fulcioCA.ValidityPeriodEnd = time.Now().Add(time.Hour)
	ss.tsaCA.ValidityPeriodStart = time.Now().Add(-5 * time.Hour)
	ss.tsaCA.ValidityPeriodEnd = time.Now().Add(time.Hour)
	ss.rekorKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	ss.ctlogKey, err = ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, err
	}
	ss.signingAlgorithmDetails, err = signature.GetAlgorithmDetails(signingKeyDetails)
	if err != nil {
		return nil, err
	}
	return ss, nil
}
// NewVirtualSigstore returns a VirtualSigstore using the default
// ECDSA-P256/SHA-256 signing algorithm.
func NewVirtualSigstore() (*VirtualSigstore, error) {
	const defaultAlg = v1.PublicKeyDetails_PKIX_ECDSA_P256_SHA_256
	return NewVirtualSigstoreWithSigningAlg(defaultAlg)
}
// NewVirtualSigstoreWithExistingRekorEntry returns a VirtualSigstore whose
// Rekor log already holds one leaf hash, giving inclusion proofs a sibling.
func NewVirtualSigstoreWithExistingRekorEntry() (*VirtualSigstore, error) {
	vs, err := NewVirtualSigstore()
	if err != nil {
		return nil, err
	}
	// RFC 6962-style leaf hash: 0x00 prefix followed by the entry bytes.
	leaf := append([]byte{0x00}, []byte("foo")...)
	digest := sha256.Sum256(leaf)
	vs.rekorRoot = digest[:]
	return vs, nil
}
// getLogID calculates the digest of a PKIX-encoded public key
func getLogID(pub crypto.PublicKey) (string, error) {
pubBytes, err := x509.MarshalPKIXPublicKey(pub)
if err != nil {
return "", err
}
digest := sha256.Sum256(pubBytes)
return hex.EncodeToString(digest[:]), nil
}
// RekorLogID returns the hex-encoded SHA-256 digest of the virtual Rekor
// public key.
func (ca *VirtualSigstore) RekorLogID() (string, error) {
	return getLogID(ca.rekorKey.Public())
}
// RekorSignPayload JSON-canonicalizes the Rekor payload and signs it with the
// virtual Rekor key, producing a signed entry timestamp.
func (ca *VirtualSigstore) RekorSignPayload(payload tlog.RekorPayload) ([]byte, error) {
	raw, err := json.Marshal(payload)
	if err != nil {
		return nil, err
	}
	canonical, err := jsoncanonicalizer.Transform(raw)
	if err != nil {
		return nil, err
	}
	signer, err := signature.LoadECDSASignerVerifier(ca.rekorKey, crypto.SHA256)
	if err != nil {
		return nil, err
	}
	return signer.SignMessage(bytes.NewReader(canonical))
}
// GenerateLeafCert creates a signing key matching the configured algorithm
// details and issues a leaf certificate for identity/issuer under the Fulcio
// intermediate.
func (ca *VirtualSigstore) GenerateLeafCert(identity, issuer string) (*x509.Certificate, crypto.PrivateKey, error) {
	var privKey crypto.PrivateKey
	var err error
	switch keyType := ca.signingAlgorithmDetails.GetKeyType(); keyType {
	case signature.ECDSA:
		var curve *elliptic.Curve
		curve, err = ca.signingAlgorithmDetails.GetECDSACurve()
		if err != nil {
			return nil, nil, err
		}
		privKey, err = ecdsa.GenerateKey(*curve, rand.Reader)
	case signature.RSA:
		var keySize signature.RSAKeySize
		keySize, err = ca.signingAlgorithmDetails.GetRSAKeySize()
		if err != nil {
			return nil, nil, err
		}
		privKey, err = rsa.GenerateKey(rand.Reader, int(keySize))
	default:
		// Previously an unsupported key type fell through with a nil key and
		// failed later in certificate creation.
		return nil, nil, fmt.Errorf("unsupported key type: %v", keyType)
	}
	if err != nil {
		return nil, nil, err
	}
	leafCert, err := GenerateLeafCert(identity, issuer, time.Now(), privKey, ca.fulcioCA.Intermediates[0], ca.fulcioIntermediateKey)
	if err != nil {
		return nil, nil, err
	}
	return leafCert, privKey, nil
}
// Attest signs envelopeBody as a DSSE attestation for identity/issuer.
// The timing here is important: we need to attest at a time when the leaf
// certificate is valid, so we match what GenerateLeafCert() does, above.
func (ca *VirtualSigstore) Attest(identity, issuer string, envelopeBody []byte) (*TestEntity, error) {
	integratedTime := time.Now().Add(5 * time.Minute)
	return ca.AttestAtTime(identity, issuer, envelopeBody, integratedTime, false)
}
// AttestAtTime signs envelopeBody as a DSSE attestation with a fresh leaf
// certificate for identity/issuer, a TSA timestamp over the signature, and a
// Rekor tlog entry integrated at integratedTime. When generateInclusionProof
// is true the tlog entry carries an inclusion proof.
func (ca *VirtualSigstore) AttestAtTime(identity, issuer string, envelopeBody []byte, integratedTime time.Time, generateInclusionProof bool) (*TestEntity, error) {
	leafCert, leafPrivKey, err := ca.GenerateLeafCert(identity, issuer)
	if err != nil {
		return nil, err
	}
	signer, err := signature.LoadSignerFromAlgorithmDetails(leafPrivKey, ca.signingAlgorithmDetails)
	if err != nil {
		return nil, err
	}
	// Pass the public key as-is: the original asserted *ecdsa.PublicKey,
	// which panics when the configured signing algorithm is RSA.
	dsseSigner, err := dsse.NewEnvelopeSigner(&sigdsse.SignerAdapter{
		SignatureSigner: signer,
		Pub:             leafCert.PublicKey,
	})
	if err != nil {
		return nil, err
	}
	envelope, err := dsseSigner.SignPayload(context.Background(), "application/vnd.in-toto+json", envelopeBody)
	if err != nil {
		return nil, err
	}
	sig, err := base64.StdEncoding.DecodeString(envelope.Signatures[0].Sig)
	if err != nil {
		return nil, err
	}
	tsr, err := generateTimestampingResponse(sig, ca.tsaCA.Leaf, ca.tsaLeafKey)
	if err != nil {
		return nil, err
	}
	entry, err := ca.GenerateTlogEntry(leafCert, envelope, sig, integratedTime.Unix(), generateInclusionProof)
	if err != nil {
		return nil, err
	}
	return &TestEntity{
		certChain:   []*x509.Certificate{leafCert, ca.fulcioCA.Intermediates[0], ca.fulcioCA.Root},
		timestamps:  [][]byte{tsr},
		envelope:    envelope,
		tlogEntries: []*tlog.Entry{entry},
	}, nil
}
// Sign signs artifact for identity/issuer with the default bundle version
// v0.3, integrated five minutes from now.
func (ca *VirtualSigstore) Sign(identity, issuer string, artifact []byte) (*TestEntity, error) {
	integratedTime := time.Now().Add(5 * time.Minute)
	return ca.SignAtTimeWithVersion(identity, issuer, artifact, integratedTime, "v0.3")
}
// SignWithVersion is like Sign but with an explicit bundle version.
func (ca *VirtualSigstore) SignWithVersion(identity, issuer string, artifact []byte, version string) (*TestEntity, error) {
	integratedTime := time.Now().Add(5 * time.Minute)
	return ca.SignAtTimeWithVersion(identity, issuer, artifact, integratedTime, version)
}
// SignAtTime is like Sign but with an explicit integration time.
func (ca *VirtualSigstore) SignAtTime(identity, issuer string, artifact []byte, integratedTime time.Time) (*TestEntity, error) {
	const defaultVersion = "v0.3"
	return ca.SignAtTimeWithVersion(identity, issuer, artifact, integratedTime, defaultVersion)
}
// SignAtTimeWithVersion signs artifact with a fresh leaf certificate for
// identity/issuer, producing a message-signature TestEntity with a TSA
// timestamp and a hashedrekord tlog entry integrated at integratedTime,
// using the given bundle version.
func (ca *VirtualSigstore) SignAtTimeWithVersion(identity, issuer string, artifact []byte, integratedTime time.Time, version string) (*TestEntity, error) {
	leafCert, leafPrivKey, err := ca.GenerateLeafCert(identity, issuer)
	if err != nil {
		return nil, err
	}
	signer, err := signature.LoadSignerFromAlgorithmDetails(leafPrivKey, ca.signingAlgorithmDetails)
	if err != nil {
		return nil, err
	}
	hashType := ca.signingAlgorithmDetails.GetHashType()
	hasher := hashType.New()
	hasher.Write(artifact)
	digest := hasher.Sum(nil)
	var digestString string
	switch hashType {
	case crypto.SHA256:
		digestString = "SHA2_256"
	case crypto.SHA384:
		digestString = "SHA2_384"
	case crypto.SHA512:
		digestString = "SHA2_512"
	default:
		// Previously an unexpected hash silently produced an empty algorithm
		// name in the resulting bundle.
		return nil, fmt.Errorf("unsupported hash algorithm: %v", hashType)
	}
	sig, err := signer.SignMessage(bytes.NewReader(artifact))
	if err != nil {
		return nil, err
	}
	tsr, err := generateTimestampingResponse(sig, ca.tsaCA.Leaf, ca.tsaLeafKey)
	if err != nil {
		return nil, err
	}
	entry, err := ca.generateTlogEntryHashedRekord(leafCert, artifact, sig, integratedTime.Unix())
	if err != nil {
		return nil, err
	}
	return &TestEntity{
		certChain:        []*x509.Certificate{leafCert, ca.fulcioCA.Intermediates[0], ca.fulcioCA.Root},
		timestamps:       [][]byte{tsr},
		messageSignature: bundle.NewMessageSignature(digest, digestString, sig),
		tlogEntries:      []*tlog.Entry{entry},
		version:          version,
	}, nil
}
// GenerateTlogEntry builds a Rekor intoto entry for the DSSE envelope, signs
// its payload with the virtual Rekor key, and optionally attaches an
// inclusion proof.
func (ca *VirtualSigstore) GenerateTlogEntry(leafCert *x509.Certificate, envelope *dsse.Envelope, sig []byte, integratedTime int64, generateInclusionProof bool) (*tlog.Entry, error) {
	certPEM, err := cryptoutils.MarshalCertificateToPEM(leafCert)
	if err != nil {
		return nil, err
	}
	envJSON, err := json.Marshal(envelope)
	if err != nil {
		return nil, err
	}
	body, err := generateRekorEntry(intoto.KIND, intoto.New().DefaultVersion(), envJSON, certPEM, sig, ca.signingAlgorithmDetails)
	if err != nil {
		return nil, err
	}
	logID, err := getLogID(ca.rekorKey.Public())
	if err != nil {
		return nil, err
	}
	logIDBytes, err := hex.DecodeString(logID)
	if err != nil {
		return nil, err
	}
	var logIndex int64 = 0
	set, err := ca.RekorSignPayload(*createRekorBundle(logID, integratedTime, logIndex, body))
	if err != nil {
		return nil, err
	}
	bodyRaw, err := base64.StdEncoding.DecodeString(body)
	if err != nil {
		return nil, err
	}
	var proof *models.InclusionProof
	if generateInclusionProof {
		if proof, err = ca.GetInclusionProof(bodyRaw); err != nil {
			return nil, err
		}
	}
	return tlog.NewEntry(bodyRaw, integratedTime, logIndex, logIDBytes, set, proof) //nolint:staticcheck
}
// GetInclusionProof builds a one- or two-leaf Merkle inclusion proof for the
// given Rekor entry body, backed by a checkpoint signed with the virtual
// Rekor key.
func (ca *VirtualSigstore) GetInclusionProof(rekorBodyRaw []byte) (*models.InclusionProof, error) {
	signer, err := signature.LoadECDSASignerVerifier(ca.rekorKey, crypto.SHA256)
	if err != nil {
		return nil, err
	}
	// Leaf hash: H(0x00 || body).
	leaf := sha256.Sum256(append([]byte{0x00}, rekorBodyRaw...))
	rootHash := leaf
	var siblings []string
	if ca.rekorRoot != nil {
		// Interior node: H(0x01 || left || right), with the pre-existing
		// entry hash as the right child and proof sibling.
		interior := make([]byte, 0, 1+len(leaf)+len(ca.rekorRoot))
		interior = append(interior, 0x01)
		interior = append(interior, leaf[:]...)
		interior = append(interior, ca.rekorRoot...)
		rootHash = sha256.Sum256(interior)
		siblings = []string{hex.EncodeToString(ca.rekorRoot)}
	}
	encodedRootHash := hex.EncodeToString(rootHash[:])
	scBytes, err := util.CreateAndSignCheckpoint(context.Background(), "rekor.localhost", int64(123), uint64(42), rootHash[:], signer)
	if err != nil {
		return nil, err
	}
	return &models.InclusionProof{
		TreeSize:   conv.Pointer(int64(len(siblings) + 1)),
		RootHash:   &encodedRootHash,
		LogIndex:   conv.Pointer(int64(len(siblings))),
		Hashes:     siblings,
		Checkpoint: conv.Pointer(string(scBytes)),
	}, nil
}
// generateTlogEntryHashedRekord builds a hashedrekord Rekor entry for
// artifact/sig and signs its payload with the virtual Rekor key.
func (ca *VirtualSigstore) generateTlogEntryHashedRekord(leafCert *x509.Certificate, artifact []byte, sig []byte, integratedTime int64) (*tlog.Entry, error) {
	certPEM, err := cryptoutils.MarshalCertificateToPEM(leafCert)
	if err != nil {
		return nil, err
	}
	body, err := generateRekorEntry(hashedrekord.KIND, hashedrekord.New().DefaultVersion(), artifact, certPEM, sig, ca.signingAlgorithmDetails)
	if err != nil {
		return nil, err
	}
	logID, err := getLogID(ca.rekorKey.Public())
	if err != nil {
		return nil, err
	}
	logIDBytes, err := hex.DecodeString(logID)
	if err != nil {
		return nil, err
	}
	var logIndex int64 = 1000
	set, err := ca.RekorSignPayload(*createRekorBundle(logID, integratedTime, logIndex, body))
	if err != nil {
		return nil, err
	}
	bodyRaw, err := base64.StdEncoding.DecodeString(body)
	if err != nil {
		return nil, err
	}
	return tlog.NewEntry(bodyRaw, integratedTime, logIndex, logIDBytes, set, nil) //nolint:staticcheck
}
// PublicKeyVerifier looks up a registered time-constrained verifier by key ID.
func (ca *VirtualSigstore) PublicKeyVerifier(keyID string) (root.TimeConstrainedVerifier, error) {
	if v, ok := ca.publicKeyVerifier[keyID]; ok {
		return v, nil
	}
	return nil, fmt.Errorf("public key not found for keyID: %s", keyID)
}
// generateRekorEntry creates a canonicalized Rekor entry of the given kind
// and version and returns it base64-encoded.
func generateRekorEntry(kind, version string, artifact []byte, cert []byte, sig []byte, algorithmDetails signature.AlgorithmDetails) (string, error) {
	ctx := context.Background()
	entryImpl, err := createEntry(ctx, kind, version, artifact, cert, sig, algorithmDetails)
	if err != nil {
		return "", err
	}
	canonical, err := entryImpl.Canonicalize(ctx)
	if err != nil {
		return "", err
	}
	return base64.StdEncoding.EncodeToString(canonical), nil
}
// createEntry assembles a proposed Rekor entry of the given kind/version,
// canonicalizes it, and round-trips it back into a versioned EntryImpl.
func createEntry(ctx context.Context, kind, apiVersion string, blobBytes, certBytes, sigBytes []byte, algorithmDetails signature.AlgorithmDetails) (types.EntryImpl, error) {
	props := types.ArtifactProperties{
		PublicKeyBytes: [][]byte{certBytes},
		PKIFormat:      string(pki.X509),
	}
	switch kind {
	case rekord.KIND, intoto.KIND:
		props.ArtifactBytes = blobBytes
		props.SignatureBytes = sigBytes
	case hashedrekord.KIND:
		// hashedrekord stores only the artifact digest, not the artifact.
		hasher := algorithmDetails.GetHashType().New()
		hasher.Write(blobBytes)
		props.ArtifactHash = strings.ToLower(hex.EncodeToString(hasher.Sum(nil)))
		props.SignatureBytes = sigBytes
	default:
		return nil, fmt.Errorf("unexpected entry kind: %s", kind)
	}
	proposed, err := types.NewProposedEntry(ctx, kind, apiVersion, props)
	if err != nil {
		return nil, err
	}
	impl, err := types.CreateVersionedEntry(proposed)
	if err != nil {
		return nil, err
	}
	canonical, err := types.CanonicalizeEntry(ctx, impl)
	if err != nil {
		return nil, err
	}
	roundTripped, err := models.UnmarshalProposedEntry(bytes.NewReader(canonical), runtime.JSONConsumer())
	if err != nil {
		return nil, err
	}
	return types.UnmarshalEntry(roundTripped)
}
// createRekorBundle packages the pieces of a Rekor log entry into the payload
// signed for the signed entry timestamp.
func createRekorBundle(logID string, integratedTime int64, logIndex int64, rekorEntry string) *tlog.RekorPayload {
	payload := tlog.RekorPayload{
		LogID:          logID,
		IntegratedTime: integratedTime,
		LogIndex:       logIndex,
		Body:           rekorEntry,
	}
	return &payload
}
// TimestampResponse returns an RFC 3161 timestamp response over sig, signed
// by the virtual TSA leaf key.
func (ca *VirtualSigstore) TimestampResponse(sig []byte) ([]byte, error) {
	return generateTimestampingResponse(sig, ca.tsaCA.Leaf, ca.tsaLeafKey)
}
func generateTimestampingResponse(sig []byte, tsaCert *x509.Certificate, tsaKey *ecdsa.PrivateKey) ([]byte, error) {
var hash crypto.Hash
switch tsaKey.Curve {
case elliptic.P256():
hash = crypto.SHA256
case elliptic.P384():
hash = crypto.SHA384
case elliptic.P521():
hash = crypto.SHA512
}
tsq, err := timestamp.CreateRequest(bytes.NewReader(sig), ×tamp.RequestOptions{
Hash: hash,
})
if err != nil {
return nil, err
}
req, err := timestamp.ParseRequest([]byte(tsq))
if err != nil {
return nil, err
}
tsTemplate := timestamp.Timestamp{
HashAlgorithm: req.HashAlgorithm,
HashedMessage: req.HashedMessage,
Time: time.Now(),
Policy: asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 2},
Ordering: false,
Qualified: false,
ExtraExtensions: req.Extensions,
}
return tsTemplate.CreateResponseWithOpts(tsaCert, tsaKey, hash)
}
// TimestampingAuthorities returns the virtual TSA as the only timestamping
// authority.
func (ca *VirtualSigstore) TimestampingAuthorities() []root.TimestampingAuthority {
	return []root.TimestampingAuthority{ca.tsaCA}
}
// FulcioCertificateAuthorities returns the virtual Fulcio CA as the only
// certificate authority.
func (ca *VirtualSigstore) FulcioCertificateAuthorities() []root.CertificateAuthority {
	return []root.CertificateAuthority{ca.fulcioCA}
}
// RekorLogs returns the virtual Rekor transparency log keyed by its log ID.
// It panics on failure, as this is test-only infrastructure.
func (ca *VirtualSigstore) RekorLogs() map[string]*root.TransparencyLog {
	logID, err := getLogID(ca.rekorKey.Public())
	if err != nil {
		panic(err)
	}
	return map[string]*root.TransparencyLog{
		logID: {
			BaseURL:             "test",
			ID:                  []byte(logID),
			ValidityPeriodStart: time.Now().Add(-time.Hour),
			ValidityPeriodEnd:   time.Now().Add(time.Hour),
			HashFunc:            crypto.SHA256,
			PublicKey:           ca.rekorKey.Public(),
			SignatureHashFunc:   crypto.SHA256,
		},
	}
}
// CTLogs returns the virtual certificate transparency log keyed by its log
// ID. It panics on failure, as this is test-only infrastructure.
func (ca *VirtualSigstore) CTLogs() map[string]*root.TransparencyLog {
	logID, err := getLogID(ca.ctlogKey.Public())
	if err != nil {
		panic(err)
	}
	return map[string]*root.TransparencyLog{
		logID: {
			BaseURL:             "test",
			ID:                  []byte(logID),
			ValidityPeriodStart: time.Now().Add(-time.Hour),
			ValidityPeriodEnd:   time.Now().Add(time.Hour),
			HashFunc:            crypto.SHA256,
			PublicKey:           ca.ctlogKey.Public(),
		},
	}
}
// TestEntity is a signed entity produced by VirtualSigstore, carrying the
// pieces the verify package consumes: certificate chain, signature content,
// timestamps, and tlog entries.
type TestEntity struct {
	certChain        []*x509.Certificate      // leaf first, root last
	envelope         *dsse.Envelope           // set for DSSE attestations
	messageSignature *bundle.MessageSignature // set for plain artifact signatures
	timestamps       [][]byte                 // RFC 3161 timestamp responses
	tlogEntries      []*tlog.Entry
	version          string // bundle version, e.g. "v0.3"
}
// VerificationContent returns the leaf certificate (first element of the
// chain) wrapped as verification content.
func (e *TestEntity) VerificationContent() (verify.VerificationContent, error) {
	return bundle.NewCertificate(e.certChain[0]), nil
}
// HasInclusionPromise always reports true; entities from VirtualSigstore are
// built with a signed entry timestamp.
func (e *TestEntity) HasInclusionPromise() bool {
	return true
}
// Version returns the bundle version this entity was created with.
func (e *TestEntity) Version() (string, error) {
	return e.version, nil
}
// HasInclusionProof reports whether any of the entity's tlog entries carries
// an inclusion proof.
func (e *TestEntity) HasInclusionProof() bool {
	// Renamed the loop variable: the original named it "tlog", shadowing the
	// imported tlog package.
	for _, entry := range e.tlogEntries {
		if entry.HasInclusionProof() {
			return true
		}
	}
	return false
}
// SignatureContent returns the DSSE envelope when present, otherwise the
// message signature.
func (e *TestEntity) SignatureContent() (verify.SignatureContent, error) {
	if e.envelope == nil {
		return e.messageSignature, nil
	}
	return &bundle.Envelope{Envelope: e.envelope}, nil
}
// Timestamps returns the RFC 3161 timestamp responses attached to the entity.
func (e *TestEntity) Timestamps() ([][]byte, error) {
	return e.timestamps, nil
}
// TlogEntries returns the transparency log entries attached to the entity.
func (e *TestEntity) TlogEntries() ([]*tlog.Entry, error) {
	return e.tlogEntries, nil
}
// Much of the following code is adapted from cosign/test/cert_utils.go
func createCertificate(template *x509.Certificate, parent *x509.Certificate, pub interface{}, priv crypto.Signer) (*x509.Certificate, error) {
certBytes, err := x509.CreateCertificate(rand.Reader, template, parent, pub, priv)
if err != nil {
return nil, err
}
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, err
}
return cert, nil
}
// GenerateRootCa creates a self-signed P-256 root CA certificate named
// "sigstore", valid from 5 hours in the past to 5 hours in the future.
func GenerateRootCa() (*x509.Certificate, *ecdsa.PrivateKey, error) {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName:   "sigstore",
			Organization: []string{"sigstore.dev"},
		},
		NotBefore:             time.Now().Add(-5 * time.Hour),
		NotAfter:              time.Now().Add(5 * time.Hour),
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	cert, err := createCertificate(tmpl, tmpl, &priv.PublicKey, priv)
	if err != nil {
		return nil, nil, err
	}
	return cert, priv, nil
}
// GenerateFulcioIntermediate issues a code-signing intermediate CA
// certificate signed by the given root, valid from 2 minutes ago for 2 hours.
func GenerateFulcioIntermediate(rootTemplate *x509.Certificate, rootPriv crypto.Signer) (*x509.Certificate, *ecdsa.PrivateKey, error) {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName:   "sigstore-intermediate",
			Organization: []string{"sigstore.dev"},
		},
		NotBefore:             time.Now().Add(-2 * time.Minute),
		NotAfter:              time.Now().Add(2 * time.Hour),
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	cert, err := createCertificate(tmpl, rootTemplate, &priv.PublicKey, rootPriv)
	if err != nil {
		return nil, nil, err
	}
	return cert, priv, nil
}
// GenerateTSAIntermediate issues a timestamping intermediate CA certificate
// signed by the given root, valid from 2 minutes ago for 2 hours.
func GenerateTSAIntermediate(rootTemplate *x509.Certificate, rootPriv crypto.Signer) (*x509.Certificate, *ecdsa.PrivateKey, error) {
	priv, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader)
	if err != nil {
		return nil, nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		Subject: pkix.Name{
			CommonName:   "sigstore-tsa-intermediate",
			Organization: []string{"sigstore.dev"},
		},
		NotBefore:             time.Now().Add(-2 * time.Minute),
		NotAfter:              time.Now().Add(2 * time.Hour),
		KeyUsage:              x509.KeyUsageCertSign | x509.KeyUsageCRLSign,
		ExtKeyUsage:           []x509.ExtKeyUsage{x509.ExtKeyUsageTimeStamping},
		BasicConstraintsValid: true,
		IsCA:                  true,
	}
	cert, err := createCertificate(tmpl, rootTemplate, &priv.PublicKey, rootPriv)
	if err != nil {
		return nil, nil, err
	}
	return cert, priv, nil
}
// GenerateLeafCert issues a 10-minute code-signing leaf certificate for the
// given email subject, embedding the issuer in the Fulcio OIDC-issuer
// extension (OID 1.3.6.1.4.1.57264.1.1). priv must implement crypto.Signer.
func GenerateLeafCert(subject string, oidcIssuer string, expiration time.Time, priv crypto.PrivateKey,
	parentTemplate *x509.Certificate, parentPriv crypto.Signer) (*x509.Certificate, error) {
	signer, ok := priv.(crypto.Signer)
	if !ok {
		return nil, fmt.Errorf("private key does not implement crypto.Signer")
	}
	tmpl := &x509.Certificate{
		SerialNumber:   big.NewInt(1),
		EmailAddresses: []string{subject},
		NotBefore:      expiration,
		NotAfter:       expiration.Add(10 * time.Minute),
		KeyUsage:       x509.KeyUsageDigitalSignature,
		ExtKeyUsage:    []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
		IsCA:           false,
		ExtraExtensions: []pkix.Extension{{
			// OID for OIDC Issuer extension
			Id:       asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 57264, 1, 1},
			Critical: false,
			Value:    []byte(oidcIssuer),
		}},
	}
	return createCertificate(tmpl, parentTemplate, signer.Public(), parentPriv)
}
// GenerateTSALeafCert issues a 10-minute TSA leaf certificate whose
// timestamping EKU extension is marked critical.
func GenerateTSALeafCert(expiration time.Time, priv *ecdsa.PrivateKey, parentTemplate *x509.Certificate, parentPriv crypto.Signer) (*x509.Certificate, error) {
	timestampExt, err := asn1.Marshal([]asn1.ObjectIdentifier{tsx509.EKUTimestampingOID})
	if err != nil {
		return nil, err
	}
	tmpl := &x509.Certificate{
		SerialNumber: big.NewInt(1),
		NotBefore:    expiration,
		NotAfter:     expiration.Add(10 * time.Minute),
		KeyUsage:     x509.KeyUsageDigitalSignature,
		ExtKeyUsage:  []x509.ExtKeyUsage{x509.ExtKeyUsageTimeStamping},
		IsCA:         false,
		// Re-state the EKU (OID 2.5.29.37) as a critical extension.
		ExtraExtensions: []pkix.Extension{
			{
				Id:       asn1.ObjectIdentifier{2, 5, 29, 37},
				Critical: true,
				Value:    timestampExt,
			},
		},
	}
	return createCertificate(tmpl, parentTemplate, &priv.PublicKey, parentPriv)
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tlog
import (
"bytes"
"context"
"crypto/ecdsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/json"
"encoding/pem"
"errors"
"fmt"
"time"
"github.com/cyberphone/json-canonicalization/go/src/webpki.org/jsoncanonicalizer"
"github.com/go-openapi/runtime"
"github.com/go-openapi/strfmt"
"github.com/go-openapi/swag/conv"
protocommon "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
v1 "github.com/sigstore/protobuf-specs/gen/pb-go/rekor/v1"
rekortilespb "github.com/sigstore/rekor-tiles/v2/pkg/generated/protobuf"
"github.com/sigstore/rekor-tiles/v2/pkg/note"
typesverifier "github.com/sigstore/rekor-tiles/v2/pkg/types/verifier"
"github.com/sigstore/rekor-tiles/v2/pkg/verify"
"github.com/sigstore/rekor/pkg/generated/models"
"github.com/sigstore/rekor/pkg/types"
dsse_v001 "github.com/sigstore/rekor/pkg/types/dsse/v0.0.1"
hashedrekord_v001 "github.com/sigstore/rekor/pkg/types/hashedrekord/v0.0.1"
intoto_v002 "github.com/sigstore/rekor/pkg/types/intoto/v0.0.2"
rekorVerify "github.com/sigstore/rekor/pkg/verify"
"github.com/sigstore/sigstore/pkg/signature"
"google.golang.org/protobuf/encoding/protojson"
"github.com/sigstore/sigstore-go/pkg/root"
)
// Entry wraps a single transparency log entry whose body has been decoded
// as either a Rekor v1 entry or a Rekor v2 (rekor-tiles) entry; the
// constructors set exactly one of the two decoded forms.
type Entry struct {
	kind string // entry kind from the proposed entry / KindVersion
	version string // entry API version from the body / KindVersion
	rekorV1Entry types.EntryImpl // decoded Rekor v1 body, if the body parsed as v1
	rekorV2Entry *rekortilespb.Entry // decoded Rekor v2 body, if the body parsed as v2
	signedEntryTimestamp []byte // SET from the inclusion promise, when present
	tle *v1.TransparencyLogEntry // underlying protobuf transparency log entry
}
// RekorPayload is the JSON payload over which a Rekor v1 signed entry
// timestamp (SET) is computed; see VerifySET.
type RekorPayload struct {
	Body interface{} `json:"body"` // base64-encoded canonicalized entry body
	IntegratedTime int64 `json:"integratedTime"` // Unix seconds the entry was integrated
	LogIndex int64 `json:"logIndex"` // entry index in the log
	LogID string `json:"logID"` //nolint:tagliatelle // hex-encoded log key ID
}
// ErrNilValue is returned when a required field of a transparency log entry is nil or missing.
var ErrNilValue = errors.New("validation error: nil value in transaction log entry")

// ErrInvalidRekorV2Entry is returned when a body does not decode as a Rekor v2 (rekor-tiles) entry.
var ErrInvalidRekorV2Entry = errors.New("type error: object is not a Rekor v2 type, try parsing as Rekor v1")
// Deprecated: use NewTlogEntry. NewEntry only parses a Rekor v1 entry.
//
// NewEntry decodes body as a Rekor v1 proposed entry and assembles an Entry
// together with a minimal TransparencyLogEntry proto carrying the log
// index/ID, integrated time, and (optionally) the SET and inclusion proof.
func NewEntry(body []byte, integratedTime int64, logIndex int64, logID []byte, signedEntryTimestamp []byte, inclusionProof *models.InclusionProof) (*Entry, error) {
	pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer())
	if err != nil {
		return nil, err
	}
	// Resolve the generic proposed entry to its concrete type implementation.
	rekorEntry, err := types.UnmarshalEntry(pe)
	if err != nil {
		return nil, err
	}
	entry := &Entry{
		rekorV1Entry: rekorEntry,
		tle: &v1.TransparencyLogEntry{
			LogIndex: logIndex,
			LogId: &protocommon.LogId{
				KeyId: logID,
			},
			IntegratedTime: integratedTime,
			CanonicalizedBody: body,
		},
		kind: pe.Kind(),
		version: rekorEntry.APIVersion(),
	}
	// Only keep a non-empty SET; leave the field nil otherwise.
	if len(signedEntryTimestamp) > 0 {
		entry.signedEntryTimestamp = signedEntryTimestamp
	}
	if inclusionProof != nil {
		// The models form carries hex-encoded hashes; the proto form uses
		// raw bytes, so decode each hash and the root hash.
		hashes := make([][]byte, len(inclusionProof.Hashes))
		for i, s := range inclusionProof.Hashes {
			hashes[i], err = hex.DecodeString(s)
			if err != nil {
				return nil, err
			}
		}
		rootHashDec, err := hex.DecodeString(*inclusionProof.RootHash)
		if err != nil {
			return nil, err
		}
		entry.tle.InclusionProof = &v1.InclusionProof{
			LogIndex: logIndex,
			RootHash: rootHashDec,
			TreeSize: *inclusionProof.TreeSize,
			Hashes: hashes,
			Checkpoint: &v1.Checkpoint{
				Envelope: *inclusionProof.Checkpoint,
			},
		}
	}
	return entry, nil
}
// NewTlogEntry parses a TransparencyLogEntry's canonicalized body as either
// a Rekor v2 entry or, failing that, a Rekor v1 entry, and wraps it in an
// Entry. The kind and version are taken from the proto's KindVersion.
func NewTlogEntry(tle *v1.TransparencyLogEntry) (*Entry, error) {
	// Guard against a missing KindVersion, which would otherwise cause a
	// nil-pointer dereference below when callers bypass
	// ParseTransparencyLogEntry's validation.
	if tle.KindVersion == nil {
		return nil, ErrNilValue
	}
	body := tle.CanonicalizedBody
	var rekorV1Entry types.EntryImpl
	rekorV2Entry, err := unmarshalRekorV2Entry(body)
	if err != nil {
		// Not a Rekor v2 body; try Rekor v1 before giving up.
		rekorV1Entry, err = unmarshalRekorV1Entry(body)
		if err != nil {
			return nil, fmt.Errorf("entry body is not a recognizable Rekor v1 or Rekor v2 type: %w", err)
		}
		rekorV2Entry = nil
	}
	entry := &Entry{
		rekorV1Entry: rekorV1Entry,
		rekorV2Entry: rekorV2Entry,
		kind:         tle.KindVersion.Kind,
		version:      tle.KindVersion.Version,
		tle:          tle,
	}
	// Only keep a non-empty SET from the inclusion promise.
	if tle.InclusionPromise != nil && len(tle.InclusionPromise.SignedEntryTimestamp) > 0 {
		entry.signedEntryTimestamp = tle.InclusionPromise.SignedEntryTimestamp
	}
	return entry, nil
}
// ParseTransparencyLogEntry validates the required fields of a
// TransparencyLogEntry proto and decodes its body into an Entry.
// Both Rekor v1 and Rekor v2 bodies are accepted.
func ParseTransparencyLogEntry(tle *v1.TransparencyLogEntry) (*Entry, error) {
	if tle == nil {
		return nil, ErrNilValue
	}
	// All of these fields are required.
	switch {
	case tle.CanonicalizedBody == nil,
		tle.LogIndex < 0,
		tle.LogId == nil,
		tle.LogId.KeyId == nil,
		tle.KindVersion == nil:
		return nil, ErrNilValue
	}
	// When an inclusion proof is present, its checkpoint must be too.
	if proof := tle.InclusionProof; proof != nil {
		if proof.Checkpoint == nil {
			return nil, fmt.Errorf("inclusion proof missing required checkpoint")
		}
		if proof.Checkpoint.Envelope == "" {
			return nil, fmt.Errorf("inclusion proof checkpoint empty")
		}
	}
	entry, err := NewTlogEntry(tle)
	if err != nil {
		return nil, err
	}
	// The decoded body must agree with the proto's declared kind/version.
	if entry.kind != tle.KindVersion.Kind || entry.version != tle.KindVersion.Version {
		return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, tle.KindVersion.Kind, tle.KindVersion.Version)
	}
	return entry, nil
}
// Deprecated: use ParseTransparencyLogEntry. ParseEntry only parses Rekor v1 type entries.
// ParseEntry decodes the entry bytes to a specific entry type (types.EntryImpl).
//
// It validates the required proto fields, converts the proto inclusion proof
// into the swagger models form, and delegates to NewEntry. The kind and
// version parsed from the body must match the proto's KindVersion.
func ParseEntry(protoEntry *v1.TransparencyLogEntry) (entry *Entry, err error) {
	// Reject entries missing any field required here; note that an
	// IntegratedTime of 0 is treated as missing.
	if protoEntry == nil ||
		protoEntry.CanonicalizedBody == nil ||
		protoEntry.IntegratedTime == 0 ||
		protoEntry.LogIndex < 0 ||
		protoEntry.LogId == nil ||
		protoEntry.LogId.KeyId == nil ||
		protoEntry.KindVersion == nil {
		return nil, ErrNilValue
	}
	// The inclusion promise (SET) is optional.
	signedEntryTimestamp := []byte{}
	if protoEntry.InclusionPromise != nil && protoEntry.InclusionPromise.SignedEntryTimestamp != nil {
		signedEntryTimestamp = protoEntry.InclusionPromise.SignedEntryTimestamp
	}
	var inclusionProof *models.InclusionProof
	if protoEntry.InclusionProof != nil {
		// The proto stores raw hash bytes; the models form is hex-encoded.
		var hashes []string
		for _, v := range protoEntry.InclusionProof.Hashes {
			hashes = append(hashes, hex.EncodeToString(v))
		}
		rootHash := hex.EncodeToString(protoEntry.InclusionProof.RootHash)
		if protoEntry.InclusionProof.Checkpoint == nil {
			return nil, fmt.Errorf("inclusion proof missing required checkpoint")
		}
		if protoEntry.InclusionProof.Checkpoint.Envelope == "" {
			return nil, fmt.Errorf("inclusion proof checkpoint empty")
		}
		inclusionProof = &models.InclusionProof{
			LogIndex: conv.Pointer(protoEntry.InclusionProof.LogIndex),
			RootHash: &rootHash,
			TreeSize: conv.Pointer(protoEntry.InclusionProof.TreeSize),
			Hashes: hashes,
			Checkpoint: conv.Pointer(protoEntry.InclusionProof.Checkpoint.Envelope),
		}
	}
	entry, err = NewEntry(protoEntry.CanonicalizedBody, protoEntry.IntegratedTime, protoEntry.LogIndex, protoEntry.LogId.KeyId, signedEntryTimestamp, inclusionProof)
	if err != nil {
		return nil, err
	}
	// The body's self-declared kind/version must agree with the proto.
	if entry.kind != protoEntry.KindVersion.Kind || entry.version != protoEntry.KindVersion.Version {
		return nil, fmt.Errorf("kind and version mismatch: %s/%s != %s/%s", entry.kind, entry.version, protoEntry.KindVersion.Kind, protoEntry.KindVersion.Version)
	}
	// Replace the minimal proto built by NewEntry with the full original.
	entry.tle = protoEntry
	return entry, nil
}
// ValidateEntry runs type-specific structural validation on the entry's
// decoded body: Rekor v1 bodies via the generated swagger validators,
// Rekor v2 bodies via local field checks. Unsupported Rekor v1 types are an
// error; unknown Rekor v2 spec types are ignored.
func ValidateEntry(entry *Entry) error {
	if entry.rekorV1Entry != nil {
		switch e := entry.rekorV1Entry.(type) {
		case *dsse_v001.V001Entry:
			if err := e.DSSEObj.Validate(strfmt.Default); err != nil {
				return err
			}
		case *hashedrekord_v001.V001Entry:
			if err := e.HashedRekordObj.Validate(strfmt.Default); err != nil {
				return err
			}
		case *intoto_v002.V002Entry:
			if err := e.IntotoObj.Validate(strfmt.Default); err != nil {
				return err
			}
		default:
			return fmt.Errorf("unsupported entry type: %T", e)
		}
	}
	if entry.rekorV2Entry != nil {
		switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) {
		case *rekortilespb.Spec_HashedRekordV002:
			if err := validateHashedRekordV002Entry(e.HashedRekordV002); err != nil {
				return err
			}
		case *rekortilespb.Spec_DsseV002:
			if err := validateDSSEV002Entry(e.DsseV002); err != nil {
				return err
			}
		}
	}
	return nil
}
// validateHashedRekordV002Entry checks that a Rekor v2 hashedrekord entry
// has a non-empty signature, a verifier, and data, then validates the
// verifier itself.
func validateHashedRekordV002Entry(hr *rekortilespb.HashedRekordLogEntryV002) error {
	sig := hr.GetSignature()
	if sig == nil || len(sig.GetContent()) == 0 {
		return fmt.Errorf("missing signature")
	}
	if sig.GetVerifier() == nil {
		return fmt.Errorf("missing verifier")
	}
	if hr.GetData() == nil {
		return fmt.Errorf("missing digest")
	}
	return typesverifier.Validate(sig.GetVerifier())
}
// validateDSSEV002Entry checks that a Rekor v2 DSSE entry has a payload hash
// and at least one signature, then validates the first signature's verifier.
func validateDSSEV002Entry(d *rekortilespb.DSSELogEntryV002) error {
	if d.GetPayloadHash() == nil {
		return fmt.Errorf("missing payload")
	}
	sigs := d.GetSignatures()
	if len(sigs) == 0 {
		return fmt.Errorf("missing signatures")
	}
	return typesverifier.Validate(sigs[0].GetVerifier())
}
// IntegratedTime returns when the entry was integrated into the log, or the
// zero time when no integrated time was recorded.
func (entry *Entry) IntegratedTime() time.Time {
	ts := entry.tle.IntegratedTime
	if ts == 0 {
		return time.Time{}
	}
	return time.Unix(ts, 0)
}
// Signature returns the raw signature bytes recorded in the entry body.
// For DSSE/in-toto v1 entries the first signature is base64-decoded; for
// hashedrekord the signature content is returned as-is; Rekor v2 entries
// store raw bytes directly. An empty slice is returned for unknown entry
// types or decode failures.
func (entry *Entry) Signature() []byte {
	if entry.rekorV1Entry != nil {
		switch e := entry.rekorV1Entry.(type) {
		case *dsse_v001.V001Entry:
			sigBytes, err := base64.StdEncoding.DecodeString(*e.DSSEObj.Signatures[0].Signature)
			if err != nil {
				return []byte{}
			}
			return sigBytes
		case *hashedrekord_v001.V001Entry:
			return e.HashedRekordObj.Signature.Content
		case *intoto_v002.V002Entry:
			sigBytes, err := base64.StdEncoding.DecodeString(string(*e.IntotoObj.Content.Envelope.Signatures[0].Sig))
			if err != nil {
				return []byte{}
			}
			return sigBytes
		}
	}
	if entry.rekorV2Entry != nil {
		// Rekor v2 signatures are raw bytes; no base64 decoding needed.
		switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) {
		case *rekortilespb.Spec_HashedRekordV002:
			return e.HashedRekordV002.GetSignature().GetContent()
		case *rekortilespb.Spec_DsseV002:
			return e.DsseV002.GetSignatures()[0].GetContent()
		}
	}
	return []byte{}
}
// PublicKey returns the verification material embedded in the entry body:
// a *x509.Certificate when the material parses as a certificate, otherwise
// the result of parsing it as a PKIX public key. Returns nil when no
// material is present or it cannot be parsed.
func (entry *Entry) PublicKey() any {
	var certBytes []byte
	if entry.rekorV1Entry != nil {
		var pemString []byte
		switch e := entry.rekorV1Entry.(type) {
		case *dsse_v001.V001Entry:
			pemString = []byte(*e.DSSEObj.Signatures[0].Verifier)
		case *hashedrekord_v001.V001Entry:
			pemString = []byte(e.HashedRekordObj.Signature.PublicKey.Content)
		case *intoto_v002.V002Entry:
			pemString = []byte(*e.IntotoObj.Content.Envelope.Signatures[0].PublicKey)
		}
		certBlock, _ := pem.Decode(pemString)
		// pem.Decode returns nil for invalid or empty PEM; the original code
		// dereferenced the block unconditionally and could panic here.
		if certBlock == nil {
			return nil
		}
		certBytes = certBlock.Bytes
	} else if entry.rekorV2Entry != nil {
		var verifier *rekortilespb.Verifier
		switch e := entry.rekorV2Entry.GetSpec().GetSpec().(type) {
		case *rekortilespb.Spec_HashedRekordV002:
			verifier = e.HashedRekordV002.GetSignature().GetVerifier()
		case *rekortilespb.Spec_DsseV002:
			verifier = e.DsseV002.GetSignatures()[0].GetVerifier()
		}
		// Unknown spec types leave verifier nil; bail out instead of
		// dereferencing it.
		if verifier == nil {
			return nil
		}
		switch verifier.Verifier.(type) {
		case *rekortilespb.Verifier_PublicKey:
			certBytes = verifier.GetPublicKey().GetRawBytes()
		case *rekortilespb.Verifier_X509Certificate:
			certBytes = verifier.GetX509Certificate().GetRawBytes()
		}
	}
	// Prefer certificate parsing; fall back to a bare PKIX public key.
	if cert, err := x509.ParseCertificate(certBytes); err == nil {
		return cert
	}
	pk, err := x509.ParsePKIXPublicKey(certBytes)
	if err != nil {
		return nil
	}
	return pk
}
// LogKeyID returns the log's key ID bytes as a string.
func (entry *Entry) LogKeyID() string {
	keyID := entry.tle.GetLogId().GetKeyId()
	return string(keyID)
}
// LogIndex returns the entry's index in the transparency log.
func (entry *Entry) LogIndex() int64 {
	idx := entry.tle.GetLogIndex()
	return idx
}
// Body returns the base64-encoded canonicalized entry body.
func (entry *Entry) Body() any {
	encoded := base64.StdEncoding.EncodeToString(entry.tle.CanonicalizedBody)
	return encoded
}
// HasInclusionPromise reports whether a signed entry timestamp was recorded.
func (entry *Entry) HasInclusionPromise() bool {
	set := entry.signedEntryTimestamp
	return set != nil
}
// HasInclusionProof reports whether the proto entry carries an inclusion proof.
func (entry *Entry) HasInclusionProof() bool {
	proof := entry.tle.InclusionProof
	return proof != nil
}
// TransparencyLogEntry returns the underlying protobuf log entry.
func (entry *Entry) TransparencyLogEntry() *v1.TransparencyLogEntry {
	tle := entry.tle
	return tle
}
// VerifyInclusion verifies a Rekor v1-style checkpoint and the entry's
// inclusion in the Rekor v1 log.
//
// It reconstructs a models.LogEntryAnon from the entry's transparency log
// proto and delegates to the Rekor helpers for both the Merkle inclusion
// proof and the checkpoint signature.
func VerifyInclusion(entry *Entry, verifier signature.Verifier) error {
	proof := entry.tle.GetInclusionProof()
	// Guard against entries without an inclusion proof; the original code
	// dereferenced the proof unconditionally and could panic.
	if proof == nil {
		return errors.New("transparency log entry has no inclusion proof")
	}
	// The proto stores raw hash bytes; the models form is hex-encoded.
	hashes := make([]string, len(proof.GetHashes()))
	for i, b := range proof.GetHashes() {
		hashes[i] = hex.EncodeToString(b)
	}
	rootHash := hex.EncodeToString(proof.GetRootHash())
	logEntry := models.LogEntryAnon{
		IntegratedTime: &entry.tle.IntegratedTime,
		LogID:          conv.Pointer(string(entry.tle.GetLogId().GetKeyId())),
		LogIndex:       conv.Pointer(proof.GetLogIndex()),
		Body:           base64.StdEncoding.EncodeToString(entry.tle.GetCanonicalizedBody()),
		Verification: &models.LogEntryAnonVerification{
			InclusionProof: &models.InclusionProof{
				Checkpoint: conv.Pointer(proof.GetCheckpoint().GetEnvelope()),
				Hashes:     hashes,
				LogIndex:   conv.Pointer(proof.GetLogIndex()),
				RootHash:   &rootHash,
				TreeSize:   conv.Pointer(proof.GetTreeSize()),
			},
			SignedEntryTimestamp: strfmt.Base64(entry.signedEntryTimestamp),
		},
	}
	if err := rekorVerify.VerifyInclusion(context.Background(), &logEntry); err != nil {
		return err
	}
	return rekorVerify.VerifyCheckpointSignature(&logEntry, verifier)
}
// VerifyCheckpointAndInclusion verifies a checkpoint and the entry's
// inclusion in the transparency log.
// This function is compatible with Rekor v1 and Rekor v2.
func VerifyCheckpointAndInclusion(entry *Entry, verifier signature.Verifier, origin string) error {
	noteVerifier, err := note.NewNoteVerifier(origin, verifier)
	if err != nil {
		return fmt.Errorf("loading note verifier: %w", err)
	}
	if err := verify.VerifyLogEntry(entry.TransparencyLogEntry(), noteVerifier); err != nil {
		return fmt.Errorf("verifying log entry: %w", err)
	}
	return nil
}
// VerifySET verifies the entry's signed entry timestamp (Rekor v1 only)
// against the matching log's public key. The SET is an ECDSA signature over
// the JSON-canonicalized RekorPayload, and the log key must have been valid
// at the entry's integrated time.
func VerifySET(entry *Entry, verifiers map[string]*root.TransparencyLog) error {
	if entry.rekorV1Entry == nil {
		return fmt.Errorf("can only verify SET for Rekor v1 entry")
	}
	// Reconstruct the payload the log signed.
	rekorPayload := RekorPayload{
		Body: entry.Body(),
		IntegratedTime: entry.tle.IntegratedTime,
		LogIndex: entry.LogIndex(),
		LogID: hex.EncodeToString([]byte(entry.LogKeyID())),
	}
	// Look up the log key by hex-encoded key ID.
	verifier, ok := verifiers[hex.EncodeToString([]byte(entry.LogKeyID()))]
	if !ok {
		return errors.New("rekor log public key not found for payload")
	}
	if verifier.ValidityPeriodStart.IsZero() {
		return errors.New("rekor validity period start time not set")
	}
	// The key must have been valid at integration time; a zero
	// ValidityPeriodEnd means no expiry is recorded.
	if (verifier.ValidityPeriodStart.After(entry.IntegratedTime())) ||
		(!verifier.ValidityPeriodEnd.IsZero() && verifier.ValidityPeriodEnd.Before(entry.IntegratedTime())) {
		return errors.New("rekor log public key not valid at payload integrated time")
	}
	contents, err := json.Marshal(rekorPayload)
	if err != nil {
		return fmt.Errorf("marshaling: %w", err)
	}
	// The signature covers the canonical JSON form of the payload.
	canonicalized, err := jsoncanonicalizer.Transform(contents)
	if err != nil {
		return fmt.Errorf("canonicalizing: %w", err)
	}
	hash := sha256.Sum256(canonicalized)
	// Only ECDSA log keys are supported here.
	if ecdsaPublicKey, ok := verifier.PublicKey.(*ecdsa.PublicKey); !ok {
		return fmt.Errorf("unsupported public key type: %T", verifier.PublicKey)
	} else if !ecdsa.VerifyASN1(ecdsaPublicKey, hash[:], entry.signedEntryTimestamp) {
		return errors.New("unable to verify SET")
	}
	return nil
}
// unmarshalRekorV1Entry decodes body as a Rekor v1 proposed entry and
// resolves it to its concrete entry implementation.
func unmarshalRekorV1Entry(body []byte) (types.EntryImpl, error) {
	pe, err := models.UnmarshalProposedEntry(bytes.NewReader(body), runtime.JSONConsumer())
	if err != nil {
		return nil, err
	}
	return types.UnmarshalEntry(pe)
}
// unmarshalRekorV2Entry decodes body as a Rekor v2 (rekor-tiles) protojson
// Entry. On failure it returns ErrInvalidRekorV2Entry wrapped with the
// underlying decode error, so callers can still match it with errors.Is
// while seeing why decoding failed.
func unmarshalRekorV2Entry(body []byte) (*rekortilespb.Entry, error) {
	var logEntryBody rekortilespb.Entry
	if err := protojson.Unmarshal(body, &logEntryBody); err != nil {
		// The original code discarded err entirely; keep it for diagnostics.
		return nil, fmt.Errorf("%w: %v", ErrInvalidRekorV2Entry, err)
	}
	return &logEntryBody, nil
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tuf
import (
"fmt"
"path/filepath"
"strings"
"time"
"github.com/theupdateframework/go-tuf/v2/metadata/config"
"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
"github.com/theupdateframework/go-tuf/v2/metadata/updater"
"github.com/sigstore/sigstore-go/pkg/util"
)
// Client is a Sigstore TUF client
type Client struct {
	cfg *config.UpdaterConfig // updater configuration (cache dirs, fetcher, ...)
	up *updater.Updater // current TUF updater; replaced on each Refresh
	opts *Options // user-supplied client options
}
// New returns a new client with custom options.
//
// The client caches metadata and targets under opts.CachePath/<repo>,
// configures a metadata fetcher (the provided one, or a default with the
// sigstore-go user agent), and performs an initial local-only metadata load
// before deciding whether a full TUF refresh is required.
func New(opts *Options) (*Client, error) {
	var c = Client{
		opts: opts,
	}
	dir := filepath.Join(opts.CachePath, URLToPath(opts.RepositoryBaseURL))
	var err error
	if c.cfg, err = config.New(opts.RepositoryBaseURL, opts.Root); err != nil {
		return nil, fmt.Errorf("failed to create TUF client: %w", err)
	}
	c.cfg.LocalMetadataDir = dir
	c.cfg.LocalTargetsDir = filepath.Join(dir, "targets")
	c.cfg.DisableLocalCache = c.opts.DisableLocalCache
	c.cfg.PrefixTargetsWithHash = !c.opts.DisableConsistentSnapshot
	// With the local cache disabled, the cache-control options are moot;
	// zero them so later logic does not consult them.
	if c.cfg.DisableLocalCache {
		c.opts.CachePath = ""
		c.opts.CacheValidity = 0
		c.opts.ForceCache = false
	}
	if opts.Fetcher != nil {
		c.cfg.Fetcher = opts.Fetcher
	} else {
		fetcher := fetcher.NewDefaultFetcher()
		fetcher.SetHTTPUserAgent(util.ConstructUserAgent())
		c.cfg.Fetcher = fetcher
	}
	// Upon client creation, we may not perform a full TUF update,
	// based on the cache control configuration. Start with a local
	// client (only reads content on disk) and then decide if we
	// must perform a full TUF update.
	tmpCfg := *c.cfg
	// Create a temporary config for the first use where UnsafeLocalMode
	// is true. This means that when we first initialize the client,
	// we are guaranteed to only read the metadata on disk.
	// Based on that metadata we take a decision if a full TUF
	// refresh should be done or not. As so, the tmpCfg is only needed
	// here and not in future invocations.
	tmpCfg.UnsafeLocalMode = true
	c.up, err = updater.New(&tmpCfg)
	if err != nil {
		return nil, fmt.Errorf("failed to create initial TUF updater: %w", err)
	}
	if err = c.loadMetadata(); err != nil {
		return nil, fmt.Errorf("failed to load metadata: %w", err)
	}
	return &c, nil
}
// DefaultClient returns a Sigstore TUF client for the public good instance,
// built from DefaultOptions.
func DefaultClient() (*Client, error) {
	return New(DefaultOptions())
}
// loadMetadata decides whether the client must actually perform a full TUF
// refresh. The TUF specification mandates one, but for certain Sigstore
// clients it may be beneficial to rely on the cache, and in air-gapped
// deployments it may not even be possible.
func (c *Client) loadMetadata() error {
	// Load the on-disk metadata into memory and verify it. A failure most
	// likely means the metadata files are missing; do a full update.
	if err := c.up.Refresh(); err != nil {
		return c.Refresh()
	}
	if c.opts.ForceCache {
		// Trust the cache for as long as the metadata itself is valid.
		return nil
	}
	if c.opts.CacheValidity > 0 {
		cfg, err := LoadConfig(c.configPath())
		if err != nil {
			// The config may not exist yet; start from an empty one.
			cfg = &Config{}
		}
		validUntil := cfg.LastTimestamp.AddDate(0, 0, c.opts.CacheValidity)
		if time.Now().Before(validUntil) {
			// Cache is still within its validity window; no update needed.
			return nil
		}
	}
	return c.Refresh()
}
// configPath returns the location of the per-repository client config file
// inside the cache directory.
func (c *Client) configPath() string {
	name := fmt.Sprintf("%s.json", URLToPath(c.opts.RepositoryBaseURL))
	return filepath.Join(c.opts.CachePath, name)
}
// Refresh forces a refresh of the underlying TUF client.
// As the tuf client updater does not support multiple refreshes during
// its life-time, this will replace the TUF client updater with a new one.
func (c *Client) Refresh() error {
	var err error
	c.up, err = updater.New(c.cfg)
	if err != nil {
		return fmt.Errorf("failed to create tuf updater: %w", err)
	}
	if err = c.up.Refresh(); err != nil {
		return fmt.Errorf("tuf refresh failed: %w", err)
	}
	if c.cfg.DisableLocalCache {
		// No cache on disk, so there is no last-update timestamp to persist.
		return nil
	}
	// Record when this refresh happened so CacheValidity can be enforced.
	cfg, err := LoadConfig(c.configPath())
	if err != nil {
		// Likely the config file does not exist yet; create a fresh one.
		cfg = &Config{}
	}
	cfg.LastTimestamp = time.Now()
	// Best effort: ignore errors writing the updated config file.
	_ = cfg.Persist(c.configPath())
	return nil
}
// GetTarget returns a target file from the TUF repository, preferring a
// valid cached copy and downloading only on a cache miss.
func (c *Client) GetTarget(target string) ([]byte, error) {
	// Targets are resolved via the target info struct, so the file path
	// passed to the cache/download calls is left empty.
	const filePath = ""
	ti, err := c.up.GetTargetInfo(target)
	if err != nil {
		return nil, fmt.Errorf("getting info for target \"%s\": %w", target, err)
	}
	cachedPath, data, err := c.up.FindCachedTarget(ti, filePath)
	if err != nil {
		return nil, fmt.Errorf("getting target cache: %w", err)
	}
	if cachedPath != "" {
		// A valid cached copy exists.
		return data, nil
	}
	// Cache miss: download the target. targetsBaseURL is intentionally
	// empty and thus ignored.
	const targetsBaseURL = ""
	if _, data, err = c.up.DownloadTarget(ti, filePath, targetsBaseURL); err != nil {
		return nil, fmt.Errorf("failed to download target file %s - %w", target, err)
	}
	return data, nil
}
// URLToPath converts a URL to a filename-compatible string: the http(s)
// scheme prefix is stripped, '/' and ':' become '-', and the result is
// lowercased.
// e.g. https://github.github.com/prod-tuf-root -> github.github.com-prod-tuf-root
func URLToPath(url string) string {
	s, _ := strings.CutPrefix(url, "https://")
	s, _ = strings.CutPrefix(s, "http://")
	sanitizer := strings.NewReplacer("/", "-", ":", "-")
	return strings.ToLower(sanitizer.Replace(s))
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tuf
import (
"encoding/json"
"fmt"
"os"
"time"
)
// Config is the small piece of client state persisted alongside the TUF
// cache; it records when the metadata was last refreshed so the
// CacheValidity option can be enforced.
type Config struct {
	LastTimestamp time.Time `json:"lastTimestamp"` // time of the last successful refresh
}
// LoadConfig reads and decodes the client config stored at path p.
func LoadConfig(p string) (*Config, error) {
	b, err := os.ReadFile(p)
	if err != nil {
		return nil, fmt.Errorf("failed to read config: %w", err)
	}
	c := new(Config)
	if err := json.Unmarshal(b, c); err != nil {
		return nil, fmt.Errorf("malformed config file: %w", err)
	}
	return c, nil
}
// Persist encodes the config as JSON and writes it to path p with mode 0600.
func (c *Config) Persist(p string) error {
	b, err := json.Marshal(c)
	if err != nil {
		return fmt.Errorf("failed to JSON marshal config: %w", err)
	}
	if err := os.WriteFile(p, b, 0600); err != nil {
		return fmt.Errorf("failed to write config: %w", err)
	}
	return nil
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package tuf
import (
"embed"
"math"
"os"
"path/filepath"
"github.com/sigstore/sigstore-go/pkg/util"
"github.com/theupdateframework/go-tuf/v2/metadata/fetcher"
)
// embeddedRepo holds the TUF root metadata files from the repository/
// directory, compiled into the binary.
//
//go:embed repository
var embeddedRepo embed.FS
const (
	// DefaultMirror is the TUF repository URL of the production public good instance.
	DefaultMirror = "https://tuf-repo-cdn.sigstore.dev"
	// StagingMirror is the TUF repository URL of the staging instance.
	StagingMirror = "https://tuf-repo-cdn.sigstage.dev"
	// The following caching values can be used for the CacheValidity option
	// NoCache disables the timestamp-based cache window.
	NoCache = 0
	// MaxCache keeps the cache valid for the maximum representable number of days.
	MaxCache = math.MaxInt
)
// Options represent the various options for a Sigstore TUF Client
type Options struct {
	// CacheValidity period in days (default 0). The client will persist a
	// timestamp with the cache after refresh. Note that the client will
	// always refresh the cache if the metadata is expired or if the client is
	// unable to find a persisted timestamp, so this is not an optimal control
	// for air-gapped environments. Use const MaxCache to update the cache when
	// the metadata is expired, though the first initialization will still
	// refresh the cache.
	CacheValidity int
	// ForceCache controls if the cache should be used without update
	// as long as the metadata is valid. Use ForceCache over CacheValidity
	// if you want to always use the cache up until its expiration. Note that
	// the client will refresh the cache once the metadata has expired, so this
	// is not an optimal control for air-gapped environments. Clients instead
	// should provide a trust root file directly to the client to bypass TUF.
	ForceCache bool
	// Root is the TUF trust anchor
	Root []byte
	// CachePath is the location on disk for TUF cache
	// (default $HOME/.sigstore/tuf)
	CachePath string
	// RepositoryBaseURL is the TUF repository location URL
	// (default https://tuf-repo-cdn.sigstore.dev)
	RepositoryBaseURL string
	// DisableLocalCache mode allows a client to work on a read-only
	// files system if this is set, cache path is ignored.
	DisableLocalCache bool
	// DisableConsistentSnapshot disables prefixing target downloads with
	// their hash (consistent snapshots).
	DisableConsistentSnapshot bool
	// Fetcher is the metadata fetcher
	Fetcher fetcher.Fetcher
}
// WithCacheValidity sets the cache validity period in days.
// It returns the same Options to allow chaining.
func (o *Options) WithCacheValidity(days int) *Options {
	o.CacheValidity = days
	return o
}
// WithForceCache forces the client to use the cache without updating.
// It returns the same Options to allow chaining.
func (o *Options) WithForceCache() *Options {
	o.ForceCache = true
	return o
}
// WithRoot sets the TUF trust anchor.
// It returns the same Options to allow chaining.
func (o *Options) WithRoot(root []byte) *Options {
	o.Root = root
	return o
}
// WithCachePath sets the location on disk for the TUF cache.
// It returns the same Options to allow chaining.
func (o *Options) WithCachePath(path string) *Options {
	o.CachePath = path
	return o
}
// WithRepositoryBaseURL sets the TUF repository location URL.
// It returns the same Options to allow chaining.
func (o *Options) WithRepositoryBaseURL(url string) *Options {
	o.RepositoryBaseURL = url
	return o
}
// WithDisableLocalCache sets the client to work on a read-only file system.
// It returns the same Options to allow chaining.
func (o *Options) WithDisableLocalCache() *Options {
	o.DisableLocalCache = true
	return o
}
// WithDisableConsistentSnapshot sets the client to disable consistent snapshot.
// It returns the same Options to allow chaining.
func (o *Options) WithDisableConsistentSnapshot() *Options {
	o.DisableConsistentSnapshot = true
	return o
}
// WithFetcher sets the metadata fetcher.
// It returns the same Options to allow chaining.
func (o *Options) WithFetcher(f fetcher.Fetcher) *Options {
	o.Fetcher = f
	return o
}
// DefaultOptions returns an options struct for the public good instance:
// the embedded production root, a cache under $HOME/.sigstore/root, the
// default mirror, and a default fetcher with the sigstore-go user agent.
func DefaultOptions() *Options {
	var opts Options
	opts.Root = DefaultRoot()
	// Fall back to the temp dir when the home directory is unavailable.
	home, err := os.UserHomeDir()
	if err != nil {
		home = os.TempDir()
	}
	opts.CachePath = filepath.Join(home, ".sigstore", "root")
	opts.RepositoryBaseURL = DefaultMirror
	// Named f (not fetcher) to avoid shadowing the imported fetcher package.
	f := fetcher.NewDefaultFetcher()
	f.SetHTTPUserAgent(util.ConstructUserAgent())
	opts.Fetcher = f
	return &opts
}
// DefaultRoot returns the embedded root.json for the public good instance.
func DefaultRoot() []byte {
	// The embed file system always uses forward slashes as path
	// separators, even on Windows.
	const p = "repository/root.json"
	b, err := embeddedRepo.ReadFile(p)
	if err != nil {
		// This should never happen: reading a correct path from an
		// embedded FS cannot fail. If it does, the binary was assembled
		// incorrectly and there is no way to recover.
		panic(err)
	}
	return b
}
// StagingRoot returns the embedded root.json for the staging instance.
func StagingRoot() []byte {
	// The embed file system always uses forward slashes as path
	// separators, even on Windows.
	const p = "repository/staging_root.json"
	b, err := embeddedRepo.ReadFile(p)
	if err != nil {
		// This should never happen: reading a correct path from an
		// embedded FS cannot fail. If it does, the binary was assembled
		// incorrectly and there is no way to recover.
		panic(err)
	}
	return b
}
// Copyright 2024 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package util //nolint:revive
import (
"runtime/debug"
)
func ConstructUserAgent() string {
userAgent := "sigstore-go"
buildInfo, ok := debug.ReadBuildInfo()
if !ok {
return userAgent
}
for _, eachDep := range buildInfo.Deps {
if eachDep.Path == "github.com/sigstore/sigstore-go" {
userAgent += "/"
userAgent += eachDep.Version
}
}
return userAgent
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"crypto/x509"
"errors"
"time"
"github.com/sigstore/sigstore-go/pkg/root"
)
// VerifyLeafCertificate tries each Fulcio certificate authority in the
// trusted material and returns the chains from the first one that
// verifies the leaf certificate at the given observer timestamp.
func VerifyLeafCertificate(observerTimestamp time.Time, leafCert *x509.Certificate, trustedMaterial root.TrustedMaterial) ([][]*x509.Certificate, error) { // nolint: revive
	cas := trustedMaterial.FulcioCertificateAuthorities()
	for _, ca := range cas {
		if chains, err := ca.Verify(leafCert, observerTimestamp); err == nil {
			return chains, nil
		}
	}
	// No CA accepted the certificate; individual CA errors are not surfaced.
	return nil, errors.New("leaf certificate verification failed")
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"encoding/json"
"errors"
"fmt"
"regexp"
"github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
)
// SubjectAlternativeNameMatcher matches a certificate's subject
// alternative name, by exact value and/or by regular expression.
type SubjectAlternativeNameMatcher struct {
	SubjectAlternativeName string        `json:"subjectAlternativeName"`
	Regexp                 regexp.Regexp `json:"regexp,omitempty"`
}
// IssuerMatcher matches a certificate's issuer, by exact value and/or by
// regular expression.
type IssuerMatcher struct {
	Issuer string        `json:"issuer"`
	Regexp regexp.Regexp `json:"regexp,omitempty"`
}
// CertificateIdentity bundles the criteria a certificate must satisfy:
// a SAN matcher, an issuer matcher, and any expected OID extensions.
type CertificateIdentity struct {
	SubjectAlternativeName SubjectAlternativeNameMatcher `json:"subjectAlternativeName"`
	Issuer                 IssuerMatcher                 `json:"issuer"`
	certificate.Extensions
}

// CertificateIdentities is a list of identities; a certificate matches if
// ANY of them match (see CertificateIdentities.Verify).
type CertificateIdentities []CertificateIdentity
// ErrValueMismatch reports that a certificate field did not equal the
// expected exact value.
type ErrValueMismatch struct {
	object   string
	expected string
	actual   string
}

// Error renders the mismatch as `expected <object> value "<expected>", got "<actual>"`.
func (e *ErrValueMismatch) Error() string {
	return "expected " + e.object + " value \"" + e.expected + "\", got \"" + e.actual + "\""
}
// ErrValueRegexMismatch reports that a certificate field did not match
// the expected regular expression.
type ErrValueRegexMismatch struct {
	object string
	regex  string
	value  string
}

// Error renders the mismatch as `expected <object> value to match regex "<regex>", got "<value>"`.
func (e *ErrValueRegexMismatch) Error() string {
	return "expected " + e.object + " value to match regex \"" + e.regex + "\", got \"" + e.value + "\""
}
type ErrNoMatchingCertificateIdentity struct {
errors []error
}
func (e *ErrNoMatchingCertificateIdentity) Error() string {
if len(e.errors) > 0 {
return fmt.Sprintf("no matching CertificateIdentity found, last error: %v", e.errors[len(e.errors)-1])
}
return "no matching CertificateIdentity found"
}
func (e *ErrNoMatchingCertificateIdentity) Unwrap() []error {
return e.errors
}
// NewSANMatcher provides an easier way to create a SubjectAlternativeNameMatcher.
// If the regexpStr fails to compile into a Regexp, an error is returned.
func NewSANMatcher(sanValue string, regexpStr string) (SubjectAlternativeNameMatcher, error) {
	compiled, err := regexp.Compile(regexpStr)
	if err != nil {
		return SubjectAlternativeNameMatcher{}, err
	}
	matcher := SubjectAlternativeNameMatcher{
		SubjectAlternativeName: sanValue,
		Regexp:                 *compiled,
	}
	return matcher, nil
}
// MarshalJSON encodes the matcher with the regexp rendered as its source
// string, because the default Regexp json marshal is quite ugly.
func (s *SubjectAlternativeNameMatcher) MarshalJSON() ([]byte, error) {
	type sanJSON struct {
		SubjectAlternativeName string `json:"subjectAlternativeName"`
		Regexp                 string `json:"regexp,omitempty"`
	}
	return json.Marshal(sanJSON{
		SubjectAlternativeName: s.SubjectAlternativeName,
		Regexp:                 s.Regexp.String(),
	})
}
// Verify checks if the actualCert matches the SANMatcher's Value and
// Regexp – if those values have been provided.
func (s SubjectAlternativeNameMatcher) Verify(actualCert certificate.Summary) error {
	san := actualCert.SubjectAlternativeName
	if s.SubjectAlternativeName != "" && san != s.SubjectAlternativeName {
		return &ErrValueMismatch{"SAN", s.SubjectAlternativeName, san}
	}
	if pattern := s.Regexp.String(); pattern != "" && !s.Regexp.MatchString(san) {
		return &ErrValueRegexMismatch{"SAN", pattern, san}
	}
	return nil
}
// NewIssuerMatcher creates an IssuerMatcher from an exact issuer value
// and/or a regular expression; a non-compiling regexp yields an error.
func NewIssuerMatcher(issuerValue, regexpStr string) (IssuerMatcher, error) {
	compiled, err := regexp.Compile(regexpStr)
	if err != nil {
		return IssuerMatcher{}, err
	}
	matcher := IssuerMatcher{Issuer: issuerValue, Regexp: *compiled}
	return matcher, nil
}
// MarshalJSON encodes the matcher with the regexp rendered as its source
// string, matching the encoding used by SubjectAlternativeNameMatcher.
func (i *IssuerMatcher) MarshalJSON() ([]byte, error) {
	type issuerJSON struct {
		Issuer string `json:"issuer"`
		Regexp string `json:"regexp,omitempty"`
	}
	return json.Marshal(issuerJSON{
		Issuer: i.Issuer,
		Regexp: i.Regexp.String(),
	})
}
// Verify checks the certificate's issuer against the exact value and/or
// the regular expression, when those criteria have been provided.
func (i IssuerMatcher) Verify(actualCert certificate.Summary) error {
	issuer := actualCert.Issuer
	if i.Issuer != "" && issuer != i.Issuer {
		return &ErrValueMismatch{"issuer", i.Issuer, issuer}
	}
	if pattern := i.Regexp.String(); pattern != "" && !i.Regexp.MatchString(issuer) {
		return &ErrValueRegexMismatch{"issuer", pattern, issuer}
	}
	return nil
}
// NewCertificateIdentity assembles a CertificateIdentity, requiring SAN
// criteria and issuer criteria; the issuer must be given via the
// IssuerMatcher, not via the Extensions.
func NewCertificateIdentity(sanMatcher SubjectAlternativeNameMatcher, issuerMatcher IssuerMatcher, extensions certificate.Extensions) (CertificateIdentity, error) {
	switch {
	case sanMatcher.SubjectAlternativeName == "" && sanMatcher.Regexp.String() == "":
		return CertificateIdentity{}, errors.New("when verifying a certificate identity, there must be subject alternative name criteria")
	case issuerMatcher.Issuer == "" && issuerMatcher.Regexp.String() == "":
		return CertificateIdentity{}, errors.New("when verifying a certificate identity, must specify Issuer criteria")
	case extensions.Issuer != "":
		return CertificateIdentity{}, errors.New("please specify issuer in IssuerMatcher, not Extensions")
	}
	return CertificateIdentity{
		SubjectAlternativeName: sanMatcher,
		Issuer:                 issuerMatcher,
		Extensions:             extensions,
	}, nil
}
// NewShortCertificateIdentity provides a more convenient way of initializing
// a CertificiateIdentity with a SAN and the Issuer OID extension. If you need
// to check more OID extensions, use NewCertificateIdentity instead.
func NewShortCertificateIdentity(issuer, issuerRegex, sanValue, sanRegex string) (CertificateIdentity, error) {
	san, err := NewSANMatcher(sanValue, sanRegex)
	if err != nil {
		return CertificateIdentity{}, err
	}
	iss, err := NewIssuerMatcher(issuer, issuerRegex)
	if err != nil {
		return CertificateIdentity{}, err
	}
	return NewCertificateIdentity(san, iss, certificate.Extensions{})
}
// Verify verifies the CertificateIdentities, and if ANY of them match the cert,
// it returns the CertificateIdentity that matched. If none match, it returns an
// error aggregating every per-identity failure.
func (i CertificateIdentities) Verify(cert certificate.Summary) (*CertificateIdentity, error) {
	failure := &ErrNoMatchingCertificateIdentity{}
	for _, candidate := range i {
		err := candidate.Verify(cert)
		if err == nil {
			// Return a pointer to a copy, not into the slice.
			match := candidate
			return &match, nil
		}
		failure.errors = append(failure.errors, err)
	}
	return nil, failure
}
// Verify checks if the actualCert matches the CertificateIdentity's SAN and
// any of the provided OID extension values. Any empty values are ignored.
func (c CertificateIdentity) Verify(actualCert certificate.Summary) error {
	if err := c.SubjectAlternativeName.Verify(actualCert); err != nil {
		return err
	}
	if err := c.Issuer.Verify(actualCert); err != nil {
		return err
	}
	return certificate.CompareExtensions(c.Extensions, actualCert.Extensions)
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"fmt"
)
type ErrVerification struct {
err error
}
func NewVerificationError(e error) ErrVerification {
return ErrVerification{e}
}
func (e ErrVerification) Unwrap() error {
return e.err
}
func (e ErrVerification) String() string {
return fmt.Sprintf("verification error: %s", e.err.Error())
}
func (e ErrVerification) Error() string {
return e.String()
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"crypto/x509"
"errors"
"time"
in_toto "github.com/in-toto/attestation/go/v1"
"github.com/secure-systems-lab/go-securesystemslib/dsse"
"github.com/sigstore/sigstore-go/pkg/root"
"github.com/sigstore/sigstore-go/pkg/tlog"
)
// errNotImplemented is returned by the BaseSignedEntity stub methods below.
var errNotImplemented = errors.New("not implemented")

// HasInclusionPromise reports whether a transparency log inclusion
// promise is present.
type HasInclusionPromise interface {
	HasInclusionPromise() bool
}

// HasInclusionProof reports whether a transparency log inclusion proof
// is present.
type HasInclusionProof interface {
	HasInclusionProof() bool
}

// SignatureProvider exposes the entity's signature content.
type SignatureProvider interface {
	SignatureContent() (SignatureContent, error)
}

// SignedTimestampProvider exposes the entity's signed timestamps as raw bytes.
type SignedTimestampProvider interface {
	Timestamps() ([][]byte, error)
}

// TlogEntryProvider exposes the entity's transparency log entries.
type TlogEntryProvider interface {
	TlogEntries() ([]*tlog.Entry, error)
}

// VerificationProvider exposes the entity's verification material.
type VerificationProvider interface {
	VerificationContent() (VerificationContent, error)
}

// VersionProvider exposes the entity's version string.
type VersionProvider interface {
	Version() (string, error)
}

// SignedEntity is the union of capabilities a verifiable entity provides.
type SignedEntity interface {
	HasInclusionPromise
	HasInclusionProof
	SignatureProvider
	SignedTimestampProvider
	TlogEntryProvider
	VerificationProvider
	VersionProvider
}

// VerificationContent is the key material used to verify an entity:
// a certificate or a hinted public key.
type VerificationContent interface {
	CompareKey(any, root.TrustedMaterial) bool
	ValidAtTime(time.Time, root.TrustedMaterial) bool
	Certificate() *x509.Certificate
	PublicKey() PublicKeyProvider
}

// SignatureContent is either a DSSE envelope or a message signature;
// exactly one of EnvelopeContent/MessageSignatureContent is expected to
// be non-nil (see verifySignatureWithVerifier).
type SignatureContent interface {
	Signature() []byte
	EnvelopeContent() EnvelopeContent
	MessageSignatureContent() MessageSignatureContent
}

// PublicKeyProvider identifies a public key by a hint string.
type PublicKeyProvider interface {
	Hint() string
}

// MessageSignatureContent is a signature over an artifact digest.
type MessageSignatureContent interface {
	Digest() []byte
	DigestAlgorithm() string
	Signature() []byte
}

// EnvelopeContent is a DSSE envelope carrying an in-toto statement.
type EnvelopeContent interface {
	RawEnvelope() *dsse.Envelope
	Statement() (*in_toto.Statement, error)
}
// BaseSignedEntity is a helper struct that implements all the interfaces
// of SignedEntity. It can be embedded in a struct to implement the SignedEntity
// interface. This may be useful for testing, or for implementing a SignedEntity
// that only implements a subset of the interfaces.
type BaseSignedEntity struct{}

// Compile-time check that BaseSignedEntity satisfies SignedEntity.
var _ SignedEntity = &BaseSignedEntity{}

// HasInclusionPromise always reports false for the base implementation.
func (b *BaseSignedEntity) HasInclusionPromise() bool {
	return false
}

// HasInclusionProof always reports false for the base implementation.
func (b *BaseSignedEntity) HasInclusionProof() bool {
	return false
}

// VerificationContent is a stub; it always returns errNotImplemented.
func (b *BaseSignedEntity) VerificationContent() (VerificationContent, error) {
	return nil, errNotImplemented
}

// SignatureContent is a stub; it always returns errNotImplemented.
func (b *BaseSignedEntity) SignatureContent() (SignatureContent, error) {
	return nil, errNotImplemented
}

// Timestamps is a stub; it always returns errNotImplemented.
func (b *BaseSignedEntity) Timestamps() ([][]byte, error) {
	return nil, errNotImplemented
}

// TlogEntries is a stub; it always returns errNotImplemented.
func (b *BaseSignedEntity) TlogEntries() ([]*tlog.Entry, error) {
	return nil, errNotImplemented
}

// Version is a stub; it always returns errNotImplemented.
func (b *BaseSignedEntity) Version() (string, error) {
	return "", errNotImplemented
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"crypto/x509"
"encoding/hex"
"errors"
"fmt"
ct "github.com/google/certificate-transparency-go"
"github.com/google/certificate-transparency-go/ctutil"
ctx509 "github.com/google/certificate-transparency-go/x509"
"github.com/google/certificate-transparency-go/x509util"
"github.com/sigstore/sigstore-go/pkg/root"
)
// VerifySignedCertificateTimestamp, given a threshold, TrustedMaterial, and a
// leaf certificate, will extract SCTs from the leaf certificate and verify the
// timestamps using the TrustedMaterial's FulcioCertificateAuthorities() and
// CTLogs()
func VerifySignedCertificateTimestamp(chains [][]*x509.Certificate, threshold int, trustedMaterial root.TrustedMaterial) error { // nolint: revive
	if len(chains) == 0 || len(chains[0]) == 0 || chains[0][0] == nil {
		return errors.New("no chains provided")
	}
	// The first certificate in the chain is always the leaf certificate
	leaf := chains[0][0]
	ctlogs := trustedMaterial.CTLogs()
	scts, err := x509util.ParseSCTsFromCertificate(leaf.Raw)
	if err != nil {
		return err
	}
	// Re-parse the leaf using the certificate-transparency-go fork of
	// crypto/x509, since ctutil.VerifySCT operates on that type.
	leafCTCert, err := ctx509.ParseCertificates(leaf.Raw)
	if err != nil {
		return err
	}
	// Count how many SCTs verify against a trusted, time-valid CT log key.
	verified := 0
	for _, sct := range scts {
		// CT log keys are indexed by the hex-encoded key ID of the SCT's log.
		encodedKeyID := hex.EncodeToString(sct.LogID.KeyID[:])
		key, ok := ctlogs[encodedKeyID]
		if !ok {
			// skip entries the trust root cannot verify
			continue
		}
		// Ensure sct is within ctlog validity window
		sctTime := ct.TimestampToTime(sct.Timestamp)
		if !key.ValidityPeriodStart.IsZero() && sctTime.Before(key.ValidityPeriodStart) {
			// skip entries that were before ctlog key start time
			continue
		}
		if !key.ValidityPeriodEnd.IsZero() && sctTime.After(key.ValidityPeriodEnd) {
			// skip entries that were after ctlog key end time
			continue
		}
		// Try the SCT against every candidate chain; any chain with a
		// parseable issuer certificate may validate it.
		for _, chain := range chains {
			fulcioChain := make([]*ctx509.Certificate, len(leafCTCert))
			copy(fulcioChain, leafCTCert)
			// chain[1] (the issuer) is required below, so chains without
			// an issuer certificate are skipped.
			if len(chain) < 2 {
				continue
			}
			parentCert := chain[1].Raw
			fulcioIssuer, err := ctx509.ParseCertificates(parentCert)
			if err != nil {
				continue
			}
			fulcioChain = append(fulcioChain, fulcioIssuer...)
			err = ctutil.VerifySCT(key.PublicKey, fulcioChain, sct, true)
			if err == nil {
				verified++
			}
		}
	}
	if verified < threshold {
		return fmt.Errorf("only able to verify %d SCT entries; unable to meet threshold of %d", verified, threshold)
	}
	return nil
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"bytes"
"context"
"crypto"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/x509"
"encoding/hex"
"errors"
"fmt"
"hash"
"io"
"os"
"slices"
in_toto "github.com/in-toto/attestation/go/v1"
"github.com/secure-systems-lab/go-securesystemslib/dsse"
v1 "github.com/sigstore/protobuf-specs/gen/pb-go/common/v1"
"github.com/sigstore/sigstore-go/pkg/root"
"github.com/sigstore/sigstore/pkg/signature"
sigdsse "github.com/sigstore/sigstore/pkg/signature/dsse"
"github.com/sigstore/sigstore/pkg/signature/options"
)
// Caps on in-toto statement size, enforced by limitSubjects to bound the
// work done while matching artifact digests.
const maxAllowedSubjects = 1024
const maxAllowedSubjectDigests = 32

// ErrDSSEInvalidSignatureCount is returned when a bundle's DSSE envelope
// does not contain exactly one signature (see verifyEnvelope).
var ErrDSSEInvalidSignatureCount = errors.New("exactly one signature is required")
// VerifySignature verifies a DSSE envelope signature using key material
// resolved from verificationContent and trustedMaterial. It cannot verify
// a bare message signature (that requires the artifact); use
// VerifySignatureWithArtifacts or VerifySignatureWithArtifactDigests for
// that case.
func VerifySignature(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive
	verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false)
	if err != nil {
		return fmt.Errorf("could not load signature verifier: %w", err)
	}
	return verifySignatureWithVerifier(verifier, sigContent, verificationContent, trustedMaterial)
}
// verifySignatureWithVerifier verifies an envelope with the supplied
// verifier; message signatures are rejected since no artifact is given.
func verifySignatureWithVerifier(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial) error { // nolint: revive
	envelope := sigContent.EnvelopeContent()
	if envelope != nil {
		return verifyEnvelope(verifier, envelope)
	}
	if sigContent.MessageSignatureContent() != nil {
		return errors.New("artifact must be provided to verify message signature")
	}
	// neither form present: invalid signature content
	return errors.New("signature content has neither an envelope or a message")
}
// VerifySignatureWithArtifacts verifies the signature content against the
// provided artifact streams: a DSSE envelope is checked against all
// artifacts' digests, while a message signature requires exactly one
// artifact.
func VerifySignatureWithArtifacts(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifacts []io.Reader) error { // nolint: revive
	verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false)
	if err != nil {
		return fmt.Errorf("could not load signature verifier: %w", err)
	}
	return verifySignatureWithVerifierAndArtifacts(verifier, sigContent, verificationContent, trustedMaterial, artifacts)
}
// verifySignatureWithVerifierAndArtifacts checks the signature with the
// supplied verifier and then matches the artifacts against what was signed.
func verifySignatureWithVerifierAndArtifacts(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, artifacts []io.Reader) error { // nolint: revive
	envelope := sigContent.EnvelopeContent()
	if envelope != nil {
		// verify the envelope and compare the artifacts to its statement
		return verifyEnvelopeWithArtifacts(verifier, envelope, artifacts)
	}
	msg := sigContent.MessageSignatureContent()
	if msg == nil {
		return errors.New("signature content has neither an envelope or a message")
	}
	// a message signature attests to exactly one artifact
	if len(artifacts) != 1 {
		return errors.New("only one artifact can be verified with a message signature")
	}
	return verifyMessageSignature(verifier, msg, artifacts[0])
}
// VerifySignatureWithArtifactDigests verifies the signature content
// against precomputed artifact digests: a DSSE envelope is checked
// against all digests, while a message signature requires exactly one.
func VerifySignatureWithArtifactDigests(sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, digests []ArtifactDigest) error { // nolint: revive
	verifier, err := getSignatureVerifier(sigContent, verificationContent, trustedMaterial, false)
	if err != nil {
		return fmt.Errorf("could not load signature verifier: %w", err)
	}
	return verifySignatureWithVerifierAndArtifactDigests(verifier, sigContent, verificationContent, trustedMaterial, digests)
}
// verifySignatureWithVerifierAndArtifactDigests checks the signature with
// the supplied verifier and then matches the digests against what was signed.
func verifySignatureWithVerifierAndArtifactDigests(verifier signature.Verifier, sigContent SignatureContent, verificationContent VerificationContent, trustedMaterial root.TrustedMaterial, digests []ArtifactDigest) error { // nolint: revive
	envelope := sigContent.EnvelopeContent()
	if envelope != nil {
		// verify the envelope and compare the digests to its statement
		return verifyEnvelopeWithArtifactDigests(verifier, envelope, digests)
	}
	msg := sigContent.MessageSignatureContent()
	if msg == nil {
		return errors.New("signature content has neither an envelope or a message")
	}
	// a message signature covers exactly one artifact digest
	if len(digests) != 1 {
		return errors.New("only one artifact can be verified with a message signature")
	}
	return verifyMessageSignatureWithArtifactDigest(verifier, msg, digests[0].Digest)
}
// compatVerifier is a signature.Verifier that tries multiple verifiers
// and returns nil if any of them verify the signature. This is used to
// verify signatures that were generated with old clients that used SHA256
// for ECDSA P384/P521 keys.
type compatVerifier struct {
	// verifiers are tried in order; the first entry must be present,
	// since PublicKey delegates to it.
	verifiers []signature.Verifier
}
// VerifySignature buffers the signature and message streams once, then
// tries each configured verifier in order, succeeding as soon as one
// accepts the signature. A warning is printed to stderr before each
// fallback attempt after the first verifier fails.
func (v *compatVerifier) VerifySignature(signature, message io.Reader, opts ...signature.VerifyOption) error {
	// Read both streams fully so they can be replayed for every verifier.
	// (Previously the streams were also tee'd into bytes.Buffers that were
	// never read; io.ReadAll alone is sufficient.)
	sigBytes, err := io.ReadAll(signature)
	if err != nil {
		return fmt.Errorf("failed to read signature: %w", err)
	}
	msgBytes, err := io.ReadAll(message)
	if err != nil {
		return fmt.Errorf("failed to read message: %w", err)
	}
	for idx, verifier := range v.verifiers {
		if idx != 0 {
			fmt.Fprint(os.Stderr, "Failed to verify signature with default verifier, trying compatibility verifier\n")
		}
		if err := verifier.VerifySignature(bytes.NewReader(sigBytes), bytes.NewReader(msgBytes), opts...); err == nil {
			return nil
		}
	}
	return fmt.Errorf("no compatible verifier found")
}
// PublicKey returns the public key of the first (default) verifier.
// NOTE(review): this indexes verifiers[0] unconditionally, so the type
// must always be constructed with at least one verifier.
func (v *compatVerifier) PublicKey(opts ...signature.PublicKeyOption) (crypto.PublicKey, error) {
	return v.verifiers[0].PublicKey(opts...)
}
// compatSignatureVerifier builds a signature.Verifier for the leaf
// certificate's public key.
//
// LoadDefaultSigner/Verifier functions accept a few options to select
// the default signer/verifier when there are ambiguities, like for
// ED25519 keys, which could be used with PureEd25519 or Ed25519ph.
//
// When dealing with DSSE, use ED25519, but when we are working with
// hashedrekord entries, use ED25519ph by default, because this is the
// only option.
//
// With enableCompat set and an ECDSA P384/P521 key, the returned verifier
// also accepts legacy signatures made with SHA256 by old clients. For any
// other key the default verifier is returned unchanged. (Previously,
// non-ECDSA keys with enableCompat were wrapped in a compatVerifier
// containing the default verifier twice, causing a redundant second
// verification attempt and a spurious stderr warning on failure.)
func compatSignatureVerifier(leafCert *x509.Certificate, enableCompat bool, isDSSE bool) (signature.Verifier, error) {
	var defaultOpts []signature.LoadOption
	if !isDSSE {
		defaultOpts = []signature.LoadOption{options.WithED25519ph()}
	}
	verifier, err := signature.LoadDefaultVerifier(leafCert.PublicKey, defaultOpts...)
	if err != nil {
		return nil, err
	}
	// If compatibility is not enabled, return only the default verifier
	if !enableCompat {
		return verifier, nil
	}
	// Compatibility only applies to ECDSA P384/P521 keys, where we still
	// want to verify signatures generated with old clients that used SHA256.
	pubKey, ok := leafCert.PublicKey.(*ecdsa.PublicKey)
	if !ok {
		return verifier, nil
	}
	var algorithmDetails signature.AlgorithmDetails
	switch pubKey.Curve {
	case elliptic.P384():
		//nolint:staticcheck // Need to use deprecated field for backwards compatibility
		algorithmDetails, err = signature.GetAlgorithmDetails(v1.PublicKeyDetails_PKIX_ECDSA_P384_SHA_256)
	case elliptic.P521():
		//nolint:staticcheck // Need to use deprecated field for backwards compatibility
		algorithmDetails, err = signature.GetAlgorithmDetails(v1.PublicKeyDetails_PKIX_ECDSA_P521_SHA_256)
	default:
		return verifier, nil
	}
	if err != nil {
		return nil, err
	}
	compatible, err := signature.LoadVerifierFromAlgorithmDetails(leafCert.PublicKey, algorithmDetails, defaultOpts...)
	if err != nil {
		return nil, err
	}
	return &compatVerifier{verifiers: []signature.Verifier{verifier, compatible}}, nil
}
// getSignatureVerifier builds a verifier from the verification content:
// a certificate yields a (possibly compat-wrapped) verifier for its key,
// while a hinted public key is resolved through the trusted material.
func getSignatureVerifier(sigContent SignatureContent, verificationContent VerificationContent, tm root.TrustedMaterial, enableCompat bool) (signature.Verifier, error) {
	leafCert := verificationContent.Certificate()
	if leafCert != nil {
		// an envelope indicates DSSE content rather than a message signature
		isDSSE := sigContent.EnvelopeContent() != nil
		return compatSignatureVerifier(leafCert, enableCompat, isDSSE)
	}
	if pk := verificationContent.PublicKey(); pk != nil {
		return tm.PublicKeyVerifier(pk.Hint())
	}
	return nil, errors.New("no public key or certificate found")
}
// verifyEnvelope verifies the DSSE envelope's single signature with the
// given verifier.
func verifyEnvelope(verifier signature.Verifier, envelope EnvelopeContent) error {
	env := envelope.RawEnvelope()
	// A DSSE envelope in a Sigstore bundle MUST only contain one
	// signature, even though DSSE is more permissive.
	if len(env.Signatures) != 1 {
		return ErrDSSEInvalidSignatureCount
	}
	pub, err := verifier.PublicKey()
	if err != nil {
		return fmt.Errorf("could not fetch verifier public key: %w", err)
	}
	adapter := &sigdsse.VerifierAdapter{
		SignatureVerifier: verifier,
		Pub:               pub,
	}
	envVerifier, err := dsse.NewEnvelopeVerifier(adapter)
	if err != nil {
		return fmt.Errorf("could not load envelope verifier: %w", err)
	}
	if _, err := envVerifier.Verify(context.Background(), env); err != nil {
		return fmt.Errorf("could not verify envelope: %w", err)
	}
	return nil
}
// verifyEnvelopeWithArtifacts verifies the DSSE envelope signature, then
// checks that every provided artifact stream hashes to at least one
// subject digest in the envelope's in-toto statement.
func verifyEnvelopeWithArtifacts(verifier signature.Verifier, envelope EnvelopeContent, artifacts []io.Reader) error {
	// The envelope signature must verify before its statement is trusted.
	if err := verifyEnvelope(verifier, envelope); err != nil {
		return err
	}
	statement, err := envelope.Statement()
	if err != nil {
		return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err)
	}
	// Bound the subject/digest counts to limit the work done below.
	if err = limitSubjects(statement); err != nil {
		return err
	}
	// Sanity check (no subjects)
	if len(statement.Subject) == 0 {
		return errors.New("no subjects found in statement")
	}
	// determine which hash functions to use
	hashFuncs, err := getHashFunctions(statement)
	if err != nil {
		return fmt.Errorf("unable to determine hash functions: %w", err)
	}
	// Hash each artifact stream once, with all needed hash functions at once.
	hashedArtifacts := make([]map[crypto.Hash][]byte, len(artifacts))
	for i, artifact := range artifacts {
		// Compute digest of the artifact.
		hasher, err := newMultihasher(hashFuncs)
		if err != nil {
			return fmt.Errorf("could not verify artifact: unable to create hasher: %w", err)
		}
		if _, err = io.Copy(hasher, artifact); err != nil {
			return fmt.Errorf("could not verify artifact: unable to calculate digest: %w", err)
		}
		hashedArtifacts[i] = hasher.Sum(nil)
	}
	// create a map based on the digests present in the statement
	// the map key is the hash algorithm and the field is a slice of digests
	// created using that hash algorithm
	subjectDigests := make(map[crypto.Hash][][]byte)
	for _, subject := range statement.Subject {
		for alg, hexdigest := range subject.Digest {
			hf, err := algStringToHashFunc(alg)
			if err != nil {
				// unsupported algorithms are skipped, not fatal
				continue
			}
			if _, ok := subjectDigests[hf]; !ok {
				subjectDigests[hf] = make([][]byte, 0)
			}
			digest, err := hex.DecodeString(hexdigest)
			if err != nil {
				// malformed hex digests are skipped, not fatal
				continue
			}
			subjectDigests[hf] = append(subjectDigests[hf], digest)
		}
	}
	// now loop over the provided artifact digests and try to compare them
	// to the mapped subject digests
	// if we cannot find a match, exit with an error
	for _, ha := range hashedArtifacts {
		matchFound := false
		for key, value := range ha {
			statementDigests, ok := subjectDigests[key]
			if !ok {
				return fmt.Errorf("no matching artifact hash algorithm found in subject digests")
			}
			if ok := isDigestInSlice(value, statementDigests); ok {
				matchFound = true
				break
			}
		}
		if !matchFound {
			return fmt.Errorf("provided artifact digests do not match digests in statement")
		}
	}
	return nil
}
// verifyEnvelopeWithArtifactDigests verifies the DSSE envelope signature,
// then checks that every provided digest appears among the statement's
// subject digests for the same algorithm.
func verifyEnvelopeWithArtifactDigests(verifier signature.Verifier, envelope EnvelopeContent, digests []ArtifactDigest) error {
	if err := verifyEnvelope(verifier, envelope); err != nil {
		return err
	}
	statement, err := envelope.Statement()
	if err != nil {
		return fmt.Errorf("could not verify artifact: unable to extract statement from envelope: %w", err)
	}
	if err = limitSubjects(statement); err != nil {
		return err
	}
	// Index the statement's subject digests by algorithm name; each entry
	// holds the raw (hex-decoded) digests recorded for that algorithm.
	subjectDigests := map[string][][]byte{}
	for _, subject := range statement.Subject {
		for alg, hexDigest := range subject.Digest {
			raw, decodeErr := hex.DecodeString(hexDigest)
			if decodeErr != nil {
				return fmt.Errorf("could not verify artifact: unable to decode subject digest: %w", decodeErr)
			}
			subjectDigests[alg] = append(subjectDigests[alg], raw)
		}
	}
	// Every provided digest must be present in the statement under its
	// own algorithm; otherwise fail.
	for _, artifactDigest := range digests {
		candidates, found := subjectDigests[artifactDigest.Algorithm]
		if !found {
			return fmt.Errorf("provided artifact digests does not match digests in statement")
		}
		if !isDigestInSlice(artifactDigest.Digest, candidates) {
			return fmt.Errorf("provided artifact digest does not match any digest in statement")
		}
	}
	return nil
}
// isDigestInSlice reports whether digest is byte-for-byte equal to any
// element of digestSlice.
func isDigestInSlice(digest []byte, digestSlice [][]byte) bool {
	return slices.ContainsFunc(digestSlice, func(candidate []byte) bool {
		return bytes.Equal(candidate, digest)
	})
}
// verifyMessageSignature verifies the message signature over the full
// artifact stream.
func verifyMessageSignature(verifier signature.Verifier, msg MessageSignatureContent, artifact io.Reader) error {
	err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), artifact)
	if err != nil {
		return fmt.Errorf("could not verify message: %w", err)
	}
	return nil
}
// verifyMessageSignatureWithArtifactDigest verifies the message signature
// against a precomputed artifact digest. The provided digest must equal
// the digest recorded in the message signature, and pure ed25519 keys are
// rejected because they require the full message, not just its digest.
func verifyMessageSignatureWithArtifactDigest(verifier signature.Verifier, msg MessageSignatureContent, artifactDigest []byte) error {
	if !bytes.Equal(artifactDigest, msg.Digest()) {
		return errors.New("artifact does not match digest")
	}
	if _, ok := verifier.(*signature.ED25519Verifier); ok {
		return errors.New("message signatures with ed25519 signatures can only be verified with artifacts, and not just their digest")
	}
	// The message reader is empty; the digest is supplied via options.WithDigest.
	err := verifier.VerifySignature(bytes.NewReader(msg.Signature()), bytes.NewReader([]byte{}), options.WithDigest(artifactDigest))
	if err != nil {
		return fmt.Errorf("could not verify message: %w", err)
	}
	return nil
}
// limitSubjects limits the number of subjects and digests in a statement to prevent DoS.
func limitSubjects(statement *in_toto.Statement) error {
	if n := len(statement.Subject); n > maxAllowedSubjects {
		return fmt.Errorf("too many subjects: %d > %d", n, maxAllowedSubjects)
	}
	for _, subject := range statement.Subject {
		// each subject's digest count is bounded as well
		if n := len(subject.Digest); n > maxAllowedSubjectDigests {
			return fmt.Errorf("too many digests: %d > %d", n, maxAllowedSubjectDigests)
		}
	}
	return nil
}
type multihasher struct {
io.Writer
hashfuncs []crypto.Hash
hashes []io.Writer
}
func newMultihasher(hashfuncs []crypto.Hash) (*multihasher, error) {
if len(hashfuncs) == 0 {
return nil, errors.New("no hash functions specified")
}
hashes := make([]io.Writer, len(hashfuncs))
for i := range hashfuncs {
hashes[i] = hashfuncs[i].New()
}
return &multihasher{
Writer: io.MultiWriter(hashes...),
hashfuncs: hashfuncs,
hashes: hashes,
}, nil
}
func (m *multihasher) Sum(b []byte) map[crypto.Hash][]byte {
sums := make(map[crypto.Hash][]byte, len(m.hashes))
for i := range m.hashes {
sums[m.hashfuncs[i]] = m.hashes[i].(hash.Hash).Sum(b)
}
return sums
}
func algStringToHashFunc(alg string) (crypto.Hash, error) {
switch alg {
case "sha256":
return crypto.SHA256, nil
case "sha384":
return crypto.SHA384, nil
case "sha512":
return crypto.SHA512, nil
default:
return 0, errors.New("unsupported digest algorithm")
}
}
// getHashFunctions returns the smallest subset of supported hash functions
// that are needed to verify all subjects in a statement.
func getHashFunctions(statement *in_toto.Statement) ([]crypto.Hash, error) {
	if len(statement.Subject) == 0 {
		return nil, errors.New("no subjects found in statement")
	}
	// preference order: strongest hash first
	supportedHashFuncs := []crypto.Hash{crypto.SHA512, crypto.SHA384, crypto.SHA256}
	chosenHashFuncs := make([]crypto.Hash, 0, len(supportedHashFuncs))
	subjectHashFuncs := make([][]crypto.Hash, len(statement.Subject))
	// go through the statement and make a simple data structure to hold the
	// list of hash funcs for each subject (subjectHashFuncs)
	for i, subject := range statement.Subject {
		for alg := range subject.Digest {
			hf, err := algStringToHashFunc(alg)
			if err != nil {
				// unsupported algorithms are skipped; a subject with only
				// unsupported digests contributes no hash functions
				continue
			}
			subjectHashFuncs[i] = append(subjectHashFuncs[i], hf)
		}
	}
	// for each subject, see if we have chosen a compatible hash func, and if
	// not, add the first one that is supported
	for _, hfs := range subjectHashFuncs {
		// if any of the hash funcs are already in chosenHashFuncs, skip
		if len(intersection(hfs, chosenHashFuncs)) > 0 {
			continue
		}
		// check each supported hash func and add it if the subject
		// has a digest for it
		for _, hf := range supportedHashFuncs {
			if slices.Contains(hfs, hf) {
				chosenHashFuncs = append(chosenHashFuncs, hf)
				break
			}
		}
	}
	if len(chosenHashFuncs) == 0 {
		return nil, errors.New("no supported digest algorithms found")
	}
	return chosenHashFuncs, nil
}
func intersection(a, b []crypto.Hash) []crypto.Hash {
var result []crypto.Hash
for _, x := range a {
if slices.Contains(b, x) {
result = append(result, x)
}
}
return result
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"crypto/x509"
"encoding/asn1"
"encoding/json"
"errors"
"fmt"
"io"
"time"
in_toto "github.com/in-toto/attestation/go/v1"
"github.com/sigstore/sigstore-go/pkg/fulcio/certificate"
"github.com/sigstore/sigstore-go/pkg/root"
"github.com/sigstore/sigstore/pkg/cryptoutils"
"google.golang.org/protobuf/encoding/protojson"
)
const (
	// VerificationResultMediaType01 is the media type carried by
	// VerificationResult, versioned so consumers can detect format changes.
	VerificationResultMediaType01 = "application/vnd.dev.sigstore.verificationresult+json;version=0.1"
)
// Verifier checks the cryptographic integrity of signed entities against
// trusted material (root certificates, log keys, etc.), with behaviour
// selected by its VerifierConfig. Construct it with NewVerifier.
type Verifier struct {
	trustedMaterial root.TrustedMaterial
	config          VerifierConfig
}
// VerifierConfig holds the settings that control which checks Verify
// performs and the threshold each check must meet. It is populated via
// the With* VerifierOption functions and checked by Validate.
type VerifierConfig struct { // nolint: revive
	// requireSignedTimestamps requires RFC3161 timestamps to verify
	// short-lived certificates
	requireSignedTimestamps bool
	// signedTimestampThreshold is the minimum number of verified
	// RFC3161 timestamps in a bundle
	signedTimestampThreshold int
	// requireIntegratedTimestamps requires log entry integrated timestamps to
	// verify short-lived certificates
	requireIntegratedTimestamps bool
	// integratedTimeThreshold is the minimum number of log entry
	// integrated timestamps in a bundle
	integratedTimeThreshold int
	// requireObserverTimestamps requires RFC3161 timestamps and/or log
	// integrated timestamps to verify short-lived certificates
	requireObserverTimestamps bool
	// observerTimestampThreshold is the minimum number of verified
	// RFC3161 timestamps and/or log integrated timestamps in a bundle
	observerTimestampThreshold int
	// requireTlogEntries requires log inclusion proofs in a bundle
	requireTlogEntries bool
	// tlogEntriesThreshold is the minimum number of verified inclusion
	// proofs in a bundle
	tlogEntriesThreshold int
	// requireSCTs requires SCTs in Fulcio certificates
	requireSCTs bool
	// ctlogEntriesThreshold is the minimum number of verified SCTs in
	// a Fulcio certificate
	ctlogEntriesThreshold int
	// useCurrentTime uses the current time rather than a provided signed
	// or log timestamp. Most workflows will not use this option
	useCurrentTime bool
	// allowNoTimestamp can be used to skip timestamp checks when a key
	// is used rather than a certificate.
	allowNoTimestamp bool
}
type VerifierOption func(*VerifierConfig) error
// NewVerifier creates a new Verifier. It takes a
// root.TrustedMaterial, which contains a set of trusted public keys and
// certificates, and a set of VerifierConfigurators, which set the config
// that determines the behaviour of the Verify function.
//
// VerifierConfig's set of options should match the properties of a given
// Sigstore deployment, i.e. whether to expect SCTs, Tlog entries, or signed
// timestamps.
func NewVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*Verifier, error) {
	cfg := VerifierConfig{}
	for _, apply := range options {
		if err := apply(&cfg); err != nil {
			return nil, fmt.Errorf("failed to configure verifier: %w", err)
		}
	}
	// Reject incoherent option combinations before handing back a verifier.
	if err := cfg.Validate(); err != nil {
		return nil, err
	}
	return &Verifier{
		trustedMaterial: trustedMaterial,
		config:          cfg,
	}, nil
}
// TODO: Remove the following deprecated functions in a future release before sigstore-go 2.0.

// SignedEntityVerifier is the former name of Verifier.
//
// Deprecated: Use Verifier instead.
type SignedEntityVerifier = Verifier

// NewSignedEntityVerifier is the former name of NewVerifier.
//
// Deprecated: Use NewVerifier instead.
func NewSignedEntityVerifier(trustedMaterial root.TrustedMaterial, options ...VerifierOption) (*Verifier, error) {
	return NewVerifier(trustedMaterial, options...)
}
// WithSignedTimestamps configures the Verifier to expect RFC 3161
// timestamps from a Timestamp Authority, verify them using the TrustedMaterial's
// TimestampingAuthorities(), and, if it exists, use the resulting timestamp(s)
// to verify the Fulcio certificate.
func WithSignedTimestamps(threshold int) VerifierOption {
	return func(cfg *VerifierConfig) error {
		if threshold < 1 {
			return errors.New("signed timestamp threshold must be at least 1")
		}
		cfg.requireSignedTimestamps = true
		cfg.signedTimestampThreshold = threshold
		return nil
	}
}
// WithObserverTimestamps configures the Verifier to expect
// timestamps from either an RFC3161 timestamp authority or a log's
// SignedEntryTimestamp. These are verified using the TrustedMaterial's
// TimestampingAuthorities() or RekorLogs(), and used to verify
// the Fulcio certificate.
func WithObserverTimestamps(threshold int) VerifierOption {
	return func(cfg *VerifierConfig) error {
		if threshold < 1 {
			return errors.New("observer timestamp threshold must be at least 1")
		}
		cfg.requireObserverTimestamps = true
		cfg.observerTimestampThreshold = threshold
		return nil
	}
}
// WithTransparencyLog configures the Verifier to expect
// Transparency Log inclusion proofs or SignedEntryTimestamps, verifying them
// using the TrustedMaterial's RekorLogs().
func WithTransparencyLog(threshold int) VerifierOption {
	return func(cfg *VerifierConfig) error {
		if threshold < 1 {
			return errors.New("transparency log entry threshold must be at least 1")
		}
		cfg.requireTlogEntries = true
		cfg.tlogEntriesThreshold = threshold
		return nil
	}
}
// WithIntegratedTimestamps configures the Verifier to
// expect log entry integrated timestamps from either SignedEntryTimestamps
// or live log lookups.
//
// NOTE(review): unlike the other threshold-taking options, this one does
// not reject a threshold < 1 — confirm that zero/negative thresholds are
// intentional here.
func WithIntegratedTimestamps(threshold int) VerifierOption {
	return func(cfg *VerifierConfig) error {
		cfg.requireIntegratedTimestamps = true
		cfg.integratedTimeThreshold = threshold
		return nil
	}
}
// WithSignedCertificateTimestamps configures the Verifier to
// expect the Fulcio certificate to have a SignedCertificateTimestamp, and
// verify it using the TrustedMaterial's CTLogAuthorities().
func WithSignedCertificateTimestamps(threshold int) VerifierOption {
	return func(cfg *VerifierConfig) error {
		if threshold < 1 {
			return errors.New("ctlog entry threshold must be at least 1")
		}
		cfg.requireSCTs = true
		cfg.ctlogEntriesThreshold = threshold
		return nil
	}
}
// WithCurrentTime configures the Verifier to not expect
// any timestamps from either a Timestamp Authority or a Transparency Log.
// This option should not be enabled when verifying short-lived certificates,
// as an observer timestamp is needed. This option is useful primarily for
// private deployments with long-lived code signing certificates.
func WithCurrentTime() VerifierOption {
	return func(cfg *VerifierConfig) error {
		cfg.useCurrentTime = true
		return nil
	}
}
// WithNoObserverTimestamps configures the Verifier to not expect
// any timestamps from either a Timestamp Authority or a Transparency Log
// and to not use the current time to verify a certificate. This may only
// be used when verifying with keys rather than certificates.
func WithNoObserverTimestamps() VerifierOption {
	return func(cfg *VerifierConfig) error {
		cfg.allowNoTimestamp = true
		return nil
	}
}
// Validate checks that the chosen timestamp options are coherent:
// WithNoObserverTimestamps() is mutually exclusive with every other
// timestamp source, and at least one timestamp source (or the explicit
// no-timestamp opt-out) must be selected.
func (c *VerifierConfig) Validate() error {
	anyTimestampSource := c.requireObserverTimestamps || c.requireSignedTimestamps ||
		c.requireIntegratedTimestamps || c.useCurrentTime
	if c.allowNoTimestamp && anyTimestampSource {
		return errors.New("specify WithNoObserverTimestamps() without any other verifier options")
	}
	if !anyTimestampSource && !c.allowNoTimestamp {
		return errors.New("when initializing a new Verifier, you must specify at least one of " +
			"WithObserverTimestamps(), WithSignedTimestamps(), WithIntegratedTimestamps() or WithCurrentTime(), " +
			"or exclusively specify WithNoObserverTimestamps()")
	}
	return nil
}
// VerificationResult is the serializable output of a successful Verify
// call: the verified statement (for DSSE bundles), the signature's key or
// certificate summary, all verified timestamps, and — when identity policy
// was enforced — the matched certificate identity.
type VerificationResult struct {
	MediaType          string                        `json:"mediaType"`
	Statement          *in_toto.Statement            `json:"statement,omitempty"`
	Signature          *SignatureVerificationResult  `json:"signature,omitempty"`
	VerifiedTimestamps []TimestampVerificationResult `json:"verifiedTimestamps"`
	VerifiedIdentity   *CertificateIdentity          `json:"verifiedIdentity,omitempty"`
}
// SignatureVerificationResult records what the signature was verified
// against: a public key hint (key-signed entities) or a summary of the
// signing certificate (certificate-signed entities).
type SignatureVerificationResult struct {
	PublicKeyID *[]byte              `json:"publicKeyId,omitempty"`
	Certificate *certificate.Summary `json:"certificate,omitempty"`
}
// TimestampVerificationResult describes a single verified timestamp: its
// source type (e.g. "Tlog", "TimestampAuthority", "CurrentTime"), the
// source URI, and the timestamp itself.
type TimestampVerificationResult struct {
	Type      string    `json:"type"`
	URI       string    `json:"uri"`
	Timestamp time.Time `json:"timestamp"`
}
// NewVerificationResult returns an empty VerificationResult carrying the
// current result media type.
func NewVerificationResult() *VerificationResult {
	result := &VerificationResult{}
	result.MediaType = VerificationResultMediaType01
	return result
}
// MarshalJSON deals with protojson needed for the Statement.
// Can be removed when https://github.com/in-toto/attestation/pull/403 is merged.
func (b *VerificationResult) MarshalJSON() ([]byte, error) {
	// The Statement is a protobuf message and must go through protojson.
	statement, err := protojson.Marshal(b.Statement)
	if err != nil {
		return nil, err
	}
	// Alias has the same fields but none of the methods, which keeps the
	// json.Marshal call below from recursing back into this function.
	type Alias VerificationResult
	wrapper := struct {
		Alias
		Statement json.RawMessage `json:"statement,omitempty"`
	}{
		Alias:     Alias(*b),
		Statement: statement,
	}
	return json.Marshal(wrapper)
}
// UnmarshalJSON deals with protojson needed for the Statement.
// Can be removed when https://github.com/in-toto/attestation/pull/403 is merged.
func (b *VerificationResult) UnmarshalJSON(data []byte) error {
	// Alias has the same fields but none of the methods, which keeps the
	// json.Unmarshal call below from recursing back into this function.
	type Alias VerificationResult
	aux := &struct {
		Alias
		Statement json.RawMessage `json:"statement,omitempty"`
	}{
		Alias: Alias(*b),
	}
	if err := json.Unmarshal(data, aux); err != nil {
		return err
	}
	// Copy all plain fields back into the receiver. The previous
	// implementation only populated b.Statement and silently dropped
	// MediaType, Signature, VerifiedTimestamps and VerifiedIdentity.
	*b = VerificationResult(aux.Alias)
	// A result may legitimately omit the statement (MessageSignature
	// bundles); leave it nil instead of handing protojson empty input,
	// which would fail.
	if len(aux.Statement) == 0 {
		b.Statement = nil
		return nil
	}
	// The Statement is a protobuf message and must go through protojson.
	b.Statement = &in_toto.Statement{}
	return protojson.Unmarshal(aux.Statement, b.Statement)
}
// PolicyOption applies one non-artifact policy setting to a PolicyConfig.
type PolicyOption func(*PolicyConfig) error

// ArtifactPolicyOption applies the artifact-related policy setting to a
// PolicyConfig; exactly one is required by NewPolicy.
type ArtifactPolicyOption func(*PolicyConfig) error
// PolicyBuilder is responsible for building & validating a PolicyConfig
type PolicyBuilder struct {
	artifactPolicy ArtifactPolicyOption // mandatory artifact policy, applied first
	policyOptions  []PolicyOption       // all remaining policy options
}
// options returns the artifact policy followed by every other policy
// option, as one slice, in application order.
func (pc PolicyBuilder) options() []PolicyOption {
	all := make([]PolicyOption, 0, len(pc.policyOptions)+1)
	all = append(all, PolicyOption(pc.artifactPolicy))
	return append(all, pc.policyOptions...)
}
// BuildConfig applies every configured option to a fresh PolicyConfig
// and validates the result; the first failing option or validation error
// aborts the build.
func (pc PolicyBuilder) BuildConfig() (*PolicyConfig, error) {
	policy := &PolicyConfig{}
	for _, applyOption := range pc.options() {
		if err := applyOption(policy); err != nil {
			return nil, err
		}
	}
	if err := policy.validate(); err != nil {
		return nil, err
	}
	return policy, nil
}
// ArtifactDigest is a precomputed artifact digest: the algorithm name
// (e.g. "sha256") and the raw digest bytes.
type ArtifactDigest struct {
	Algorithm string
	Digest    []byte
}
// PolicyConfig holds the policy applied after cryptographic verification:
// whether to check artifacts and/or certificate identities, and the
// values to check against. Build one via NewPolicy + BuildConfig.
type PolicyConfig struct {
	ignoreArtifact        bool                  // skip artifact checks entirely (WithoutArtifactUnsafe)
	ignoreIdentities      bool                  // skip identity checks (WithoutIdentitiesUnsafe / WithKey)
	requireSigningKey     bool                  // expect a key signature, not a certificate (WithKey)
	certificateIdentities CertificateIdentities // identities to match; any single match suffices
	verifyArtifacts       bool                  // verify against full artifact contents
	artifacts             []io.Reader           // artifact contents to verify against
	verifyArtifactDigests bool                  // verify against precomputed digests
	artifactDigests       []ArtifactDigest      // precomputed digests to verify against
}
// withVerifyAlreadyConfigured errors if an artifact (or digest) check has
// already been requested, making the With*Artifact* options mutually
// exclusive.
func (p *PolicyConfig) withVerifyAlreadyConfigured() error {
	if !p.verifyArtifacts && !p.verifyArtifactDigests {
		return nil
	}
	return errors.New("only one invocation of WithArtifact/WithArtifacts/WithArtifactDigest/WithArtifactDigests is allowed")
}
// validate checks the policy for internal consistency: enforcing
// identities requires at least one identity to match against.
func (p *PolicyConfig) validate() error {
	if !p.RequireIdentities() {
		return nil
	}
	if len(p.certificateIdentities) == 0 {
		return errors.New("can't verify identities without providing at least one identity")
	}
	return nil
}
// RequireArtifact reports whether the Verify algorithm should perform
// signature verification with an artifact provided by either the
// WithArtifact or the WithArtifactDigest functions.
//
// Artifact verification is on by default and is only disabled via the
// ignoreArtifact flag (set by WithoutArtifactUnsafe). This accessor exists
// purely to hide that double negative from the main Verify loop.
func (p *PolicyConfig) RequireArtifact() bool {
	return !p.ignoreArtifact
}
// RequireIdentities reports whether the Verify algorithm should check that
// the SignedEntity's certificate was created by one of the identities
// provided via WithCertificateIdentity.
//
// Identity enforcement is on by default and is only disabled via the
// ignoreIdentities flag (set by WithoutIdentitiesUnsafe or WithKey). This
// accessor exists purely to hide that double negative from the main
// Verify loop.
func (p *PolicyConfig) RequireIdentities() bool {
	return !p.ignoreIdentities
}
// RequireSigningKey reports whether the SignedEntity is expected to be
// signed with a key rather than a certificate.
func (p *PolicyConfig) RequireSigningKey() bool {
	return p.requireSigningKey
}
// NewPolicy constructs a PolicyBuilder from the mandatory artifact policy
// and any number of additional policy options.
func NewPolicy(artifactOpt ArtifactPolicyOption, options ...PolicyOption) PolicyBuilder {
	pb := PolicyBuilder{}
	pb.artifactPolicy = artifactOpt
	pb.policyOptions = options
	return pb
}
// WithoutIdentitiesUnsafe allows the caller of Verify to skip enforcing any
// checks on the identity that created the SignedEntity being verified.
//
// Do not use this option unless you know what you are doing!
//
// As the name implies, using WithoutIdentitiesUnsafe is not safe: outside of
// exceptional circumstances, we should always enforce that the SignedEntity
// being verified was signed by a trusted CertificateIdentity.
//
// For more information, consult WithCertificateIdentity.
func WithoutIdentitiesUnsafe() PolicyOption {
	return func(p *PolicyConfig) error {
		if len(p.certificateIdentities) != 0 {
			return errors.New("can't use WithoutIdentitiesUnsafe while specifying CertificateIdentities")
		}
		p.ignoreIdentities = true
		return nil
	}
}
// WithCertificateIdentity allows the caller of Verify to enforce that the
// SignedEntity being verified was signed by the given identity, as defined by
// the Fulcio certificate embedded in the entity. If this policy is enabled,
// but the SignedEntity does not have a certificate, verification will fail.
//
// Providing this function multiple times will concatenate the provided
// CertificateIdentity to the list of identities being checked.
//
// If all of the provided CertificateIdentities fail to match the Fulcio
// certificate, then verification will fail. If *any* CertificateIdentity
// matches, then verification will succeed. Therefore, each CertificateIdentity
// provided to this function must define a "sufficient" identity to trust.
//
// The CertificateIdentity struct allows callers to specify:
// - The exact value, or Regexp, of the SubjectAlternativeName
// - The exact value of any Fulcio OID X.509 extension, i.e. Issuer
//
// For convenience, consult the NewShortCertificateIdentity function.
func WithCertificateIdentity(identity CertificateIdentity) PolicyOption {
	return func(p *PolicyConfig) error {
		switch {
		case p.ignoreIdentities:
			return errors.New("can't use WithCertificateIdentity while using WithoutIdentitiesUnsafe")
		case p.requireSigningKey:
			return errors.New("can't use WithCertificateIdentity while using WithKey")
		}
		p.certificateIdentities = append(p.certificateIdentities, identity)
		return nil
	}
}
// WithKey allows the caller of Verify to require the SignedEntity being
// verified was signed with a key and not a certificate.
func WithKey() PolicyOption {
	return func(p *PolicyConfig) error {
		if len(p.certificateIdentities) != 0 {
			return errors.New("can't use WithKey while using WithCertificateIdentity")
		}
		p.requireSigningKey = true
		// A key signature has no certificate, so identity checks cannot apply.
		p.ignoreIdentities = true
		return nil
	}
}
// WithoutArtifactUnsafe allows the caller of Verify to skip checking whether
// the SignedEntity was created from, or references, an artifact.
//
// WithoutArtifactUnsafe can only be used with SignedEntities that contain a
// DSSE envelope. If the SignedEntity has a MessageSignature, providing
// this policy option will cause verification to always fail, since
// MessageSignatures can only be verified in the presence of an Artifact or
// artifact digest. See WithArtifact/WithArtifactDigest for more information.
//
// Do not use this function unless you know what you are doing!
//
// As the name implies, using WithoutArtifactUnsafe is not safe: outside of
// exceptional circumstances, SignedEntities should always be verified with
// an artifact.
func WithoutArtifactUnsafe() ArtifactPolicyOption {
	return func(p *PolicyConfig) error {
		if err := p.withVerifyAlreadyConfigured(); err != nil {
			return err
		}
		p.ignoreArtifact = true
		return nil
	}
}
// WithArtifact allows the caller of Verify to enforce that the SignedEntity
// being verified was created from, or references, a given artifact.
//
// If the SignedEntity contains a DSSE envelope, then the artifact digest is
// calculated from the given artifact, and compared to the digest in the
// envelope's statement.
func WithArtifact(artifact io.Reader) ArtifactPolicyOption {
	return func(p *PolicyConfig) error {
		if err := p.withVerifyAlreadyConfigured(); err != nil {
			return err
		}
		if p.ignoreArtifact {
			return errors.New("can't use WithArtifact while using WithoutArtifactUnsafe")
		}
		p.verifyArtifacts = true
		p.artifacts = []io.Reader{artifact}
		return nil
	}
}
// WithArtifacts allows the caller of Verify to enforce that the SignedEntity
// being verified was created from, or references, a slice of artifacts.
//
// If the SignedEntity contains a DSSE envelope, then the artifact digest is
// calculated from the given artifact, and compared to the digest in the
// envelope's statement.
func WithArtifacts(artifacts []io.Reader) ArtifactPolicyOption {
	return func(p *PolicyConfig) error {
		if err := p.withVerifyAlreadyConfigured(); err != nil {
			return err
		}
		if p.ignoreArtifact {
			return errors.New("can't use WithArtifacts while using WithoutArtifactUnsafe")
		}
		p.verifyArtifacts = true
		p.artifacts = artifacts
		return nil
	}
}
// WithArtifactDigest allows the caller of Verify to enforce that the
// SignedEntity being verified was created for a given artifact digest.
//
// If the SignedEntity contains a MessageSignature that was signed using the
// ED25519 algorithm, then providing only an artifactDigest will fail; the
// whole artifact must be provided. Use WithArtifact instead.
//
// If the SignedEntity contains a DSSE envelope, then the artifact digest is
// compared to the digest in the envelope's statement.
func WithArtifactDigest(algorithm string, artifactDigest []byte) ArtifactPolicyOption {
	return func(p *PolicyConfig) error {
		if err := p.withVerifyAlreadyConfigured(); err != nil {
			return err
		}
		if p.ignoreArtifact {
			return errors.New("can't use WithArtifactDigest while using WithoutArtifactUnsafe")
		}
		digest := ArtifactDigest{Algorithm: algorithm, Digest: artifactDigest}
		p.verifyArtifactDigests = true
		p.artifactDigests = []ArtifactDigest{digest}
		return nil
	}
}
// WithArtifactDigests allows the caller of Verify to enforce that the
// SignedEntity being verified was created for a given array of artifact digests.
//
// If the SignedEntity contains a DSSE envelope, then the artifact digests
// are compared to the digests in the envelope's statement.
//
// If the SignedEntity does not contain a DSSE envelope, verification fails.
func WithArtifactDigests(digests []ArtifactDigest) ArtifactPolicyOption {
	return func(p *PolicyConfig) error {
		if err := p.withVerifyAlreadyConfigured(); err != nil {
			return err
		}
		if p.ignoreArtifact {
			return errors.New("can't use WithArtifactDigests while using WithoutArtifactUnsafe")
		}
		p.verifyArtifactDigests = true
		p.artifactDigests = digests
		return nil
	}
}
// Verify checks the cryptographic integrity of a given SignedEntity according
// to the options configured in the NewVerifier. Its purpose is to
// determine whether the SignedEntity was created by a Sigstore deployment we
// trust, as defined by keys in our TrustedMaterial.
//
// If the SignedEntity contains a MessageSignature, then the artifact or its
// digest must be provided to the Verify function, as it is required to verify
// the signature. See WithArtifact and WithArtifactDigest for more details.
//
// If and only if verification is successful, Verify will return a
// VerificationResult struct whose contents' integrity have been verified.
// Verify may then verify the contents of the VerificationResults using supplied
// PolicyOptions. See WithCertificateIdentity for more details.
//
// Callers of this function SHOULD ALWAYS:
//   - (if the signed entity has a certificate) verify that its Subject Alternate
//     Name matches a trusted identity, and that its OID Issuer field matches an
//     expected value
//   - (if the signed entity has a dsse envelope) verify that the envelope's
//     statement's subject matches the artifact being verified
func (v *Verifier) Verify(entity SignedEntity, pb PolicyBuilder) (*VerificationResult, error) {
	policy, err := pb.BuildConfig()
	if err != nil {
		return nil, fmt.Errorf("failed to build policy: %w", err)
	}
	// Let's go by the spec: https://docs.google.com/document/d/1kbhK2qyPPk8SLavHzYSDM8-Ueul9_oxIMVFuWMWKz0E/edit#heading=h.g11ovq2s1jxh
	// > ## Transparency Log Entry
	verifiedTlogTimestamps, err := v.VerifyTransparencyLogInclusion(entity)
	if err != nil {
		return nil, fmt.Errorf("failed to verify log inclusion: %w", err)
	}
	// > ## Establishing a Time for the Signature
	// > First, establish a time for the signature. This timestamp is required to validate the certificate chain, so this step comes first.
	verifiedTimestamps, err := v.VerifyObserverTimestamps(entity, verifiedTlogTimestamps)
	if err != nil {
		return nil, fmt.Errorf("failed to verify timestamps: %w", err)
	}
	verificationContent, err := entity.VerificationContent()
	if err != nil {
		return nil, fmt.Errorf("failed to fetch verification content: %w", err)
	}
	var signedWithCertificate bool
	var certSummary certificate.Summary
	// If the bundle was signed with a long-lived key, and does not have a Fulcio certificate,
	// then skip the certificate verification steps
	if leafCert := verificationContent.Certificate(); leafCert != nil {
		if policy.RequireSigningKey() {
			return nil, errors.New("expected key signature, not certificate")
		}
		// Certificate validity cannot be anchored without a timestamp, so
		// WithNoObserverTimestamps is incompatible with certificates.
		if v.config.allowNoTimestamp {
			return nil, errors.New("must provide timestamp to verify certificate")
		}
		signedWithCertificate = true
		// Get the summary before modifying the cert extensions
		certSummary, err = certificate.SummarizeCertificate(leafCert)
		if err != nil {
			return nil, fmt.Errorf("failed to summarize certificate: %w", err)
		}
		// From spec:
		// > ## Certificate
		// > …
		// > The Verifier MUST perform certification path validation (RFC 5280 §6) of the certificate chain with the pre-distributed Fulcio root certificate(s) as a trust anchor, but with a fake “current time.” If a timestamp from the timestamping service is available, the Verifier MUST perform path validation using the timestamp from the Timestamping Service. If a timestamp from the Transparency Service is available, the Verifier MUST perform path validation using the timestamp from the Transparency Service. If both are available, the Verifier performs path validation twice. If either fails, verification fails.

		// Go does not support the OtherName GeneralName SAN extension. If
		// Fulcio issued the certificate with an OtherName SAN, it will be
		// handled by SummarizeCertificate above, and it must be removed here
		// or the X.509 verification will fail.
		if len(leafCert.UnhandledCriticalExtensions) > 0 {
			var unhandledExts []asn1.ObjectIdentifier
			for _, oid := range leafCert.UnhandledCriticalExtensions {
				if !oid.Equal(cryptoutils.SANOID) {
					unhandledExts = append(unhandledExts, oid)
				}
			}
			leafCert.UnhandledCriticalExtensions = unhandledExts
		}
		var chains [][]*x509.Certificate
		for _, verifiedTs := range verifiedTimestamps {
			// verify the leaf certificate against the root
			// NOTE(review): chains keeps only the result of the final
			// iteration; earlier timestamps must still validate or we
			// return early. Confirm the SCT check below is intended to
			// use only the last chain set.
			chains, err = VerifyLeafCertificate(verifiedTs.Timestamp, leafCert, v.trustedMaterial)
			if err != nil {
				return nil, fmt.Errorf("failed to verify leaf certificate: %w", err)
			}
		}
		// From spec:
		// > Unless performing online verification (see §Alternative Workflows), the Verifier MUST extract the SignedCertificateTimestamp embedded in the leaf certificate, and verify it as in RFC 9162 §8.1.3, using the verification key from the Certificate Transparency Log.
		if v.config.requireSCTs {
			err = VerifySignedCertificateTimestamp(chains, v.config.ctlogEntriesThreshold, v.trustedMaterial)
			if err != nil {
				return nil, fmt.Errorf("failed to verify signed certificate timestamp: %w", err)
			}
		}
	}
	// If SCTs are required, ensure the bundle is certificate-signed not public key-signed
	if v.config.requireSCTs {
		if verificationContent.PublicKey() != nil {
			return nil, errors.New("SCTs required but bundle is signed with a public key, which cannot contain SCTs")
		}
	}
	// From spec:
	// > ## Signature Verification
	// > The Verifier MUST verify the provided signature for the constructed payload against the key in the leaf of the certificate chain.
	sigContent, err := entity.SignatureContent()
	if err != nil {
		return nil, fmt.Errorf("failed to fetch signature content: %w", err)
	}
	entityVersion, err := entity.Version()
	if err != nil {
		return nil, fmt.Errorf("failed to fetch entity version: %w", err)
	}
	// Bundle versions 0.1–0.3 (with or without the "v" prefix) require the
	// compatibility mode of the signature verifier.
	var enableCompat bool
	switch entityVersion {
	case "v0.1":
		fallthrough
	case "0.1":
		fallthrough
	case "v0.2":
		fallthrough
	case "0.2":
		fallthrough
	case "v0.3":
		fallthrough
	case "0.3":
		enableCompat = true
	}
	verifier, err := getSignatureVerifier(sigContent, verificationContent, v.trustedMaterial, enableCompat)
	if err != nil {
		return nil, fmt.Errorf("failed to get signature verifier: %w", err)
	}
	if policy.RequireArtifact() {
		switch {
		case policy.verifyArtifacts:
			err = verifySignatureWithVerifierAndArtifacts(verifier, sigContent, verificationContent, v.trustedMaterial, policy.artifacts)
		case policy.verifyArtifactDigests:
			err = verifySignatureWithVerifierAndArtifactDigests(verifier, sigContent, verificationContent, v.trustedMaterial, policy.artifactDigests)
		default:
			// should never happen, but just in case:
			err = errors.New("no artifact or artifact digest provided")
		}
	} else {
		// verifying with artifact has been explicitly turned off, so just check
		// the signature on the dsse envelope:
		err = verifySignatureWithVerifier(verifier, sigContent, verificationContent, v.trustedMaterial)
	}
	if err != nil {
		return nil, fmt.Errorf("failed to verify signature: %w", err)
	}
	// Hooray! We've verified all of the entity's constituent parts! 🎉 🥳
	// Now we can construct the results object accordingly.
	result := NewVerificationResult()
	if signedWithCertificate {
		result.Signature = &SignatureVerificationResult{
			Certificate: &certSummary,
		}
	} else {
		// NOTE(review): assumes PublicKey() is non-nil whenever there is no
		// certificate — confirm entities always carry one or the other.
		pubKeyID := []byte(verificationContent.PublicKey().Hint())
		result.Signature = &SignatureVerificationResult{
			PublicKeyID: &pubKeyID,
		}
	}
	// SignatureContent can be either an Envelope or a MessageSignature.
	// If it's an Envelope, let's pop the Statement for our results:
	if envelope := sigContent.EnvelopeContent(); envelope != nil {
		stmt, err := envelope.Statement()
		if err != nil {
			return nil, fmt.Errorf("failed to fetch envelope statement: %w", err)
		}
		result.Statement = stmt
	}
	result.VerifiedTimestamps = verifiedTimestamps
	// Now that the signed entity's crypto material has been verified, and the
	// result struct has been constructed, we can optionally enforce some
	// additional policies:
	// --------------------
	// From ## Certificate section,
	// >The Verifier MUST then check the certificate against the verification policy. Details on how to do this depend on the verification policy, but the Verifier SHOULD check the Issuer X.509 extension (OID 1.3.6.1.4.1.57264.1.1) at a minimum, and will in most cases check the SubjectAlternativeName as well. See Spec: Fulcio §TODO for example checks on the certificate.
	if policy.RequireIdentities() {
		if !signedWithCertificate {
			// We got asked to verify identities, but the entity was not signed with
			// a certificate. That's a problem!
			return nil, errors.New("can't verify certificate identities: entity was not signed with a certificate")
		}
		if len(policy.certificateIdentities) == 0 {
			return nil, errors.New("can't verify certificate identities: no identities provided")
		}
		matchingCertID, err := policy.certificateIdentities.Verify(certSummary)
		if err != nil {
			return nil, fmt.Errorf("failed to verify certificate identity: %w", err)
		}
		result.VerifiedIdentity = matchingCertID
	}
	return result, nil
}
// VerifyTransparencyLogInclusion verifies TlogEntries if expected. Optionally returns
// a list of verified timestamps from the log integrated timestamps when verifying
// with observer timestamps.
// TODO: Return a different verification result for logs specifically (also for #48)
func (v *Verifier) VerifyTransparencyLogInclusion(entity SignedEntity) ([]TimestampVerificationResult, error) {
	results := []TimestampVerificationResult{}
	if !v.config.requireTlogEntries {
		// Nothing to check; return an empty (non-nil) slice.
		return results, nil
	}
	// Log timestamps should be verified when WithIntegratedTimestamps or
	// WithObserverTimestamps is in use.
	verifyIntegratedTime := v.config.requireIntegratedTimestamps || v.config.requireObserverTimestamps
	verified, err := VerifyTlogEntry(entity, v.trustedMaterial, v.config.tlogEntriesThreshold, verifyIntegratedTime)
	if err != nil {
		return nil, err
	}
	for _, ts := range verified {
		results = append(results, TimestampVerificationResult{Type: "Tlog", URI: ts.URI, Timestamp: ts.Time})
	}
	return results, nil
}
// VerifyObserverTimestamps verifies RFC3161 signed timestamps, and verifies
// that timestamp thresholds are met with log entry integrated timestamps,
// signed timestamps, or a combination of both. The returned timestamps
// can be used to verify short-lived certificates.
// logTimestamps may be populated with verified log entry integrated timestamps
// In order to be verifiable, a SignedEntity must have at least one verified
// "observer timestamp".
func (v *Verifier) VerifyObserverTimestamps(entity SignedEntity, logTimestamps []TimestampVerificationResult) ([]TimestampVerificationResult, error) {
	verifiedTimestamps := []TimestampVerificationResult{}
	// From spec:
	// > … if verification or timestamp parsing fails, the Verifier MUST abort
	if v.config.requireSignedTimestamps {
		verifiedSignedTimestamps, err := VerifySignedTimestampWithThreshold(entity, v.trustedMaterial, v.config.signedTimestampThreshold)
		if err != nil {
			return nil, err
		}
		for _, vts := range verifiedSignedTimestamps {
			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time})
		}
	}
	if v.config.requireIntegratedTimestamps {
		// logTimestamps were already verified by VerifyTransparencyLogInclusion;
		// here we only enforce the configured threshold.
		if len(logTimestamps) < v.config.integratedTimeThreshold {
			return nil, fmt.Errorf("threshold not met for verified log entry integrated timestamps: %d < %d", len(logTimestamps), v.config.integratedTimeThreshold)
		}
		verifiedTimestamps = append(verifiedTimestamps, logTimestamps...)
	}
	if v.config.requireObserverTimestamps {
		// NOTE(review): if requireIntegratedTimestamps is also set, the
		// logTimestamps are appended a second time below — confirm these two
		// options are never enabled together.
		verifiedSignedTimestamps, verificationErrors, err := VerifySignedTimestamp(entity, v.trustedMaterial)
		if err != nil {
			return nil, fmt.Errorf("failed to verify signed timestamps: %w", err)
		}
		// check threshold for both RFC3161 and log timestamps
		tsCount := len(verifiedSignedTimestamps) + len(logTimestamps)
		if tsCount < v.config.observerTimestampThreshold {
			return nil, fmt.Errorf("threshold not met for verified signed & log entry integrated timestamps: %d < %d; error: %w",
				tsCount, v.config.observerTimestampThreshold, errors.Join(verificationErrors...))
		}
		// append all timestamps
		verifiedTimestamps = append(verifiedTimestamps, logTimestamps...)
		for _, vts := range verifiedSignedTimestamps {
			verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "TimestampAuthority", URI: vts.URI, Timestamp: vts.Time})
		}
	}
	if v.config.useCurrentTime {
		// use current time to verify certificate if no signed timestamps are provided
		verifiedTimestamps = append(verifiedTimestamps, TimestampVerificationResult{Type: "CurrentTime", URI: "", Timestamp: time.Now()})
	}
	if len(verifiedTimestamps) == 0 && !v.config.allowNoTimestamp {
		return nil, fmt.Errorf("no valid observer timestamps found")
	}
	return verifiedTimestamps, nil
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"bytes"
"crypto"
"encoding/hex"
"errors"
"fmt"
"net/url"
"regexp"
"strings"
"github.com/sigstore/sigstore-go/pkg/root"
"github.com/sigstore/sigstore-go/pkg/tlog"
"github.com/sigstore/sigstore/pkg/signature"
)
// maxAllowedTlogEntries caps how many tlog entries a single bundle may carry,
// preventing a malicious bundle from forcing excessive verification work.
const maxAllowedTlogEntries = 32

// VerifyTlogEntry verifies that the given entity has been logged
// in the transparency log and that the log entry is valid.
//
// The threshold parameter is the number of unique transparency log entries
// that must be verified.
func VerifyTlogEntry(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime bool) ([]root.Timestamp, error) { //nolint:revive
	entries, err := entity.TlogEntries()
	if err != nil {
		return nil, err
	}

	// limit the number of tlog entries to prevent DoS
	if len(entries) > maxAllowedTlogEntries {
		return nil, fmt.Errorf("too many tlog entries: %d > %d", len(entries), maxAllowedTlogEntries)
	}

	// disallow duplicate entries, as a malicious actor could use duplicates to bypass the threshold
	for i := 0; i < len(entries); i++ {
		for j := i + 1; j < len(entries); j++ {
			if entries[i].LogKeyID() == entries[j].LogKeyID() && entries[i].LogIndex() == entries[j].LogIndex() {
				return nil, errors.New("duplicate tlog entries found")
			}
		}
	}

	sigContent, err := entity.SignatureContent()
	if err != nil {
		return nil, err
	}
	entitySignature := sigContent.Signature()

	verificationContent, err := entity.VerificationContent()
	if err != nil {
		return nil, err
	}

	verifiedTimestamps := []root.Timestamp{}
	logEntriesVerified := 0

	for _, entry := range entries {
		if err := tlog.ValidateEntry(entry); err != nil {
			return nil, err
		}

		// Fetch the trusted log map once and reuse it for both the lookup
		// below and the SET verification (previously fetched twice).
		rekorLogs := trustedMaterial.RekorLogs()
		keyID := entry.LogKeyID()
		hex64Key := hex.EncodeToString([]byte(keyID))
		tlogVerifier, ok := rekorLogs[hex64Key]
		if !ok {
			// skip entries the trust root cannot verify
			continue
		}

		if !entry.HasInclusionPromise() && !entry.HasInclusionProof() {
			// errors.New instead of fmt.Errorf: no format verbs (staticcheck S1039).
			return nil, errors.New("entry must contain an inclusion proof and/or promise")
		}
		if entry.HasInclusionPromise() {
			if err := tlog.VerifySET(entry, rekorLogs); err != nil {
				// skip entries the trust root cannot verify
				continue
			}
			if trustIntegratedTime {
				verifiedTimestamps = append(verifiedTimestamps, root.Timestamp{Time: entry.IntegratedTime(), URI: tlogVerifier.BaseURL})
			}
		}
		if entry.HasInclusionProof() {
			verifier, err := getVerifier(tlogVerifier.PublicKey, tlogVerifier.SignatureHashFunc)
			if err != nil {
				return nil, err
			}
			// Scoped error checks replace the previous shadowed `err` plus a
			// dead trailing `if err != nil` that could never fire on the v2 path.
			if hasRekorV1STH(entry) {
				if err := tlog.VerifyInclusion(entry, *verifier); err != nil {
					return nil, err
				}
			} else {
				// Rekor v2 checkpoints are validated against the log's origin,
				// derived from the base URL's hostname.
				if tlogVerifier.BaseURL == "" {
					return nil, errors.New("cannot verify Rekor v2 entry without baseUrl in transparency log's trusted root")
				}
				u, err := url.Parse(tlogVerifier.BaseURL)
				if err != nil {
					return nil, err
				}
				if err := tlog.VerifyCheckpointAndInclusion(entry, *verifier, u.Hostname()); err != nil {
					return nil, err
				}
			}
			// DO NOT use timestamp with only an inclusion proof, because it is not signed metadata
		}

		// Ensure entry signature matches signature from bundle
		if !bytes.Equal(entry.Signature(), entitySignature) {
			return nil, errors.New("transparency log signature does not match")
		}

		// Ensure entry certificate matches bundle certificate
		if !verificationContent.CompareKey(entry.PublicKey(), trustedMaterial) {
			return nil, errors.New("transparency log certificate does not match")
		}

		// TODO: if you have access to artifact, check that it matches body subject

		// Check tlog entry time against bundle certificates
		if !entry.IntegratedTime().IsZero() {
			if !verificationContent.ValidAtTime(entry.IntegratedTime(), trustedMaterial) {
				return nil, errors.New("integrated time outside certificate validity")
			}
		}

		// successful log entry verification
		logEntriesVerified++
	}

	if logEntriesVerified < logThreshold {
		return nil, fmt.Errorf("not enough verified log entries from transparency log: %d < %d", logEntriesVerified, logThreshold)
	}

	return verifiedTimestamps, nil
}
// getVerifier builds a signature.Verifier for the supplied public key and
// hash function, returning a pointer to the loaded verifier.
func getVerifier(publicKey crypto.PublicKey, hashFunc crypto.Hash) (*signature.Verifier, error) {
	v, err := signature.LoadVerifier(publicKey, hashFunc)
	if err != nil {
		return nil, err
	}
	return &v, nil
}
// VerifyArtifactTransparencyLog verifies the entity's transparency log
// entries; it is a thin wrapper that forwards all arguments to
// VerifyTlogEntry unchanged.
//
// TODO: remove this deprecated function before 2.0
//
// Deprecated: use VerifyTlogEntry instead
func VerifyArtifactTransparencyLog(entity SignedEntity, trustedMaterial root.TrustedMaterial, logThreshold int, trustIntegratedTime bool) ([]root.Timestamp, error) { //nolint:revive
	return VerifyTlogEntry(entity, trustedMaterial, logThreshold, trustIntegratedTime)
}
// treeIDSuffixRegex matches a checkpoint origin line ending in " - <number>",
// the convention used by Rekor v1 Signed Tree Heads to embed a Tree ID.
var treeIDSuffixRegex = regexp.MustCompile(".* - [0-9]+$")

// hasRekorV1STH checks if the checkpoint has a Rekor v1-style Signed Tree Head
// which contains a numeric Tree ID as part of its checkpoint origin.
func hasRekorV1STH(entry *tlog.Entry) bool {
	envelope := entry.TransparencyLogEntry().GetInclusionProof().GetCheckpoint().GetEnvelope()
	// A checkpoint envelope must span at least four newline-separated lines
	// (i.e. contain at least three newlines) to be considered.
	if strings.Count(envelope, "\n") < 3 {
		return false
	}
	// The origin is the first line of the envelope.
	origin, _, _ := strings.Cut(envelope, "\n")
	return treeIDSuffixRegex.MatchString(origin)
}
// Copyright 2023 The Sigstore Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package verify
import (
"errors"
"fmt"
"github.com/sigstore/sigstore-go/pkg/root"
)
// maxAllowedTimestamps caps how many signed timestamps a single entity may
// carry, so a malicious bundle cannot force excessive verification work.
const maxAllowedTimestamps = 32

// VerifySignedTimestamp verifies that the given entity has been timestamped
// by a trusted timestamp authority and that the timestamp is valid.
//
// It returns the timestamps that verified, one error per timestamp that was
// rejected, and a terminal error if verification could not be attempted.
func VerifySignedTimestamp(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]*root.Timestamp, []error, error) { //nolint:revive
	stamps, err := entity.Timestamps()
	if err != nil {
		return nil, nil, err
	}

	// limit the number of timestamps to prevent DoS
	if len(stamps) > maxAllowedTimestamps {
		return nil, nil, fmt.Errorf("too many signed timestamps: %d > %d", len(stamps), maxAllowedTimestamps)
	}

	sigContent, err := entity.SignatureContent()
	if err != nil {
		return nil, nil, err
	}
	sig := sigContent.Signature()

	verified := []*root.Timestamp{}
	var failures []error
	for _, stamp := range stamps {
		ts, err := verifySignedTimestamp(stamp, sig, trustedMaterial)
		if err != nil {
			failures = append(failures, err)
			continue
		}
		// A second timestamp from an authority we already counted is dropped,
		// so a single TSA cannot satisfy the threshold by itself.
		if isDuplicateTSA(verified, ts) {
			failures = append(failures, fmt.Errorf("duplicate timestamps from the same authority, ignoring %s", ts.URI))
			continue
		}
		verified = append(verified, ts)
	}
	return verified, failures, nil
}
// isDuplicateTSA reports whether verifiedSignedTimestamp originates from an
// authority (identified by its URI) that already appears in verifiedTimestamps.
// This is used to prevent replay attacks and ensure a single compromised TSA
// cannot meet the threshold.
func isDuplicateTSA(verifiedTimestamps []*root.Timestamp, verifiedSignedTimestamp *root.Timestamp) bool {
	for i := range verifiedTimestamps {
		if verifiedTimestamps[i].URI == verifiedSignedTimestamp.URI {
			return true
		}
	}
	return false
}
// VerifySignedTimestampWithThreshold verifies that the given entity has been
// timestamped by a trusted timestamp authority and that the timestamp is
// valid. (The previous doc comment named the wrong function, which breaks the
// Go doc-comment convention and misrenders in godoc.)
//
// The threshold parameter is the number of unique timestamps that must be
// verified.
func VerifySignedTimestampWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]*root.Timestamp, error) { //nolint:revive
	verifiedTimestamps, verificationErrors, err := VerifySignedTimestamp(entity, trustedMaterial)
	if err != nil {
		return nil, err
	}
	// Surface the individual per-timestamp failures when the threshold is
	// missed, so callers can see why candidates were rejected.
	if len(verifiedTimestamps) < threshold {
		return nil, fmt.Errorf("threshold not met for verified signed timestamps: %d < %d; error: %w", len(verifiedTimestamps), threshold, errors.Join(verificationErrors...))
	}
	return verifiedTimestamps, nil
}
// verifySignedTimestamp checks a single RFC3161 signed timestamp over
// signatureBytes against every trusted timestamping authority, returning the
// first successful verification result.
func verifySignedTimestamp(signedTimestamp []byte, signatureBytes []byte, trustedMaterial root.TrustedMaterial) (*root.Timestamp, error) {
	var tsaErrs []error
	for _, tsa := range trustedMaterial.TimestampingAuthorities() {
		ts, err := tsa.Verify(signedTimestamp, signatureBytes)
		if err == nil {
			return ts, nil
		}
		tsaErrs = append(tsaErrs, err)
	}
	// No authority accepted the timestamp; report every failure together.
	return nil, fmt.Errorf("unable to verify signed timestamps: %w", errors.Join(tsaErrs...))
}
// VerifyTimestampAuthority verifies the entity's signed timestamps; it is a
// thin wrapper that forwards all arguments to VerifySignedTimestamp unchanged.
//
// TODO: remove below deprecated functions before 2.0
//
// Deprecated: use VerifySignedTimestamp instead.
func VerifyTimestampAuthority(entity SignedEntity, trustedMaterial root.TrustedMaterial) ([]*root.Timestamp, []error, error) { //nolint:revive
	return VerifySignedTimestamp(entity, trustedMaterial)
}
// VerifyTimestampAuthorityWithThreshold verifies the entity's signed
// timestamps against a threshold; it is a thin wrapper that forwards all
// arguments to VerifySignedTimestampWithThreshold unchanged.
//
// Deprecated: use VerifySignedTimestampWithThreshold instead.
func VerifyTimestampAuthorityWithThreshold(entity SignedEntity, trustedMaterial root.TrustedMaterial, threshold int) ([]*root.Timestamp, error) { //nolint:revive
	return VerifySignedTimestampWithThreshold(entity, trustedMaterial, threshold)
}