package cryptoservice
import (
"crypto"
"crypto/rand"
"crypto/x509"
"fmt"
"time"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
// GenerateCertificate generates an X509 Certificate from a template, given a GUN and validity interval
func GenerateCertificate(rootKey data.PrivateKey, gun data.GUN, startTime, endTime time.Time) (*x509.Certificate, error) {
signer := rootKey.CryptoSigner()
if signer == nil {
return nil, fmt.Errorf("key type not supported for Certificate generation: %s", rootKey.Algorithm())
}
return generateCertificate(signer, gun, startTime, endTime)
}
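
// A minimal usage sketch (not part of the original API): it assumes a fresh
// ECDSA key from tuf/utils; the GUN and validity window are illustrative, and
// data.ECDSAKey is assumed to be the ECDSA algorithm constant from tuf/data.
//
//	rootKey, err := utils.GenerateKey(data.ECDSAKey)
//	if err != nil {
//		return err
//	}
//	cert, err := GenerateCertificate(rootKey, "example.com/repo",
//		time.Now(), time.Now().AddDate(10, 0, 0))
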
func generateCertificate(signer crypto.Signer, gun data.GUN, startTime, endTime time.Time) (*x509.Certificate, error) {
template, err := utils.NewCertificate(gun.String(), startTime, endTime)
if err != nil {
return nil, fmt.Errorf("failed to create the certificate template for: %s (%v)", gun, err)
}
derBytes, err := x509.CreateCertificate(rand.Reader, template, template, signer.Public(), signer)
if err != nil {
return nil, fmt.Errorf("failed to create the certificate for: %s (%v)", gun, err)
}
cert, err := x509.ParseCertificate(derBytes)
if err != nil {
return nil, fmt.Errorf("failed to parse the certificate for key: %s (%v)", gun, err)
}
return cert, nil
}
package cryptoservice
import (
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/trustmanager"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
var (
// ErrNoValidPrivateKey is returned if a key being imported doesn't
// look like a private key
ErrNoValidPrivateKey = errors.New("no valid private key found")
// ErrRootKeyNotEncrypted is returned if a root key being imported is
// unencrypted
ErrRootKeyNotEncrypted = errors.New("only encrypted root keys may be imported")
// EmptyService is an empty crypto service
EmptyService = NewCryptoService()
)
// CryptoService implements Sign and Create, holding a specific GUN and keystore to
// operate on
type CryptoService struct {
keyStores []trustmanager.KeyStore
}
// NewCryptoService returns an instance of CryptoService
func NewCryptoService(keyStores ...trustmanager.KeyStore) *CryptoService {
return &CryptoService{keyStores: keyStores}
}
// Create is used to generate keys for targets, snapshots and timestamps
func (cs *CryptoService) Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error) {
if algorithm == data.RSAKey {
return nil, fmt.Errorf("%s keys can only be imported", data.RSAKey)
}
privKey, err := utils.GenerateKey(algorithm)
if err != nil {
return nil, fmt.Errorf("failed to generate %s key: %v", algorithm, err)
}
logrus.Debugf("generated new %s key for role: %s and keyID: %s", algorithm, role.String(), privKey.ID())
pubKey := data.PublicKeyFromPrivate(privKey)
return pubKey, cs.AddKey(role, gun, privKey)
}
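
// Usage sketch (hypothetical, mirroring the fuzz harness further below): an
// in-memory keystore guarded by a constant passphrase, then key creation for
// the targets role. data.ECDSAKey is assumed to be the ECDSA algorithm
// constant from tuf/data.
//
//	cs := NewCryptoService(
//		trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("pass")))
//	pub, err := cs.Create(data.CanonicalTargetsRole, "example.com/repo", data.ECDSAKey)
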
// GetPrivateKey returns the private key and matching role for the given key ID,
// if present in any of the keystores.
func (cs *CryptoService) GetPrivateKey(keyID string) (k data.PrivateKey, role data.RoleName, err error) {
for _, ks := range cs.keyStores {
if k, role, err = ks.GetKey(keyID); err == nil {
return
}
switch err.(type) {
case trustmanager.ErrPasswordInvalid, trustmanager.ErrAttemptsExceeded:
return
default:
continue
}
}
return // returns whatever the final values were
}
// GetKey returns a key by ID
func (cs *CryptoService) GetKey(keyID string) data.PublicKey {
privKey, _, err := cs.GetPrivateKey(keyID)
if err != nil {
return nil
}
return data.PublicKeyFromPrivate(privKey)
}
// GetKeyInfo returns role and GUN info of a key by ID
func (cs *CryptoService) GetKeyInfo(keyID string) (trustmanager.KeyInfo, error) {
for _, store := range cs.keyStores {
if info, err := store.GetKeyInfo(keyID); err == nil {
return info, nil
}
}
return trustmanager.KeyInfo{}, fmt.Errorf("could not find info for keyID %s", keyID)
}
// RemoveKey deletes a key by ID
func (cs *CryptoService) RemoveKey(keyID string) (err error) {
for _, ks := range cs.keyStores {
ks.RemoveKey(keyID)
}
	return // always nil; removal errors from individual keystores are ignored
}
// AddKey adds a private key for the specified role and GUN to the first
// keystore that will accept it.
func (cs *CryptoService) AddKey(role data.RoleName, gun data.GUN, key data.PrivateKey) (err error) {
// First check if this key already exists in any of our keystores
for _, ks := range cs.keyStores {
if keyInfo, err := ks.GetKeyInfo(key.ID()); err == nil {
if keyInfo.Role != role {
return fmt.Errorf("key with same ID already exists for role: %s", keyInfo.Role.String())
}
logrus.Debugf("key with same ID %s and role %s already exists", key.ID(), keyInfo.Role.String())
return nil
}
}
// If the key didn't exist in any of our keystores, add and return on the first successful keystore
for _, ks := range cs.keyStores {
// Try to add to this keystore, return if successful
if err = ks.AddKey(trustmanager.KeyInfo{Role: role, Gun: gun}, key); err == nil {
return nil
}
}
return // returns whatever the final values were
}
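
// For example (a sketch; privKey stands for a previously generated or
// imported private key):
//
//	err := cs.AddKey(data.CanonicalSnapshotRole, "example.com/repo", privKey)
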
// ListKeys returns a list of key IDs valid for the given role
func (cs *CryptoService) ListKeys(role data.RoleName) []string {
var res []string
for _, ks := range cs.keyStores {
for k, r := range ks.ListKeys() {
if r.Role == role {
res = append(res, k)
}
}
}
return res
}
// ListAllKeys returns a map of key IDs to role
func (cs *CryptoService) ListAllKeys() map[string]data.RoleName {
res := make(map[string]data.RoleName)
for _, ks := range cs.keyStores {
for k, r := range ks.ListKeys() {
			res[k] = r.Role // keys are content-addressed, so overwrites are harmless
}
}
return res
}
// CheckRootKeyIsEncrypted makes sure the root key is encrypted. We have
// internal assumptions that depend on this.
func CheckRootKeyIsEncrypted(pemBytes []byte) error {
block, _ := pem.Decode(pemBytes)
if block == nil {
return ErrNoValidPrivateKey
}
if block.Type == "ENCRYPTED PRIVATE KEY" {
return nil
}
//lint:ignore SA1019 Needed for legacy keys.
if !notary.FIPSEnabled() && x509.IsEncryptedPEMBlock(block) {
return nil
}
return ErrRootKeyNotEncrypted
}
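
// A sketch of accepted and rejected PEM types (derBytes is a placeholder for
// the key payload; only the block type and headers matter to this check):
//
//	// accepted: an encrypted PKCS#8 key
//	err := CheckRootKeyIsEncrypted(pem.EncodeToMemory(
//		&pem.Block{Type: "ENCRYPTED PRIVATE KEY", Bytes: derBytes}))
//	// err == nil
//
//	// rejected: a plaintext key of any type
//	err = CheckRootKeyIsEncrypted(pem.EncodeToMemory(
//		&pem.Block{Type: "EC PRIVATE KEY", Bytes: derBytes}))
//	// err == ErrRootKeyNotEncrypted
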
// +build gofuzz
package fuzz
import (
"github.com/theupdateframework/notary/cryptoservice"
"github.com/theupdateframework/notary/passphrase"
"github.com/theupdateframework/notary/trustmanager"
)
// Fuzz implements the fuzzer that targets GetPrivateKey
func Fuzz(data []byte) int {
cryptos := cryptoservice.NewCryptoService(trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("pass")))
_, _, err := cryptos.GetPrivateKey(string(data))
if err != nil {
return 0
}
return 1
}
package notary
import (
"crypto"
	// md5 must be imported so that we can test its availability.
_ "crypto/md5" // #nosec
)
// FIPSEnabled returns true if running in FIPS mode.
// If compiled in FIPS mode the md5 hash function is never available
// even when imported. This seems to be the best test we have for it.
func FIPSEnabled() bool {
return !crypto.MD5.Available()
}
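
// Usage sketch: gate non-FIPS code paths on this check, as
// cryptoservice.CheckRootKeyIsEncrypted above does for legacy PEM-encrypted
// keys:
//
//	if !notary.FIPSEnabled() && x509.IsEncryptedPEMBlock(block) {
//		// legacy encrypted key handling
//	}
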
// Package passphrase provides utilities for managing passphrases for TUF and
// Notary keys.
package passphrase
import (
"bufio"
"errors"
"fmt"
"io"
"os"
"path/filepath"
"strings"
"github.com/theupdateframework/notary"
"golang.org/x/term"
)
const (
idBytesToDisplay = 7
tufRootAlias = "root"
tufRootKeyGenerationWarning = `You are about to create a new root signing key passphrase. This passphrase
will be used to protect the most sensitive key in your signing system. Please
choose a long, complex passphrase and be careful to keep the password and the
key file itself secure and backed up. It is highly recommended that you use a
password manager to generate the passphrase and keep it safe. There will be no
way to recover this key. You can find the key in your config directory.`
)
var (
// ErrTooShort is returned if the passphrase entered for a new key is
// below the minimum length
ErrTooShort = errors.New("passphrase too short")
	// ErrDontMatch is returned if the two entered passphrases don't match.
ErrDontMatch = errors.New("the entered passphrases do not match")
// ErrTooManyAttempts is returned if the maximum number of passphrase
// entry attempts is reached.
ErrTooManyAttempts = errors.New("too many attempts")
// ErrNoInput is returned if we do not have a valid input method for passphrases
ErrNoInput = errors.New("please either use environment variables or STDIN with a terminal to provide key passphrases")
)
// PromptRetriever returns a new Retriever which will provide a prompt on stdin
// and stdout to retrieve a passphrase. stdin is checked to confirm that it is a
// terminal; if it is not, the PromptRetriever errors when attempting to retrieve
// a passphrase.
// Upon successful passphrase retrievals, the passphrase will be cached such that
// subsequent prompts will produce the same passphrase.
func PromptRetriever() notary.PassRetriever {
if !term.IsTerminal(int(os.Stdin.Fd())) {
return func(string, string, bool, int) (string, bool, error) {
return "", false, ErrNoInput
}
}
return PromptRetrieverWithInOut(os.Stdin, os.Stdout, nil)
}
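
// A sketch of invoking the returned retriever directly (the key name, alias,
// and attempt count are illustrative):
//
//	retriever := PromptRetriever()
//	pass, giveUp, err := retriever("abcdef1234", "root", false, 0)
//	if giveUp || err != nil {
//		// no usable passphrase was obtained
//	}
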
type boundRetriever struct {
in io.Reader
out io.Writer
aliasMap map[string]string
passphraseCache map[string]string
}
func (br *boundRetriever) getPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
if numAttempts == 0 {
if alias == tufRootAlias && createNew {
fmt.Fprintln(br.out, tufRootKeyGenerationWarning)
}
if pass, ok := br.passphraseCache[alias]; ok {
return pass, false, nil
}
} else if !createNew { // per `if`, numAttempts > 0 if we're at this `else`
if numAttempts > 3 {
return "", true, ErrTooManyAttempts
}
fmt.Fprintln(br.out, "Passphrase incorrect. Please retry.")
}
// passphrase not cached and we're not aborting, get passphrase from user!
return br.requestPassphrase(keyName, alias, createNew, numAttempts)
}
func (br *boundRetriever) requestPassphrase(keyName, alias string, createNew bool, numAttempts int) (string, bool, error) {
// Figure out if we should display a different string for this alias
displayAlias := alias
if val, ok := br.aliasMap[alias]; ok {
displayAlias = val
}
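	// Derive a short display name from the key name: the first
	// idBytesToDisplay characters of the ID, plus any path prefix, e.g.
	// "root_keys/abcdef1234567890" is displayed as "abcdef1 (root_keys)".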
indexOfLastSeparator := strings.LastIndex(keyName, string(filepath.Separator))
if indexOfLastSeparator == -1 {
indexOfLastSeparator = 0
}
var shortName string
if len(keyName) > indexOfLastSeparator+idBytesToDisplay {
if indexOfLastSeparator > 0 {
keyNamePrefix := keyName[:indexOfLastSeparator]
keyNameID := keyName[indexOfLastSeparator+1 : indexOfLastSeparator+idBytesToDisplay+1]
shortName = keyNameID + " (" + keyNamePrefix + ")"
} else {
shortName = keyName[indexOfLastSeparator : indexOfLastSeparator+idBytesToDisplay]
}
}
withID := fmt.Sprintf(" with ID %s", shortName)
if shortName == "" {
withID = ""
}
switch {
case createNew:
fmt.Fprintf(br.out, "Enter passphrase for new %s key%s: ", displayAlias, withID)
case displayAlias == "yubikey":
fmt.Fprintf(br.out, "Enter the %s for the attached Yubikey: ", keyName)
default:
fmt.Fprintf(br.out, "Enter passphrase for %s key%s: ", displayAlias, withID)
}
stdin := bufio.NewReader(br.in)
passphrase, err := GetPassphrase(stdin)
fmt.Fprintln(br.out)
if err != nil {
return "", false, err
}
retPass := strings.TrimSpace(string(passphrase))
if createNew {
err = br.verifyAndConfirmPassword(stdin, retPass, displayAlias, withID)
if err != nil {
return "", false, err
}
}
br.cachePassword(alias, retPass)
return retPass, false, nil
}
func (br *boundRetriever) verifyAndConfirmPassword(stdin *bufio.Reader, retPass, displayAlias, withID string) error {
if len(retPass) < 8 {
fmt.Fprintln(br.out, "Passphrase is too short. Please use a password manager to generate and store a good random passphrase.")
return ErrTooShort
}
fmt.Fprintf(br.out, "Repeat passphrase for new %s key%s: ", displayAlias, withID)
confirmation, err := GetPassphrase(stdin)
fmt.Fprintln(br.out)
if err != nil {
return err
}
confirmationStr := strings.TrimSpace(string(confirmation))
if retPass != confirmationStr {
fmt.Fprintln(br.out, "Passphrases do not match. Please retry.")
return ErrDontMatch
}
return nil
}
func (br *boundRetriever) cachePassword(alias, retPass string) {
br.passphraseCache[alias] = retPass
}
// PromptRetrieverWithInOut returns a new Retriever which will provide a
// prompt using the given in and out readers. The passphrase will be cached
// such that subsequent prompts will produce the same passphrase.
// aliasMap can be used to specify display names for TUF key aliases. If aliasMap
// is nil, a sensible default will be used.
func PromptRetrieverWithInOut(in io.Reader, out io.Writer, aliasMap map[string]string) notary.PassRetriever {
bound := &boundRetriever{
in: in,
out: out,
aliasMap: aliasMap,
passphraseCache: make(map[string]string),
}
return bound.getPassphrase
}
// ConstantRetriever returns a new Retriever which will return a constant string
// as a passphrase.
func ConstantRetriever(constantPassphrase string) notary.PassRetriever {
return func(k, a string, c bool, n int) (string, bool, error) {
return constantPassphrase, false, nil
}
}
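
// Useful for tests and non-interactive flows, e.g. (as in the fuzz harness
// elsewhere in the repository):
//
//	cs := cryptoservice.NewCryptoService(
//		trustmanager.NewKeyMemoryStore(passphrase.ConstantRetriever("pass")))
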
// GetPassphrase gets the passphrase from the given bufio.Reader or from the
// terminal. When reading from a terminal, echo is disabled so the passphrase
// is not displayed.
func GetPassphrase(in *bufio.Reader) ([]byte, error) {
var (
passphrase []byte
err error
)
if term.IsTerminal(int(os.Stdin.Fd())) {
passphrase, err = term.ReadPassword(int(os.Stdin.Fd()))
} else {
passphrase, err = in.ReadBytes('\n')
}
return passphrase, err
}
package handlers
import (
"encoding/json"
"fmt"
"net/http"
"strconv"
ctxu "github.com/docker/distribution/context"
"github.com/gorilla/mux"
"golang.org/x/net/context"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/server/errors"
"github.com/theupdateframework/notary/server/storage"
)
type changefeedResponse struct {
NumberOfRecords int `json:"count"`
Records []storage.Change `json:"records"`
}
// Changefeed returns a list of changes according to the provided filters
func Changefeed(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
var (
vars = mux.Vars(r)
logger = ctxu.GetLogger(ctx)
qs = r.URL.Query()
gun = vars["gun"]
changeID = qs.Get("change_id")
store, records, err = checkChangefeedInputs(logger, ctx.Value(notary.CtxKeyMetaStore), qs.Get("records"))
)
if err != nil {
// err already logged and in correct format.
return err
}
out, err := changefeed(logger, store, gun, changeID, records)
if err == nil {
w.Write(out)
}
return err
}
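
// A hypothetical request shape (the exact route is defined by the server's
// router, which is not shown here); the response body is a changefeedResponse:
//
//	GET .../_trust/changefeed?change_id=0&records=10
//	=> {"count": N, "records": [...]}
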
func changefeed(logger ctxu.Logger, store storage.MetaStore, gun, changeID string, records int64) ([]byte, error) {
changes, err := store.GetChanges(changeID, int(records), gun)
switch err.(type) {
case nil:
// no error to return
case storage.ErrBadQuery:
return nil, errors.ErrInvalidParams.WithDetail(err)
default:
logger.Errorf("%d GET could not retrieve records: %s", http.StatusInternalServerError, err.Error())
return nil, errors.ErrUnknown.WithDetail(err)
}
out, err := json.Marshal(&changefeedResponse{
NumberOfRecords: len(changes),
Records: changes,
})
if err != nil {
logger.Errorf("%d GET could not json.Marshal changefeedResponse", http.StatusInternalServerError)
return nil, errors.ErrUnknown.WithDetail(err)
}
return out, nil
}
func checkChangefeedInputs(logger ctxu.Logger, s interface{}, r string) (
store storage.MetaStore, pageSize int64, err error) {
store, ok := s.(storage.MetaStore)
if !ok {
logger.Errorf("%d GET unable to retrieve storage", http.StatusInternalServerError)
err = errors.ErrNoStorage.WithDetail(nil)
return
}
pageSize, err = strconv.ParseInt(r, 10, 32)
if err != nil {
logger.Errorf("%d GET invalid pageSize: %s", http.StatusBadRequest, r)
err = errors.ErrInvalidParams.WithDetail(
fmt.Sprintf("invalid records parameter: %s", err.Error()),
)
return
}
if pageSize == 0 {
pageSize = notary.DefaultPageSize
}
return
}
package handlers
import (
"bytes"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"io"
"mime"
"net/http"
"strings"
ctxu "github.com/docker/distribution/context"
"github.com/gorilla/mux"
"golang.org/x/net/context"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/server/errors"
"github.com/theupdateframework/notary/server/snapshot"
"github.com/theupdateframework/notary/server/storage"
"github.com/theupdateframework/notary/server/timestamp"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
"github.com/theupdateframework/notary/tuf/validation"
"github.com/theupdateframework/notary/utils"
)
// MainHandler is the default handler for the server
func MainHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
// For now it only supports `GET`
if r.Method != "GET" {
return errors.ErrGenericNotFound.WithDetail(nil)
}
if _, err := w.Write([]byte("{}")); err != nil {
return errors.ErrUnknown.WithDetail(err)
}
return nil
}
// AtomicUpdateHandler will accept multiple TUF files and ensure that the storage
// backend is atomically updated with all the new records.
func AtomicUpdateHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
defer r.Body.Close()
vars := mux.Vars(r)
return atomicUpdateHandler(ctx, w, r, vars)
}
func atomicUpdateHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
gun := data.GUN(vars["gun"])
s := ctx.Value(notary.CtxKeyMetaStore)
logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
store, ok := s.(storage.MetaStore)
if !ok {
logger.Error("500 POST unable to retrieve storage")
return errors.ErrNoStorage.WithDetail(nil)
}
cryptoServiceVal := ctx.Value(notary.CtxKeyCryptoSvc)
cryptoService, ok := cryptoServiceVal.(signed.CryptoService)
if !ok {
logger.Error("500 POST unable to retrieve signing service")
return errors.ErrNoCryptoService.WithDetail(nil)
}
reader, err := r.MultipartReader()
if err != nil {
logger.Info("400 POST unable to parse TUF data")
return errors.ErrMalformedUpload.WithDetail(nil)
}
var updates []storage.MetaUpdate
for {
		part, err := reader.NextPart()
		if err == io.EOF {
			break
		}
		if err != nil {
			logger.Infof("400 POST error reading multipart data: %s", err)
			return errors.ErrMalformedUpload.WithDetail(nil)
		}
_, params, err := mime.ParseMediaType(part.Header.Get("Content-Disposition"))
if err != nil {
logger.Infof("400 POST error parsing Content-Disposition header: %s", err)
return errors.ErrNoFilename.WithDetail(nil)
}
role := data.RoleName(strings.TrimSuffix(params["filename"], ".json"))
if role.String() == "" {
logger.Info("400 POST empty role")
return errors.ErrNoFilename.WithDetail(nil)
} else if !data.ValidRole(role) {
logger.Infof("400 POST invalid role: %s", role)
return errors.ErrInvalidRole.WithDetail(role)
}
meta := &data.SignedMeta{}
var input []byte
inBuf := bytes.NewBuffer(input)
dec := json.NewDecoder(io.TeeReader(part, inBuf))
err = dec.Decode(meta)
if err != nil {
logger.Info("400 POST malformed update JSON")
return errors.ErrMalformedJSON.WithDetail(nil)
}
version := meta.Signed.Version
updates = append(updates, storage.MetaUpdate{
Role: role,
Version: version,
Data: inBuf.Bytes(),
})
}
updates, err = validateUpdate(cryptoService, gun, updates, store)
if err != nil {
serializable, serializableError := validation.NewSerializableError(err)
if serializableError != nil {
logger.Info("400 POST error validating update")
return errors.ErrInvalidUpdate.WithDetail(nil)
}
return errors.ErrInvalidUpdate.WithDetail(serializable)
}
err = store.UpdateMany(gun, updates)
if err != nil {
// If we have an old version error, surface to user with error code
if _, ok := err.(storage.ErrOldVersion); ok {
logger.Info("400 POST old version error")
return errors.ErrOldVersion.WithDetail(err)
}
// More generic storage update error, possibly due to attempted rollback
logger.Errorf("500 POST error applying update request: %v", err)
return errors.ErrUpdating.WithDetail(nil)
}
logTS(logger, gun.String(), updates)
return nil
}
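
// The expected request body is multipart/form-data in which each part's
// filename names the TUF role it carries; the handler strips an optional
// ".json" suffix. A client-side sketch using the store helper exercised by
// the fuzz tests below (serverURL and the *JSON blobs are placeholders):
//
//	metas := map[string][]byte{
//		"root":     rootJSON,
//		"targets":  targetsJSON,
//		"snapshot": snapshotJSON,
//	}
//	r, err := store.NewMultiPartMetaRequest(serverURL, metas)
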
// logTS logs the timestamp update at Info level
func logTS(logger ctxu.Logger, gun string, updates []storage.MetaUpdate) {
for _, update := range updates {
if update.Role == data.CanonicalTimestampRole {
checksumBin := sha256.Sum256(update.Data)
checksum := hex.EncodeToString(checksumBin[:])
logger.Infof("updated %s to timestamp version %d, checksum %s", gun, update.Version, checksum)
break
}
}
}
// GetHandler returns the JSON for a specified role and GUN.
func GetHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
defer r.Body.Close()
vars := mux.Vars(r)
return getHandler(ctx, w, r, vars)
}
func getHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
gun := data.GUN(vars["gun"])
checksum := vars["checksum"]
version := vars["version"]
tufRole := vars["tufRole"]
s := ctx.Value(notary.CtxKeyMetaStore)
logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
store, ok := s.(storage.MetaStore)
if !ok {
logger.Error("500 GET: no storage exists")
return errors.ErrNoStorage.WithDetail(nil)
}
lastModified, output, err := getRole(ctx, store, gun, data.RoleName(tufRole), checksum, version)
if err != nil {
logger.Infof("404 GET %s role", tufRole)
return err
}
if lastModified != nil {
		// lastModified should never be nil, but if it is and the Last-Modified
		// header is not set, the cache control handler will set the last
		// modified date to the beginning of time.
utils.SetLastModifiedHeader(w.Header(), *lastModified)
} else {
logger.Warnf("Got bytes out for %s's %s (checksum: %s), but missing lastModified date",
gun, tufRole, checksum)
}
w.Write(output)
return nil
}
// DeleteHandler deletes all data for a GUN. A 200 response indicates success.
func DeleteHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
vars := mux.Vars(r)
gun := data.GUN(vars["gun"])
logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
s := ctx.Value(notary.CtxKeyMetaStore)
store, ok := s.(storage.MetaStore)
if !ok {
logger.Error("500 DELETE repository: no storage exists")
return errors.ErrNoStorage.WithDetail(nil)
}
err := store.Delete(gun)
if err != nil {
logger.Error("500 DELETE repository")
return errors.ErrUnknown.WithDetail(err)
}
logger.Infof("trust data deleted for %s", gun)
return nil
}
// GetKeyHandler returns a public key for the specified role, creating a new
// key-pair if one doesn't yet exist
func GetKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
defer r.Body.Close()
vars := mux.Vars(r)
return getKeyHandler(ctx, w, r, vars)
}
func getKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
role, gun, keyAlgorithm, store, crypto, err := setupKeyHandler(ctx, w, r, vars, http.MethodGet)
if err != nil {
return err
}
var key data.PublicKey
logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
switch role {
case data.CanonicalTimestampRole:
key, err = timestamp.GetOrCreateTimestampKey(gun, store, crypto, keyAlgorithm)
case data.CanonicalSnapshotRole:
key, err = snapshot.GetOrCreateSnapshotKey(gun, store, crypto, keyAlgorithm)
default:
logger.Infof("400 GET %s key: %v", role, err)
return errors.ErrInvalidRole.WithDetail(role)
}
if err != nil {
logger.Errorf("500 GET %s key: %v", role, err)
return errors.ErrUnknown.WithDetail(err)
}
out, err := json.Marshal(key)
if err != nil {
logger.Errorf("500 GET %s key", role)
return errors.ErrUnknown.WithDetail(err)
}
logger.Debugf("200 GET %s key", role)
w.Write(out)
return nil
}
// RotateKeyHandler rotates the remote key for the specified role, returning the public key
func RotateKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
defer r.Body.Close()
vars := mux.Vars(r)
return rotateKeyHandler(ctx, w, r, vars)
}
func rotateKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string) error {
role, gun, keyAlgorithm, store, crypto, err := setupKeyHandler(ctx, w, r, vars, http.MethodPost)
if err != nil {
return err
}
var key data.PublicKey
logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
switch role {
case data.CanonicalTimestampRole:
key, err = timestamp.RotateTimestampKey(gun, store, crypto, keyAlgorithm)
case data.CanonicalSnapshotRole:
key, err = snapshot.RotateSnapshotKey(gun, store, crypto, keyAlgorithm)
default:
logger.Infof("400 POST %s key: %v", role, err)
return errors.ErrInvalidRole.WithDetail(role)
}
if err != nil {
logger.Errorf("500 POST %s key: %v", role, err)
return errors.ErrUnknown.WithDetail(err)
}
out, err := json.Marshal(key)
if err != nil {
logger.Errorf("500 POST %s key", role)
return errors.ErrUnknown.WithDetail(err)
}
logger.Debugf("200 POST %s key", role)
w.Write(out)
return nil
}
// To be called before getKeyHandler or rotateKeyHandler
func setupKeyHandler(ctx context.Context, w http.ResponseWriter, r *http.Request, vars map[string]string, actionVerb string) (data.RoleName, data.GUN, string, storage.MetaStore, signed.CryptoService, error) {
gun := data.GUN(vars["gun"])
logger := ctxu.GetLoggerWithField(ctx, gun, "gun")
if gun == "" {
logger.Infof("400 %s no gun in request", actionVerb)
return "", "", "", nil, nil, errors.ErrUnknown.WithDetail("no gun")
}
role := data.RoleName(vars["tufRole"])
if role == "" {
logger.Infof("400 %s no role in request", actionVerb)
return "", "", "", nil, nil, errors.ErrUnknown.WithDetail("no role")
}
s := ctx.Value(notary.CtxKeyMetaStore)
store, ok := s.(storage.MetaStore)
if !ok || store == nil {
logger.Errorf("500 %s storage not configured", actionVerb)
return "", "", "", nil, nil, errors.ErrNoStorage.WithDetail(nil)
}
c := ctx.Value(notary.CtxKeyCryptoSvc)
crypto, ok := c.(signed.CryptoService)
if !ok || crypto == nil {
logger.Errorf("500 %s crypto service not configured", actionVerb)
return "", "", "", nil, nil, errors.ErrNoCryptoService.WithDetail(nil)
}
algo := ctx.Value(notary.CtxKeyKeyAlgo)
keyAlgo, ok := algo.(string)
if !ok || keyAlgo == "" {
logger.Errorf("500 %s key algorithm not configured", actionVerb)
return "", "", "", nil, nil, errors.ErrNoKeyAlgorithm.WithDetail(nil)
}
return role, gun, keyAlgo, store, crypto, nil
}
// NotFoundHandler is used as a generic catch all handler to return the ErrMetadataNotFound
// 404 response
func NotFoundHandler(ctx context.Context, w http.ResponseWriter, r *http.Request) error {
return errors.ErrMetadataNotFound.WithDetail(nil)
}
// Copyright 2023 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package handlers
import (
"bytes"
"context"
"io"
"net/http"
"strings"
"testing"
fuzz "github.com/AdaLogics/go-fuzz-headers"
ctxu "github.com/docker/distribution/context"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/server/storage"
store "github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/tuf"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
"github.com/theupdateframework/notary/tuf/testutils"
)
func getFuzzContext(h handlerStateFuzz) context.Context {
ctx := context.Background()
ctx = context.WithValue(ctx, notary.CtxKeyMetaStore, h.store)
ctx = context.WithValue(ctx, notary.CtxKeyKeyAlgo, h.keyAlgo)
ctx = context.WithValue(ctx, notary.CtxKeyCryptoSvc, h.crypto)
return ctxu.WithLogger(ctx, ctxu.GetRequestLogger(ctx))
}
type handlerStateFuzz struct {
store interface{}
crypto interface{}
keyAlgo interface{}
}
type MockWriter struct{}
func (m MockWriter) Header() http.Header {
return http.Header{}
}
func (m MockWriter) Write(data []byte) (int, error) {
return 0, nil
}
func (m MockWriter) WriteHeader(statusCode int) {
}
func mustCopyKeysFuzz(from signed.CryptoService, roles ...data.RoleName) (signed.CryptoService, error) {
return testutils.CopyKeys(from, roles...)
}
var (
metaStore *storage.MemStorage
vars map[string]string
repo *tuf.Repo
cs, crypto signed.CryptoService
)
func init() {
metaStore = storage.NewMemStorage()
var gun data.GUN = "testGUN"
	vars = make(map[string]string)
	vars["gun"] = gun.String()
	var err error
	repo, cs, err = testutils.EmptyRepo(gun)
	if err != nil {
		panic(err)
	}
crypto, err = mustCopyKeysFuzz(cs, data.CanonicalTimestampRole)
if err != nil {
panic(err)
}
logrus.SetLevel(logrus.PanicLevel)
}
func FuzzAtomicUpdateHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, body, headerData []byte) {
ff := fuzz.NewConsumer(headerData)
r, err := http.NewRequest("POST", "", bytes.NewReader(body))
if err != nil {
t.Skip()
}
noOfHeaders, err := ff.GetInt()
if err != nil {
t.Skip()
}
for i := 0; i < noOfHeaders%5; i++ {
key, err := ff.GetString()
if err != nil {
t.Skip()
}
value, err := ff.GetString()
if err != nil {
t.Skip()
}
r.Header.Add(key, value)
}
boundary, err := ff.GetString()
if err != nil {
t.Skip()
}
var mp strings.Builder
mp.WriteString("multipart/form-data; boundary=")
mp.WriteString(boundary)
r.Header.Add("Content-Type", mp.String())
state := handlerStateFuzz{store: metaStore, crypto: crypto}
AtomicUpdateHandler(getFuzzContext(state), MockWriter{}, r)
})
}
func FuzzAtomicUpdateHandlerMultipart(f *testing.F) {
f.Fuzz(func(t *testing.T, body, headerData []byte) {
ff := fuzz.NewConsumer(headerData)
metas := make(map[string][]byte)
ff.FuzzMap(&metas)
r, err := store.NewMultiPartMetaRequest("", metas)
if err != nil {
t.Skip()
}
reader, err := r.MultipartReader()
if err != nil {
t.Skip()
}
for {
part, err := reader.NextPart()
if err == io.EOF {
break
}
if part == nil {
t.Skip()
}
if part.Header == nil {
t.Skip()
}
}
state := handlerStateFuzz{store: metaStore, crypto: crypto}
AtomicUpdateHandler(getFuzzContext(state), MockWriter{}, r)
})
}
func FuzzGetKeyHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, body, headerData []byte) {
ff := fuzz.NewConsumer(headerData)
r, err := http.NewRequest("POST", "", bytes.NewReader(body))
if err != nil {
t.Skip()
}
noOfHeaders, err := ff.GetInt()
if err != nil {
t.Skip()
}
for i := 0; i < noOfHeaders%5; i++ {
key, err := ff.GetString()
if err != nil {
t.Skip()
}
value, err := ff.GetString()
if err != nil {
t.Skip()
}
r.Header.Add(key, value)
}
boundary, err := ff.GetString()
if err != nil {
t.Skip()
}
var mp strings.Builder
mp.WriteString("multipart/form-data; boundary=")
mp.WriteString(boundary)
r.Header.Add("Content-Type", mp.String())
state := handlerStateFuzz{store: metaStore, crypto: crypto}
GetKeyHandler(getFuzzContext(state), MockWriter{}, r)
})
}
func FuzzChangefeed(f *testing.F) {
f.Fuzz(func(t *testing.T, body, headerData []byte, urlString string) {
ff := fuzz.NewConsumer(headerData)
r, err := http.NewRequest("POST", urlString, bytes.NewReader(body))
if err != nil {
t.Skip()
}
noOfHeaders, err := ff.GetInt()
if err != nil {
t.Skip()
}
for i := 0; i < noOfHeaders%5; i++ {
key, err := ff.GetString()
if err != nil {
t.Skip()
}
value, err := ff.GetString()
if err != nil {
t.Skip()
}
r.Header.Add(key, value)
}
boundary, err := ff.GetString()
if err != nil {
t.Skip()
}
var mp strings.Builder
mp.WriteString("multipart/form-data; boundary=")
mp.WriteString(boundary)
r.Header.Add("Content-Type", mp.String())
state := handlerStateFuzz{store: metaStore, crypto: crypto}
Changefeed(getFuzzContext(state), MockWriter{}, r)
})
}
func FuzzRotateKeyHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, body, headerData []byte, urlString string) {
ff := fuzz.NewConsumer(headerData)
r, err := http.NewRequest("POST", urlString, bytes.NewReader(body))
if err != nil {
t.Skip()
}
noOfHeaders, err := ff.GetInt()
if err != nil {
t.Skip()
}
for i := 0; i < noOfHeaders%5; i++ {
key, err := ff.GetString()
if err != nil {
t.Skip()
}
value, err := ff.GetString()
if err != nil {
t.Skip()
}
r.Header.Add(key, value)
}
boundary, err := ff.GetString()
if err != nil {
t.Skip()
}
var mp strings.Builder
mp.WriteString("multipart/form-data; boundary=")
mp.WriteString(boundary)
r.Header.Add("Content-Type", mp.String())
state := handlerStateFuzz{store: metaStore, crypto: crypto}
RotateKeyHandler(getFuzzContext(state), MockWriter{}, r)
})
}
func FuzzDeleteHandler(f *testing.F) {
f.Fuzz(func(t *testing.T, body, headerData []byte, urlString string) {
ff := fuzz.NewConsumer(headerData)
r, err := http.NewRequest("POST", urlString, bytes.NewReader(body))
if err != nil {
t.Skip()
}
noOfHeaders, err := ff.GetInt()
if err != nil {
t.Skip()
}
for i := 0; i < noOfHeaders%5; i++ {
key, err := ff.GetString()
if err != nil {
t.Skip()
}
value, err := ff.GetString()
if err != nil {
t.Skip()
}
r.Header.Add(key, value)
}
boundary, err := ff.GetString()
if err != nil {
t.Skip()
}
var mp strings.Builder
mp.WriteString("multipart/form-data; boundary=")
mp.WriteString(boundary)
r.Header.Add("Content-Type", mp.String())
state := handlerStateFuzz{store: metaStore, crypto: crypto}
DeleteHandler(getFuzzContext(state), MockWriter{}, r)
})
}
package handlers
import (
"strconv"
"time"
"golang.org/x/net/context"
"encoding/hex"
"encoding/json"
"fmt"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/server/errors"
"github.com/theupdateframework/notary/server/storage"
"github.com/theupdateframework/notary/server/timestamp"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
)
func getRole(ctx context.Context, store storage.MetaStore, gun data.GUN, role data.RoleName, checksum, version string) (*time.Time, []byte, error) {
var (
lastModified *time.Time
out []byte
err error
)
if checksum != "" {
lastModified, out, err = store.GetChecksum(gun, role, checksum)
} else if version != "" {
v, vErr := strconv.Atoi(version)
if vErr != nil {
return nil, nil, errors.ErrMetadataNotFound.WithDetail(vErr)
}
lastModified, out, err = store.GetVersion(gun, role, v)
} else {
// the timestamp and snapshot might be server signed so are
// handled specially
switch role {
case data.CanonicalTimestampRole, data.CanonicalSnapshotRole:
return getMaybeServerSigned(ctx, store, gun, role)
}
lastModified, out, err = store.GetCurrent(gun, role)
}
if err != nil {
if _, ok := err.(storage.ErrNotFound); ok {
return nil, nil, errors.ErrMetadataNotFound.WithDetail(err)
}
return nil, nil, errors.ErrUnknown.WithDetail(err)
}
if out == nil {
return nil, nil, errors.ErrMetadataNotFound.WithDetail(nil)
}
return lastModified, out, nil
}
// getMaybeServerSigned returns the current snapshot or timestamp (based on the
// role passed), or an error. In retrieving the timestamp and snapshot, based
// on the keys held by the server, a new one might be generated and signed due
// to expiry of the previous one or updates to other roles.
func getMaybeServerSigned(ctx context.Context, store storage.MetaStore, gun data.GUN, role data.RoleName) (*time.Time, []byte, error) {
cryptoServiceVal := ctx.Value(notary.CtxKeyCryptoSvc)
cryptoService, ok := cryptoServiceVal.(signed.CryptoService)
if !ok {
return nil, nil, errors.ErrNoCryptoService.WithDetail(nil)
}
var (
lastModified *time.Time
out []byte
err error
)
if role != data.CanonicalTimestampRole && role != data.CanonicalSnapshotRole {
return nil, nil, fmt.Errorf("role %s cannot be server signed", role.String())
}
lastModified, out, err = timestamp.GetOrCreateTimestamp(gun, store, cryptoService)
if err != nil {
switch err.(type) {
case *storage.ErrNoKey, storage.ErrNotFound:
return nil, nil, errors.ErrMetadataNotFound.WithDetail(err)
default:
return nil, nil, errors.ErrUnknown.WithDetail(err)
}
}
// If we wanted the snapshot, get it by checksum from the timestamp data
if role == data.CanonicalSnapshotRole {
ts := new(data.SignedTimestamp)
if err := json.Unmarshal(out, ts); err != nil {
return nil, nil, err
}
snapshotChecksums, err := ts.GetSnapshot()
if err != nil || snapshotChecksums == nil {
return nil, nil, fmt.Errorf("could not retrieve latest snapshot checksum")
}
if snapshotSHA256Bytes, ok := snapshotChecksums.Hashes[notary.SHA256]; ok {
snapshotSHA256Hex := hex.EncodeToString(snapshotSHA256Bytes[:])
return store.GetChecksum(gun, role, snapshotSHA256Hex)
}
return nil, nil, fmt.Errorf("could not retrieve sha256 snapshot checksum")
}
return lastModified, out, nil
}
package handlers
import (
"fmt"
"sort"
"github.com/sirupsen/logrus"
"github.com/docker/go/canonical/json"
"github.com/theupdateframework/notary/server/storage"
"github.com/theupdateframework/notary/trustpinning"
"github.com/theupdateframework/notary/tuf"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
"github.com/theupdateframework/notary/tuf/utils"
"github.com/theupdateframework/notary/tuf/validation"
)
// validateUpdate checks that the updates being pushed
// are semantically correct and the signatures are correct.
// A list of possibly modified updates is returned if all
// validation was successful. This allows the snapshot to be
// created and added if snapshotting has been delegated to the
// server
func validateUpdate(cs signed.CryptoService, gun data.GUN, updates []storage.MetaUpdate, store storage.MetaStore) ([]storage.MetaUpdate, error) {
// some delegated targets role may be invalid based on other updates
// that have been made by other clients. We'll rebuild the slice of
// updates with only the things we should actually update
updatesToApply := make([]storage.MetaUpdate, 0, len(updates))
roles := make(map[data.RoleName]storage.MetaUpdate)
for _, v := range updates {
roles[v.Role] = v
}
builder := tuf.NewRepoBuilder(gun, cs, trustpinning.TrustPinConfig{})
if err := loadFromStore(gun, data.CanonicalRootRole, builder, store); err != nil {
if _, ok := err.(storage.ErrNotFound); !ok {
return nil, err
}
}
if rootUpdate, ok := roles[data.CanonicalRootRole]; ok {
currentRootVersion := builder.GetLoadedVersion(data.CanonicalRootRole)
if rootUpdate.Version != currentRootVersion && rootUpdate.Version != currentRootVersion+1 {
msg := fmt.Sprintf("Root modifications must increment the version. Current %d, new %d", currentRootVersion, rootUpdate.Version)
return nil, validation.ErrBadRoot{Msg: msg}
}
builder = builder.BootstrapNewBuilder()
if err := builder.Load(data.CanonicalRootRole, rootUpdate.Data, currentRootVersion, false); err != nil {
return nil, validation.ErrBadRoot{Msg: err.Error()}
}
logrus.Debug("Successfully validated root")
updatesToApply = append(updatesToApply, rootUpdate)
} else if !builder.IsLoaded(data.CanonicalRootRole) {
return nil, validation.ErrValidation{Msg: "no pre-existing root and no root provided in update."}
}
targetsToUpdate, err := loadAndValidateTargets(gun, builder, roles, store)
if err != nil {
return nil, err
}
updatesToApply = append(updatesToApply, targetsToUpdate...)
// there's no need to load files from the database if no targets etc...
// were uploaded because that means they haven't been updated and
// the snapshot will already contain the correct hashes and sizes for
// those targets (incl. delegated targets)
logrus.Debug("Successfully validated targets")
// At this point, root and targets must have been loaded into the repo
if snapshotUpdate, ok := roles[data.CanonicalSnapshotRole]; ok {
if err := builder.Load(data.CanonicalSnapshotRole, snapshotUpdate.Data, 1, false); err != nil {
return nil, validation.ErrBadSnapshot{Msg: err.Error()}
}
logrus.Debug("Successfully validated snapshot")
updatesToApply = append(updatesToApply, roles[data.CanonicalSnapshotRole])
} else {
// Check:
// - we have a snapshot key
// - it matches a snapshot key signed into the root.json
// Then:
// - generate a new snapshot
// - add it to the updates
update, err := generateSnapshot(gun, builder, store)
if err != nil {
return nil, err
}
updatesToApply = append(updatesToApply, *update)
}
// generate a timestamp immediately
update, err := generateTimestamp(gun, builder, store)
if err != nil {
return nil, err
}
return append(updatesToApply, *update), nil
}
func loadAndValidateTargets(gun data.GUN, builder tuf.RepoBuilder, roles map[data.RoleName]storage.MetaUpdate, store storage.MetaStore) ([]storage.MetaUpdate, error) {
targetsRoles := make(utils.RoleList, 0)
for role := range roles {
if role == data.CanonicalTargetsRole || data.IsDelegation(role) {
targetsRoles = append(targetsRoles, role.String())
}
}
// N.B. RoleList sorts paths with fewer segments first.
// By sorting, we'll always process shallower targets updates before deeper
// ones (i.e. we'll load and validate targets before targets/foo). This
// helps ensure we only load from storage when necessary in a cleaner way.
sort.Sort(targetsRoles)
updatesToApply := make([]storage.MetaUpdate, 0, len(targetsRoles))
for _, role := range targetsRoles {
// don't load parent if current role is "targets",
// we must load all ancestor roles, starting from `targets` and working down,
// for delegations to validate the full parent chain
var parentsToLoad []data.RoleName
roleName := data.RoleName(role)
ancestorRole := roleName
for ancestorRole != data.CanonicalTargetsRole {
ancestorRole = ancestorRole.Parent()
if !builder.IsLoaded(ancestorRole) {
parentsToLoad = append(parentsToLoad, ancestorRole)
}
}
for i := len(parentsToLoad) - 1; i >= 0; i-- {
if err := loadFromStore(gun, parentsToLoad[i], builder, store); err != nil {
// if the parent doesn't exist, just keep going - loading the role will eventually fail
// due to it being an invalid role
if _, ok := err.(storage.ErrNotFound); !ok {
return nil, err
}
}
}
if err := builder.Load(roleName, roles[roleName].Data, 1, false); err != nil {
logrus.Error("ErrBadTargets: ", err.Error())
return nil, validation.ErrBadTargets{Msg: err.Error()}
}
updatesToApply = append(updatesToApply, roles[roleName])
}
return updatesToApply, nil
}
// generateSnapshot generates a new snapshot from the previous one in the store - this assumes all
// the other roles except timestamp have already been set on the repo, and will set the generated
// snapshot on the repo as well
func generateSnapshot(gun data.GUN, builder tuf.RepoBuilder, store storage.MetaStore) (*storage.MetaUpdate, error) {
var prev *data.SignedSnapshot
_, currentJSON, err := store.GetCurrent(gun, data.CanonicalSnapshotRole)
if err == nil {
prev = new(data.SignedSnapshot)
if err = json.Unmarshal(currentJSON, prev); err != nil {
logrus.Error("Failed to unmarshal existing snapshot for GUN ", gun)
return nil, err
}
}
if _, ok := err.(storage.ErrNotFound); !ok && err != nil {
return nil, err
}
meta, ver, err := builder.GenerateSnapshot(prev)
switch err.(type) {
case nil:
return &storage.MetaUpdate{
Role: data.CanonicalSnapshotRole,
Version: ver,
Data: meta,
}, nil
case signed.ErrInsufficientSignatures, signed.ErrNoKeys, signed.ErrRoleThreshold:
// If we cannot sign the snapshot, then we don't have keys for the snapshot,
// and the client should have submitted a snapshot
return nil, validation.ErrBadHierarchy{
Missing: data.CanonicalSnapshotRole.String(),
Msg: "no snapshot was included in update and server does not hold current snapshot key for repository"}
default:
return nil, validation.ErrValidation{Msg: err.Error()}
}
}
// generateTimestamp generates a new timestamp from the previous one in the store - this assumes all
// the other roles have already been set on the repo, and will set the generated timestamp on the repo as well
func generateTimestamp(gun data.GUN, builder tuf.RepoBuilder, store storage.MetaStore) (*storage.MetaUpdate, error) {
var prev *data.SignedTimestamp
_, currentJSON, err := store.GetCurrent(gun, data.CanonicalTimestampRole)
switch err.(type) {
case nil:
prev = new(data.SignedTimestamp)
if err := json.Unmarshal(currentJSON, prev); err != nil {
logrus.Error("Failed to unmarshal existing timestamp for GUN ", gun)
return nil, err
}
case storage.ErrNotFound:
break // this is the first timestamp ever for the repo
default:
return nil, err
}
meta, ver, err := builder.GenerateTimestamp(prev)
switch err.(type) {
case nil:
return &storage.MetaUpdate{
Role: data.CanonicalTimestampRole,
Version: ver,
Data: meta,
}, nil
case signed.ErrInsufficientSignatures, signed.ErrNoKeys:
// If we cannot sign the timestamp, then we don't have keys for the timestamp,
// and the client screwed up their root
return nil, validation.ErrBadRoot{
Msg: "no timestamp keys exist on the server",
}
default:
return nil, validation.ErrValidation{Msg: err.Error()}
}
}
func loadFromStore(gun data.GUN, roleName data.RoleName, builder tuf.RepoBuilder, store storage.MetaStore) error {
_, metaJSON, err := store.GetCurrent(gun, roleName)
if err != nil {
return err
}
return builder.Load(roleName, metaJSON, 1, true)
}
package storage
import (
"fmt"
)
// ErrOldVersion is returned when a newer version of TUF metadata is already available
type ErrOldVersion struct{}
// Error implements error
func (err ErrOldVersion) Error() string {
return "Error updating metadata. A newer version is already available"
}
// ErrNotFound is returned when TUF metadata isn't found for a specific record
type ErrNotFound struct{}
// Error implements error
func (err ErrNotFound) Error() string {
return "No record found"
}
// ErrKeyExists is returned when a key already exists
type ErrKeyExists struct {
gun string
role string
}
// Error implements error
func (err ErrKeyExists) Error() string {
return fmt.Sprintf("Error, timestamp key already exists for %s:%s", err.gun, err.role)
}
// ErrNoKey is returned when no timestamp key is found
type ErrNoKey struct {
gun string
}
// Error implements error
func (err ErrNoKey) Error() string {
return fmt.Sprintf("Error, no timestamp key found for %s", err.gun)
}
// ErrBadQuery is used when the parameters provided cannot be appropriately
// coerced.
type ErrBadQuery struct {
msg string
}
func (err ErrBadQuery) Error() string {
return fmt.Sprintf("did not recognize parameters: %s", err.msg)
}
// Copyright 2023 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package storage
import (
"io/ioutil"
"os"
"path/filepath"
"strconv"
"testing"
fuzz "github.com/AdaLogics/go-fuzz-headers"
_ "github.com/mattn/go-sqlite3"
"github.com/theupdateframework/notary/tuf/data"
)
func SetupSQLDBFuzz(dbtype, dburl string) *SQLStorage {
dbStore, err := NewSQLStorage(dbtype, dburl)
if err != nil {
panic(err)
}
// Create the DB tables
err = CreateTUFTable(dbStore.DB)
if err != nil {
panic(err)
}
err = CreateChangefeedTable(dbStore.DB)
if err != nil {
panic(err)
}
// verify that the tables are empty
var count int
query := dbStore.DB.Model(&TUFFile{}).Count(&count)
if query.Error != nil {
panic(query.Error)
}
if count != 0 {
panic("count should be 0")
}
return dbStore
}
func sqlite3SetupFuzz() (*SQLStorage, func()) {
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
if err != nil {
panic(err)
}
dbStore := SetupSQLDBFuzz("sqlite3", filepath.Join(tempBaseDir, "test_db"))
var cleanup = func() {
dbStore.DB.Close()
os.RemoveAll(tempBaseDir)
}
return dbStore, cleanup
}
func FuzzServerStorageSQL(f *testing.F) {
f.Fuzz(func(t *testing.T, fuzzData []byte) {
ff := fuzz.NewConsumer(fuzzData)
var noOfMetas int
var err error
var gunName string
sTufMetas := make([]StoredTUFMeta, 0)
noOfMetas, err = ff.GetInt()
if err != nil {
t.Skip()
}
noOfCalls, err := ff.GetInt()
if err != nil {
t.Skip()
}
if noOfCalls == 0 {
noOfCalls = 1
}
for i := 0; i < noOfCalls%20; i++ {
noOfMetas, err = ff.GetInt()
if err != nil {
t.Skip()
}
if noOfMetas == 0 {
noOfMetas = 1
}
for j := 0; j < noOfMetas%5; j++ {
sm := StoredTUFMeta{}
err := ff.GenerateStruct(&sm)
if err != nil {
t.Skip()
}
sTufMetas = append(sTufMetas, sm)
}
}
if len(sTufMetas) == 0 {
t.Skip()
}
noOfCalls, err = ff.GetInt()
if err != nil {
t.Skip()
}
if noOfCalls == 0 {
noOfCalls = 1
}
dbStore, cleanup := sqlite3SetupFuzz()
defer cleanup()
for i := 0; i < noOfCalls%20; i++ {
callType, err := ff.GetInt()
if err != nil {
t.Skip()
}
switch callType % 8 {
case 0:
ind, err := ff.GetInt()
if err != nil {
t.Skip()
}
gunName, err = ff.GetString()
if err != nil {
t.Skip()
}
dbStore.UpdateCurrent(data.GUN(gunName), MakeUpdate(sTufMetas[ind%len(sTufMetas)]))
dbStore.Delete(data.GUN(gunName))
case 1:
gunName, err = ff.GetString()
if err != nil {
t.Skip()
}
dbStore.Delete(data.GUN(gunName))
case 2:
dbStore.CheckHealth()
case 3:
changeID, err := ff.GetString()
if err != nil {
t.Skip()
}
_, err = strconv.ParseInt(changeID, 10, 32)
if err != nil {
continue
}
records, err := ff.GetInt()
if err != nil {
t.Skip()
}
filterName, err := ff.GetString()
if err != nil {
t.Skip()
}
_, _ = dbStore.GetChanges(changeID, records, filterName)
case 4:
noOfUpdates, err := ff.GetInt()
if err != nil {
t.Skip()
}
updates := make([]MetaUpdate, 0)
for i := 0; i < noOfUpdates%10; i++ {
update := &MetaUpdate{}
ff.GenerateStruct(update)
updates = append(updates, *update)
}
gunName, err := ff.GetString()
if err != nil {
t.Skip()
}
dbStore.UpdateMany(data.GUN(gunName), updates)
case 5:
gunName, err := ff.GetString()
if err != nil {
t.Skip()
}
role, gun := data.CanonicalRootRole, data.GUN(gunName)
cs, err := ff.GetString()
if err != nil {
t.Skip()
}
_, _, _ = dbStore.GetChecksum(gun, role, cs)
case 6:
gunName, err := ff.GetString()
if err != nil {
t.Skip()
}
role, gun := data.CanonicalRootRole, data.GUN(gunName)
version, err := ff.GetInt()
if err != nil {
t.Skip()
}
_, _, _ = dbStore.GetVersion(gun, role, version)
case 7:
gunName, err := ff.GetString()
if err != nil {
t.Skip()
}
role, gun := data.CanonicalRootRole, data.GUN(gunName)
_, _, _ = dbStore.GetCurrent(gun, role)
}
}
})
}
func FuzzServerStorageMemStorage(f *testing.F) {
f.Fuzz(func(t *testing.T, fuzzData []byte) {
ff := fuzz.NewConsumer(fuzzData)
var noOfMetas int
var err error
var gunName string
sTufMetas := make([]StoredTUFMeta, 0)
noOfMetas, err = ff.GetInt()
if err != nil {
t.Skip()
}
noOfCalls, err := ff.GetInt()
if err != nil {
t.Skip()
}
if noOfCalls == 0 {
noOfCalls = 1
}
for i := 0; i < noOfCalls%20; i++ {
noOfMetas, err = ff.GetInt()
if err != nil {
t.Skip()
}
if noOfMetas == 0 {
noOfMetas = 1
}
for j := 0; j < noOfMetas%5; j++ {
sm := StoredTUFMeta{}
err := ff.GenerateStruct(&sm)
if err != nil {
t.Skip()
}
sTufMetas = append(sTufMetas, sm)
}
}
if len(sTufMetas) == 0 {
t.Skip()
}
noOfCalls, err = ff.GetInt()
if err != nil {
t.Skip()
}
if noOfCalls == 0 {
noOfCalls = 1
}
dbStore := NewMemStorage()
for i := 0; i < noOfCalls%20; i++ {
callType, err := ff.GetInt()
if err != nil {
t.Skip()
}
switch callType % 6 {
case 0:
ind, err := ff.GetInt()
if err != nil {
t.Skip()
}
gunName, err = ff.GetString()
if err != nil {
t.Skip()
}
dbStore.UpdateCurrent(data.GUN(gunName), MakeUpdate(sTufMetas[ind%len(sTufMetas)]))
dbStore.Delete(data.GUN(gunName))
case 1:
gunName, err = ff.GetString()
if err != nil {
t.Skip()
}
dbStore.Delete(data.GUN(gunName))
case 2:
noOfUpdates, err := ff.GetInt()
if err != nil {
t.Skip()
}
updates := make([]MetaUpdate, 0)
for i := 0; i < noOfUpdates%10; i++ {
update := &MetaUpdate{}
ff.GenerateStruct(update)
updates = append(updates, *update)
}
gunName, err := ff.GetString()
if err != nil {
t.Skip()
}
dbStore.UpdateMany(data.GUN(gunName), updates)
case 3:
changeID, err := ff.GetString()
if err != nil {
t.Skip()
}
_, err = strconv.ParseInt(changeID, 10, 32)
if err != nil {
continue
}
records, err := ff.GetInt()
if err != nil {
t.Skip()
}
filterName, err := ff.GetString()
if err != nil {
t.Skip()
}
_, _ = dbStore.GetChanges(changeID, records, filterName)
case 4:
gunName, err := ff.GetString()
if err != nil {
t.Skip()
}
role, gun := data.CanonicalRootRole, data.GUN(gunName)
cs, err := ff.GetString()
if err != nil {
t.Skip()
}
_, _, _ = dbStore.GetChecksum(gun, role, cs)
case 5:
gunName, err := ff.GetString()
if err != nil {
t.Skip()
}
role, gun := data.CanonicalRootRole, data.GUN(gunName)
version, err := ff.GetInt()
if err != nil {
t.Skip()
}
_, _, _ = dbStore.GetVersion(gun, role, version)
}
}
})
}
func FuzzServerStorageTufStorage(f *testing.F) {
f.Fuzz(func(t *testing.T, fuzzData []byte, dataName string) {
role, gun := data.CanonicalRootRole, data.GUN(dataName)
rec := SampleCustomTUFObj(gun, role, 1, nil)
dbStore, cleanup := sqlite3SetupFuzz()
defer cleanup()
s := NewTUFMetaStorage(dbStore)
_, _, _ = s.GetCurrent(rec.Gun, rec.Role)
_, _, _ = s.MetaStore.GetCurrent(rec.Gun, rec.Role)
})
}
package storage
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"sort"
"strconv"
"strings"
"sync"
"time"
"github.com/theupdateframework/notary/tuf/data"
)
type key struct {
//lint:ignore U1000 TODO check if this can be deleted
algorithm string
//lint:ignore U1000 TODO check if this can be deleted
public []byte
}
type ver struct {
version int
data []byte
createupdate time.Time
}
// we want to keep these sorted by version so that it's in increasing version
// order
type verList []ver
func (k verList) Len() int { return len(k) }
func (k verList) Swap(i, j int) { k[i], k[j] = k[j], k[i] }
func (k verList) Less(i, j int) bool {
return k[i].version < k[j].version
}
// MemStorage is really just designed for dev and testing. It is very
// inefficient in many scenarios.
type MemStorage struct {
lock sync.Mutex
tufMeta map[string]verList
keys map[string]map[string]*key
checksums map[string]map[string]ver
changes []Change
}
// NewMemStorage instantiates a MemStorage instance
func NewMemStorage() *MemStorage {
return &MemStorage{
tufMeta: make(map[string]verList),
keys: make(map[string]map[string]*key),
checksums: make(map[string]map[string]ver),
}
}
// UpdateCurrent updates the meta data for a specific role
func (st *MemStorage) UpdateCurrent(gun data.GUN, update MetaUpdate) error {
id := entryKey(gun, update.Role)
st.lock.Lock()
defer st.lock.Unlock()
if space, ok := st.tufMeta[id]; ok {
for _, v := range space {
if v.version >= update.Version {
return ErrOldVersion{}
}
}
}
version := ver{version: update.Version, data: update.Data, createupdate: time.Now()}
st.tufMeta[id] = append(st.tufMeta[id], version)
checksumBytes := sha256.Sum256(update.Data)
checksum := hex.EncodeToString(checksumBytes[:])
_, ok := st.checksums[gun.String()]
if !ok {
st.checksums[gun.String()] = make(map[string]ver)
}
st.checksums[gun.String()][checksum] = version
if update.Role == data.CanonicalTimestampRole {
st.writeChange(gun, update.Version, checksum)
}
return nil
}
// writeChange must only be called by a function already holding a lock on
// the MemStorage. Behaviour is undefined otherwise
func (st *MemStorage) writeChange(gun data.GUN, version int, checksum string) {
c := Change{
ID: strconv.Itoa(len(st.changes) + 1),
GUN: gun.String(),
Version: version,
SHA256: checksum,
CreatedAt: time.Now(),
Category: changeCategoryUpdate,
}
st.changes = append(st.changes, c)
}
// UpdateMany updates multiple TUF records
func (st *MemStorage) UpdateMany(gun data.GUN, updates []MetaUpdate) error {
st.lock.Lock()
defer st.lock.Unlock()
versioner := make(map[string]map[int]struct{})
constant := struct{}{}
// ensure that we only update in one transaction
for _, u := range updates {
id := entryKey(gun, u.Role)
// prevent duplicate versions of the same role
if _, ok := versioner[u.Role.String()][u.Version]; ok {
return ErrOldVersion{}
}
if _, ok := versioner[u.Role.String()]; !ok {
versioner[u.Role.String()] = make(map[int]struct{})
}
versioner[u.Role.String()][u.Version] = constant
if space, ok := st.tufMeta[id]; ok {
for _, v := range space {
if v.version >= u.Version {
return ErrOldVersion{}
}
}
}
}
for _, u := range updates {
id := entryKey(gun, u.Role)
version := ver{version: u.Version, data: u.Data, createupdate: time.Now()}
st.tufMeta[id] = append(st.tufMeta[id], version)
sort.Sort(st.tufMeta[id]) // ensure that it's sorted
checksumBytes := sha256.Sum256(u.Data)
checksum := hex.EncodeToString(checksumBytes[:])
_, ok := st.checksums[gun.String()]
if !ok {
st.checksums[gun.String()] = make(map[string]ver)
}
st.checksums[gun.String()][checksum] = version
if u.Role == data.CanonicalTimestampRole {
st.writeChange(gun, u.Version, checksum)
}
}
return nil
}
// GetCurrent returns the createupdate date and metadata for a given role, under a GUN.
func (st *MemStorage) GetCurrent(gun data.GUN, role data.RoleName) (*time.Time, []byte, error) {
id := entryKey(gun, role)
st.lock.Lock()
defer st.lock.Unlock()
space, ok := st.tufMeta[id]
if !ok || len(space) == 0 {
return nil, nil, ErrNotFound{}
}
return &(space[len(space)-1].createupdate), space[len(space)-1].data, nil
}
// GetChecksum returns the create/update date and metadata matching the given role, GUN, and checksum.
func (st *MemStorage) GetChecksum(gun data.GUN, role data.RoleName, checksum string) (*time.Time, []byte, error) {
st.lock.Lock()
defer st.lock.Unlock()
space, ok := st.checksums[gun.String()][checksum]
if !ok || len(space.data) == 0 {
return nil, nil, ErrNotFound{}
}
return &(space.createupdate), space.data, nil
}
// GetVersion gets a specific TUF record by its version
func (st *MemStorage) GetVersion(gun data.GUN, role data.RoleName, version int) (*time.Time, []byte, error) {
st.lock.Lock()
defer st.lock.Unlock()
id := entryKey(gun, role)
for _, ver := range st.tufMeta[id] {
if ver.version == version {
return &(ver.createupdate), ver.data, nil
}
}
return nil, nil, ErrNotFound{}
}
// Delete deletes all the metadata for a given GUN
func (st *MemStorage) Delete(gun data.GUN) error {
st.lock.Lock()
defer st.lock.Unlock()
l := len(st.tufMeta)
for k := range st.tufMeta {
if strings.HasPrefix(k, gun.String()) {
delete(st.tufMeta, k)
}
}
if l == len(st.tufMeta) {
// we didn't delete anything, don't write change.
return nil
}
delete(st.checksums, gun.String())
c := Change{
ID: strconv.Itoa(len(st.changes) + 1),
GUN: gun.String(),
Category: changeCategoryDeletion,
CreatedAt: time.Now(),
}
st.changes = append(st.changes, c)
return nil
}
// GetChanges returns a []Change starting from but excluding the record
// identified by changeID. In the context of the memory store, changeID
// is simply an index into st.changes. The ID of a change is its
// index+1, both to match the SQL implementations, and so that the first
// change can be retrieved by providing ID 0.
func (st *MemStorage) GetChanges(changeID string, records int, filterName string) ([]Change, error) {
var (
id int64
err error
)
if changeID == "" {
id = 0
} else {
id, err = strconv.ParseInt(changeID, 10, 32)
if err != nil {
return nil, ErrBadQuery{msg: fmt.Sprintf("change ID expected to be integer, provided ID was: %s", changeID)}
}
}
var (
start = int(id)
toInspect []Change
)
reversed := id < 0
if records < 0 {
reversed = true
records = -records
}
if len(st.changes) <= int(id) && !reversed {
// no records to return as we're essentially trying to retrieve
// changes that haven't happened yet.
return nil, nil
}
// technically only -1 is a valid negative input, but we're going to be
// broad in what we accept here to reduce the need to error and instead
// act in a "do what I mean not what I say" fashion. Same logic for
// requesting changeID < 0 but not asking for reversed, we're just going
// to force it to be reversed.
if start < 0 {
// need to add one so we don't later slice off the last element
// when calculating toInspect.
start = len(st.changes) + 1
}
// reduce to only look at changes we're interested in
if reversed {
if start > len(st.changes) {
toInspect = st.changes
} else {
toInspect = st.changes[:start-1]
}
} else {
toInspect = st.changes[start:]
}
// if we're not doing any filtering
if filterName == "" {
// if the pageSize is larger than the total records
// that could be returned, return them all
if records >= len(toInspect) {
return toInspect, nil
}
// if we're going backwards, return the last pageSize records
if reversed {
return toInspect[len(toInspect)-records:], nil
}
// otherwise return pageSize records from front
return toInspect[:records], nil
}
return getFilteredChanges(toInspect, filterName, records, reversed), nil
}
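// Illustrative sketch (hypothetical helper): forward and reverse paging over
// the in-memory changefeed, using the ID semantics described above.
func exampleChangefeedPaging(st *MemStorage) ([]Change, []Change, error) {
// oldest first: everything after change ID 0, up to 10 records
forward, err := st.GetChanges("0", 10, "")
if err != nil {
return nil, nil, err
}
// "-1" means start from the latest change and work backwards; the last 10
// records are still returned oldest to newest
backward, err := st.GetChanges("-1", 10, "")
return forward, backward, err
}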
func getFilteredChanges(toInspect []Change, filterName string, records int, reversed bool) []Change {
res := make([]Change, 0, records)
if reversed {
for i := len(toInspect) - 1; i >= 0; i-- {
if toInspect[i].GUN == filterName {
res = append(res, toInspect[i])
}
if len(res) == records {
break
}
}
// results are currently newest to oldest, should be oldest to newest
for i, j := 0, len(res)-1; i < j; i, j = i+1, j-1 {
res[i], res[j] = res[j], res[i]
}
} else {
for _, c := range toInspect {
if c.GUN == filterName {
res = append(res, c)
}
if len(res) == records {
break
}
}
}
return res
}
func entryKey(gun data.GUN, role data.RoleName) string {
return fmt.Sprintf("%s.%s", gun, role)
}
package storage
import (
"crypto/sha256"
"encoding/hex"
"encoding/json"
"fmt"
"sort"
"time"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary/storage/rethinkdb"
"github.com/theupdateframework/notary/tuf/data"
gorethink "gopkg.in/rethinkdb/rethinkdb-go.v6"
)
// RethinkDB has eventual consistency. blackoutTime defines a 60 second
// blackout period: the most recent changes in the changefeed are withheld
// until they fall outside this window, giving eventual consistency time to
// work itself out.
// It's a var, not a const, so that the tests can turn it down to zero rather
// than have to include a sleep.
var blackoutTime = 60
// RDBTUFFile is a TUF file record
type RDBTUFFile struct {
rethinkdb.Timing
GunRoleVersion []interface{} `gorethink:"gun_role_version"`
Gun string `gorethink:"gun"`
Role string `gorethink:"role"`
Version int `gorethink:"version"`
SHA256 string `gorethink:"sha256"`
Data []byte `gorethink:"data"`
TSchecksum string `gorethink:"timestamp_checksum"`
}
// TableName returns the table name for the record type
func (r RDBTUFFile) TableName() string {
return TUFFileTableName
}
// Change defines the fields required for an object in the changefeed
type Change struct {
ID string `gorethink:"id,omitempty" gorm:"primary_key" sql:"not null"`
CreatedAt time.Time `gorethink:"created_at"`
GUN string `gorethink:"gun" gorm:"column:gun" sql:"type:varchar(255);not null"`
Version int `gorethink:"version" sql:"not null"`
SHA256 string `gorethink:"sha256" gorm:"column:sha256" sql:"type:varchar(64);"`
Category string `gorethink:"category" sql:"type:varchar(20);not null;"`
}
// TableName sets a specific table name for Changefeed
func (rdb Change) TableName() string {
return ChangefeedTableName
}
// gorethink can't handle an UnmarshalJSON function (see https://github.com/gorethink/gorethink/issues/201),
// so do this here in an anonymous struct
func rdbTUFFileFromJSON(data []byte) (interface{}, error) {
a := struct {
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt time.Time `json:"deleted_at"`
Gun string `json:"gun"`
Role string `json:"role"`
Version int `json:"version"`
SHA256 string `json:"sha256"`
Data []byte `json:"data"`
TSchecksum string `json:"timestamp_checksum"`
}{}
if err := json.Unmarshal(data, &a); err != nil {
return RDBTUFFile{}, err
}
return RDBTUFFile{
Timing: rethinkdb.Timing{
CreatedAt: a.CreatedAt,
UpdatedAt: a.UpdatedAt,
DeletedAt: a.DeletedAt,
},
GunRoleVersion: []interface{}{a.Gun, a.Role, a.Version},
Gun: a.Gun,
Role: a.Role,
Version: a.Version,
SHA256: a.SHA256,
Data: a.Data,
TSchecksum: a.TSchecksum,
}, nil
}
func rdbChangeFromJSON(data []byte) (interface{}, error) {
res := Change{}
if err := json.Unmarshal(data, &res); err != nil {
return Change{}, err
}
return res, nil
}
// RethinkDB implements a MetaStore against the Rethink Database
type RethinkDB struct {
dbName string
sess *gorethink.Session
user string
password string
}
// NewRethinkDBStorage initializes a RethinkDB object
func NewRethinkDBStorage(dbName, user, password string, sess *gorethink.Session) RethinkDB {
return RethinkDB{
dbName: dbName,
sess: sess,
user: user,
password: password,
}
}
// UpdateCurrent adds new metadata version for the given GUN if and only
// if it's a new role, or the version is greater than the current version
// for the role. Otherwise an error is returned.
func (rdb RethinkDB) UpdateCurrent(gun data.GUN, update MetaUpdate) error {
// empty string is the zero value for tsChecksum in the RDBTUFFile struct.
// Therefore we can just call through to updateCurrentWithTSChecksum passing
// "" for the tsChecksum value.
if err := rdb.updateCurrentWithTSChecksum(gun.String(), "", update); err != nil {
return err
}
if update.Role == data.CanonicalTimestampRole {
tsChecksumBytes := sha256.Sum256(update.Data)
return rdb.writeChange(
gun.String(),
update.Version,
hex.EncodeToString(tsChecksumBytes[:]),
changeCategoryUpdate,
)
}
return nil
}
// updateCurrentWithTSChecksum adds new metadata version for the given GUN with an associated
// checksum for the timestamp it belongs to, to afford us transaction-like functionality
func (rdb RethinkDB) updateCurrentWithTSChecksum(gun, tsChecksum string, update MetaUpdate) error {
now := time.Now()
checksum := sha256.Sum256(update.Data)
file := RDBTUFFile{
Timing: rethinkdb.Timing{
CreatedAt: now,
UpdatedAt: now,
},
GunRoleVersion: []interface{}{gun, update.Role, update.Version},
Gun: gun,
Role: update.Role.String(),
Version: update.Version,
SHA256: hex.EncodeToString(checksum[:]),
TSchecksum: tsChecksum,
Data: update.Data,
}
_, err := gorethink.DB(rdb.dbName).Table(file.TableName()).Insert(
file,
gorethink.InsertOpts{
Conflict: "error", // default but explicit for clarity of intent
},
).RunWrite(rdb.sess)
if err != nil && gorethink.IsConflictErr(err) {
return ErrOldVersion{}
}
return err
}
// Used for sorting updates alphabetically by role name, such that timestamp is always last:
// Ordering: root, snapshot, targets, targets/* (delegations), timestamp
type updateSorter []MetaUpdate
func (u updateSorter) Len() int { return len(u) }
func (u updateSorter) Swap(i, j int) { u[i], u[j] = u[j], u[i] }
func (u updateSorter) Less(i, j int) bool {
return u[i].Role < u[j].Role
}
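// Illustrative sketch, not in the original source: after sort.Stable the
// timestamp update is guaranteed to land last, e.g.
// root, snapshot, targets, targets/releases, timestamp.
func exampleSortUpdates(updates []MetaUpdate) []MetaUpdate {
sort.Stable(updateSorter(updates))
return updates
}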
// UpdateMany adds multiple new metadata for the given GUN. RethinkDB does
// not support transactions, therefore we will attempt to insert the timestamp
// last as this represents a published version of the repo. However, we will
// insert all other role data in alphabetical order first, and also include the
// associated timestamp checksum so that we can easily roll back this pseudotransaction
func (rdb RethinkDB) UpdateMany(gun data.GUN, updates []MetaUpdate) error {
// find the timestamp first and save its checksum
// then apply the updates in alphabetic role order with the timestamp last
// if there are any failures, we roll back in the same alphabetic order
var (
tsChecksum string
tsVersion int
)
for _, up := range updates {
if up.Role == data.CanonicalTimestampRole {
tsChecksumBytes := sha256.Sum256(up.Data)
tsChecksum = hex.EncodeToString(tsChecksumBytes[:])
tsVersion = up.Version
break
}
}
// alphabetize the updates by Role name
sort.Stable(updateSorter(updates))
for _, up := range updates {
if err := rdb.updateCurrentWithTSChecksum(gun.String(), tsChecksum, up); err != nil {
// roll back with best-effort deletion, and then error out
rollbackErr := rdb.deleteByTSChecksum(tsChecksum)
if rollbackErr != nil {
logrus.Errorf("Unable to rollback DB conflict - items with timestamp_checksum %s: %v",
tsChecksum, rollbackErr)
}
return err
}
}
// if the update included a timestamp, write a change object
if tsChecksum != "" {
return rdb.writeChange(gun.String(), tsVersion, tsChecksum, changeCategoryUpdate)
}
return nil
}
// GetCurrent returns the modification date and data part of the metadata for
// the latest version of the given GUN and role. If there is no data for
// the given GUN and role, an error is returned.
func (rdb RethinkDB) GetCurrent(gun data.GUN, role data.RoleName) (created *time.Time, data []byte, err error) {
file := RDBTUFFile{}
res, err := gorethink.DB(rdb.dbName).Table(file.TableName(), gorethink.TableOpts{ReadMode: "majority"}).GetAllByIndex(
rdbGunRoleIdx, []string{gun.String(), role.String()},
).OrderBy(gorethink.Desc("version")).Run(rdb.sess)
if err != nil {
return nil, nil, err
}
defer res.Close()
if res.IsNil() {
return nil, nil, ErrNotFound{}
}
err = res.One(&file)
if err == gorethink.ErrEmptyResult {
return nil, nil, ErrNotFound{}
}
return &file.CreatedAt, file.Data, err
}
// GetChecksum returns the given TUF role file and creation date for the
// GUN with the provided checksum. If the given (gun, role, checksum) are
// not found, it returns storage.ErrNotFound
func (rdb RethinkDB) GetChecksum(gun data.GUN, role data.RoleName, checksum string) (created *time.Time, data []byte, err error) {
var file RDBTUFFile
res, err := gorethink.DB(rdb.dbName).Table(file.TableName(), gorethink.TableOpts{ReadMode: "majority"}).GetAllByIndex(
rdbGunRoleSHA256Idx, []string{gun.String(), role.String(), checksum},
).Run(rdb.sess)
if err != nil {
return nil, nil, err
}
defer res.Close()
if res.IsNil() {
return nil, nil, ErrNotFound{}
}
err = res.One(&file)
if err == gorethink.ErrEmptyResult {
return nil, nil, ErrNotFound{}
}
return &file.CreatedAt, file.Data, err
}
// GetVersion gets a specific TUF record by its version
func (rdb RethinkDB) GetVersion(gun data.GUN, role data.RoleName, version int) (*time.Time, []byte, error) {
var file RDBTUFFile
res, err := gorethink.DB(rdb.dbName).Table(file.TableName(), gorethink.TableOpts{ReadMode: "majority"}).Get([]interface{}{gun.String(), role.String(), version}).Run(rdb.sess)
if err != nil {
return nil, nil, err
}
defer res.Close()
if res.IsNil() {
return nil, nil, ErrNotFound{}
}
err = res.One(&file)
if err == gorethink.ErrEmptyResult {
return nil, nil, ErrNotFound{}
}
return &file.CreatedAt, file.Data, err
}
// Delete removes all metadata for a given GUN. It does not return an
// error if no metadata exists for the given GUN.
func (rdb RethinkDB) Delete(gun data.GUN) error {
resp, err := gorethink.DB(rdb.dbName).Table(RDBTUFFile{}.TableName()).GetAllByIndex(
"gun", gun.String(),
).Delete().RunWrite(rdb.sess)
if err != nil {
return fmt.Errorf("unable to delete %s from database: %s", gun.String(), err.Error())
}
if resp.Deleted > 0 {
return rdb.writeChange(gun.String(), 0, "", changeCategoryDeletion)
}
return nil
}
// deleteByTSChecksum removes all metadata by a timestamp checksum, used for rolling back a "transaction"
// from a call to rethinkdb's UpdateMany
func (rdb RethinkDB) deleteByTSChecksum(tsChecksum string) error {
_, err := gorethink.DB(rdb.dbName).Table(RDBTUFFile{}.TableName()).GetAllByIndex(
"timestamp_checksum", tsChecksum,
).Delete().RunWrite(rdb.sess)
if err != nil {
return fmt.Errorf("unable to delete timestamp checksum data: %s from database: %s", tsChecksum, err.Error())
}
// DO NOT WRITE CHANGE! THIS IS USED _ONLY_ TO ROLLBACK A FAILED INSERT
return nil
}
// Bootstrap sets up the database and tables, also creating the notary server user with appropriate db permissions
func (rdb RethinkDB) Bootstrap() error {
if err := rethinkdb.SetupDB(rdb.sess, rdb.dbName, []rethinkdb.Table{
TUFFilesRethinkTable,
ChangeRethinkTable,
}); err != nil {
return err
}
return rethinkdb.CreateAndGrantDBUser(rdb.sess, rdb.dbName, rdb.user, rdb.password)
}
// CheckHealth checks that all tables and databases exist and are queryable
func (rdb RethinkDB) CheckHealth() error {
res, err := gorethink.DB(rdb.dbName).Table(TUFFilesRethinkTable.Name).Info().Run(rdb.sess)
if err != nil {
return fmt.Errorf("%s is unavailable, or missing one or more tables, or permissions are incorrectly set", rdb.dbName)
}
defer res.Close()
return nil
}
func (rdb RethinkDB) writeChange(gun string, version int, sha256, category string) error {
now := time.Now()
ch := Change{
CreatedAt: now,
GUN: gun,
Version: version,
SHA256: sha256,
Category: category,
}
_, err := gorethink.DB(rdb.dbName).Table(ch.TableName()).Insert(
ch,
gorethink.InsertOpts{
Conflict: "error", // default but explicit for clarity of intent
},
).RunWrite(rdb.sess)
return err
}
// GetChanges returns up to pageSize changes starting from changeID. It uses
// the blackout period to account for RethinkDB's eventual consistency model
func (rdb RethinkDB) GetChanges(changeID string, pageSize int, filterName string) ([]Change, error) {
var (
lower, upper, bound []interface{}
idx = "rdb_created_at_id"
max = []interface{}{gorethink.Now().Sub(blackoutTime), gorethink.MaxVal}
min = []interface{}{gorethink.MinVal, gorethink.MinVal}
order gorethink.OrderByOpts
reversed bool
)
if filterName != "" {
idx = "rdb_gun_created_at_id"
max = append([]interface{}{filterName}, max...)
min = append([]interface{}{filterName}, min...)
}
switch changeID {
case "0", "-1":
lower = min
upper = max
default:
bound, idx = rdb.bound(changeID, filterName)
if pageSize < 0 {
lower = min
upper = bound
} else {
lower = bound
upper = max
}
}
if changeID == "-1" || pageSize < 0 {
reversed = true
order = gorethink.OrderByOpts{Index: gorethink.Desc(idx)}
} else {
order = gorethink.OrderByOpts{Index: gorethink.Asc(idx)}
}
if pageSize < 0 {
pageSize = pageSize * -1
}
changes := make([]Change, 0, pageSize)
// Between returns a slice of results from the rethinkdb table.
// The results are ordered using BetweenOpts.Index, which will
// default to the index of the immediately preceding OrderBy.
// The lower and upper are the start and end points for the slice
// and the Left/RightBound values determine whether the lower and
// upper values are included in the result per normal set semantics
// of "open" and "closed"
res, err := gorethink.DB(rdb.dbName).
Table(Change{}.TableName(), gorethink.TableOpts{ReadMode: "majority"}).
OrderBy(order).
Between(
lower,
upper,
gorethink.BetweenOpts{
LeftBound: "open",
RightBound: "open",
},
).Limit(pageSize).Run(rdb.sess)
if err != nil {
return nil, err
}
defer res.Close()
defer func() {
if reversed {
// results are currently newest to oldest, should be oldest to newest
for i, j := 0, len(changes)-1; i < j; i, j = i+1, j-1 {
changes[i], changes[j] = changes[j], changes[i]
}
}
}()
return changes, res.All(&changes)
}
// bound creates the correct boundary based on the index that should be used
// for querying the changefeed.
func (rdb RethinkDB) bound(changeID, filterName string) ([]interface{}, string) {
createdAtTerm := gorethink.DB(rdb.dbName).Table(Change{}.TableName()).Get(changeID).Field("created_at")
if filterName != "" {
return []interface{}{filterName, createdAtTerm, changeID}, "rdb_gun_created_at_id"
}
return []interface{}{createdAtTerm, changeID}, "rdb_created_at_id"
}
package storage
import (
"time"
"github.com/jinzhu/gorm"
)
const (
changeCategoryUpdate = "update"
changeCategoryDeletion = "deletion"
)
// TUFFileTableName is the name used for the tuf file table
const TUFFileTableName = "tuf_files"
// ChangefeedTableName is the name used for the changefeed table
const ChangefeedTableName = "changefeed"
// TUFFile represents a TUF file in the database
type TUFFile struct {
gorm.Model
Gun string `sql:"type:varchar(255);not null"`
Role string `sql:"type:varchar(255);not null"`
Version int `sql:"not null"`
SHA256 string `gorm:"column:sha256" sql:"type:varchar(64);"`
Data []byte `sql:"type:longblob;not null"`
}
// TableName sets a specific table name for TUFFile
func (g TUFFile) TableName() string {
return TUFFileTableName
}
// SQLChange defines the fields required for an object in the changefeed
type SQLChange struct {
ID uint `gorm:"primary_key" sql:"not null" json:",string"`
CreatedAt time.Time
GUN string `gorm:"column:gun" sql:"type:varchar(255);not null"`
Version int `sql:"not null"`
SHA256 string `gorm:"column:sha256" sql:"type:varchar(64);"`
Category string `sql:"type:varchar(20);not null;"`
}
// TableName sets a specific table name for Changefeed
func (c SQLChange) TableName() string {
return ChangefeedTableName
}
// CreateTUFTable creates the DB table for TUFFile
func CreateTUFTable(db *gorm.DB) error {
// TODO: gorm
query := db.AutoMigrate(&TUFFile{})
if query.Error != nil {
return query.Error
}
query = db.Model(&TUFFile{}).AddUniqueIndex(
"idx_gun", "gun", "role", "version")
return query.Error
}
// CreateChangefeedTable creates the DB table for Changefeed
func CreateChangefeedTable(db *gorm.DB) error {
query := db.AutoMigrate(&SQLChange{})
return query.Error
}
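// Illustrative sketch (hypothetical helper): bootstrapping both tables on a
// fresh gorm connection.
func exampleCreateAllTables(db *gorm.DB) error {
if err := CreateTUFTable(db); err != nil {
return err
}
return CreateChangefeedTable(db)
}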
package storage
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"strconv"
"time"
"github.com/go-sql-driver/mysql"
"github.com/jinzhu/gorm"
"github.com/lib/pq"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary/tuf/data"
)
// SQLStorage implements a versioned store using a relational database.
// See server/storage/models.go
type SQLStorage struct {
*gorm.DB
}
// NewSQLStorage is a convenience method to create a SQLStorage
func NewSQLStorage(dialect string, args ...interface{}) (*SQLStorage, error) {
gormDB, err := gorm.Open(dialect, args...)
if err != nil {
return nil, err
}
return &SQLStorage{
DB: gormDB,
}, nil
}
// translateOldVersionError captures DB errors, and attempts to translate
// duplicate-entry errors into ErrOldVersion
func translateOldVersionError(err error) error {
switch err := err.(type) {
case *mysql.MySQLError:
// https://dev.mysql.com/doc/refman/5.5/en/error-messages-server.html
// 1022 = Can't write; duplicate key in table '%s'
// 1062 = Duplicate entry '%s' for key %d
if err.Number == 1022 || err.Number == 1062 {
return ErrOldVersion{}
}
case pq.Error:
// https://www.postgresql.org/docs/10/errcodes-appendix.html
// 23505 = unique_violation
if err.Code == "23505" {
return ErrOldVersion{}
}
}
return err
}
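// Illustrative sketch, not in the original source: a hand-constructed MySQL
// duplicate-entry error collapses into ErrOldVersion, while other errors
// pass through untouched.
func exampleTranslateDuplicate() bool {
dup := &mysql.MySQLError{Number: 1062, Message: "Duplicate entry '1' for key 'idx_gun'"}
_, isOldVersion := translateOldVersionError(dup).(ErrOldVersion)
return isOldVersion // true
}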
// UpdateCurrent updates a single TUF record.
func (db *SQLStorage) UpdateCurrent(gun data.GUN, update MetaUpdate) error {
// ensure we're not inserting an immediately old version - can't use the
// struct, because that only works with non-zero values, and Version
// can be 0.
exists := db.Where("gun = ? and role = ? and version >= ?",
gun.String(), update.Role.String(), update.Version).Take(&TUFFile{})
if exists.Error == nil {
return ErrOldVersion{}
} else if !exists.RecordNotFound() {
return exists.Error
}
// only take out the transaction once we're about to start writing
tx, rb, err := db.getTransaction()
if err != nil {
return err
}
checksum := sha256.Sum256(update.Data)
hexChecksum := hex.EncodeToString(checksum[:])
if err := func() error {
// write new TUFFile entry
if err = translateOldVersionError(tx.Create(&TUFFile{
Gun: gun.String(),
Role: update.Role.String(),
Version: update.Version,
SHA256: hexChecksum,
Data: update.Data,
}).Error); err != nil {
return err
}
// If we're publishing a timestamp, update the changefeed as this is
// technically a new version of the TUF repo
if update.Role == data.CanonicalTimestampRole {
if err := db.writeChangefeed(tx, gun, update.Version, hexChecksum); err != nil {
return err
}
}
return nil
}(); err != nil {
return rb(err)
}
return tx.Commit().Error
}
type rollback func(error) error
func (db *SQLStorage) getTransaction() (*gorm.DB, rollback, error) {
tx := db.Begin()
if tx.Error != nil {
return nil, nil, tx.Error
}
rb := func(err error) error {
if rxErr := tx.Rollback().Error; rxErr != nil {
logrus.Error("Failed on Tx rollback with error: ", rxErr.Error())
return rxErr
}
return err
}
return tx, rb, nil
}
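// Illustrative sketch (hypothetical helper): the transaction/rollback pattern
// shared by UpdateCurrent above and by UpdateMany and Delete below.
func exampleTxPattern(db *SQLStorage, gun data.GUN) error {
tx, rb, err := db.getTransaction()
if err != nil {
return err
}
if err := tx.Create(&SQLChange{GUN: gun.String(), Category: changeCategoryUpdate}).Error; err != nil {
// rb rolls the transaction back, returning the original error (or the
// rollback error if the rollback itself fails)
return rb(err)
}
return tx.Commit().Error
}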
// UpdateMany atomically updates many TUF records in a single transaction
func (db *SQLStorage) UpdateMany(gun data.GUN, updates []MetaUpdate) error {
if !allUpdatesUnique(updates) {
// We would fail with a unique constraint violation later, so just bail out now
return ErrOldVersion{}
}
minVersionsByRole := make(map[data.RoleName]int)
for _, u := range updates {
cur, ok := minVersionsByRole[u.Role]
if !ok || u.Version < cur {
minVersionsByRole[u.Role] = u.Version
}
}
for role, minVersion := range minVersionsByRole {
// If there are any files with version equal or higher than the minimum
// version we're trying to insert, bail out now
exists := db.Where("gun = ? and role = ? and version >= ?",
gun.String(), role.String(), minVersion).Take(&TUFFile{})
if exists.Error == nil {
return ErrOldVersion{}
} else if !exists.RecordNotFound() {
return exists.Error
}
}
tx, rb, err := db.getTransaction()
if err != nil {
return err
}
if err := func() error {
for _, update := range updates {
checksum := sha256.Sum256(update.Data)
hexChecksum := hex.EncodeToString(checksum[:])
result := tx.Create(&TUFFile{
Gun: gun.String(),
Role: update.Role.String(),
Version: update.Version,
Data: update.Data,
SHA256: hexChecksum,
})
if result.Error != nil {
return translateOldVersionError(result.Error)
}
if update.Role == data.CanonicalTimestampRole {
if err := db.writeChangefeed(tx, gun, update.Version, hexChecksum); err != nil {
return err
}
}
}
return nil
}(); err != nil {
return rb(err)
}
return tx.Commit().Error
}
func allUpdatesUnique(updates []MetaUpdate) bool {
type roleVersion struct {
Role data.RoleName
Version int
}
roleVersions := make(map[roleVersion]bool)
for _, u := range updates {
rv := roleVersion{u.Role, u.Version}
if roleVersions[rv] {
return false
}
roleVersions[rv] = true
}
return true
}
func (db *SQLStorage) writeChangefeed(tx *gorm.DB, gun data.GUN, version int, checksum string) error {
c := &SQLChange{
GUN: gun.String(),
Version: version,
SHA256: checksum,
Category: changeCategoryUpdate,
}
return tx.Create(c).Error
}
// GetCurrent gets a specific TUF record
func (db *SQLStorage) GetCurrent(gun data.GUN, tufRole data.RoleName) (*time.Time, []byte, error) {
var row TUFFile
q := db.Select("updated_at, data").Where(
&TUFFile{Gun: gun.String(), Role: tufRole.String()}).Order("version desc").Take(&row)
if err := isReadErr(q, row); err != nil {
return nil, nil, err
}
return &(row.UpdatedAt), row.Data, nil
}
// GetChecksum gets a specific TUF record by its hex checksum
func (db *SQLStorage) GetChecksum(gun data.GUN, tufRole data.RoleName, checksum string) (*time.Time, []byte, error) {
var row TUFFile
q := db.Select("created_at, data").Where(
&TUFFile{
Gun: gun.String(),
Role: tufRole.String(),
SHA256: checksum,
},
).Take(&row)
if err := isReadErr(q, row); err != nil {
return nil, nil, err
}
return &(row.CreatedAt), row.Data, nil
}
// GetVersion gets a specific TUF record by its version
func (db *SQLStorage) GetVersion(gun data.GUN, tufRole data.RoleName, version int) (*time.Time, []byte, error) {
var row TUFFile
q := db.Select("created_at, data").Where(
&TUFFile{
Gun: gun.String(),
Role: tufRole.String(),
Version: version,
},
).Take(&row)
if err := isReadErr(q, row); err != nil {
return nil, nil, err
}
return &(row.CreatedAt), row.Data, nil
}
func isReadErr(q *gorm.DB, row TUFFile) error {
if q.RecordNotFound() {
return ErrNotFound{}
} else if q.Error != nil {
return q.Error
}
return nil
}
// Delete deletes all the records for a specific GUN - we have to do a hard delete using Unscoped,
// otherwise we can't insert for that GUN again
func (db *SQLStorage) Delete(gun data.GUN) error {
tx, rb, err := db.getTransaction()
if err != nil {
return err
}
if err := func() error {
res := tx.Unscoped().Where(&TUFFile{Gun: gun.String()}).Delete(TUFFile{})
if err := res.Error; err != nil {
return err
}
// if there weren't actually any records for the GUN, don't write
// a deletion change record.
if res.RowsAffected == 0 {
return nil
}
c := &SQLChange{
GUN: gun.String(),
Category: changeCategoryDeletion,
}
return tx.Create(c).Error
}(); err != nil {
return rb(err)
}
return tx.Commit().Error
}
// CheckHealth asserts that the tuf_files table is present
func (db *SQLStorage) CheckHealth() (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("panic checking db health: %v", r)
}
}()
tableOk := db.HasTable(&TUFFile{})
if db.Error != nil {
return db.Error
}
if !tableOk {
return fmt.Errorf(
"cannot access table: %s", TUFFile{}.TableName())
}
return nil
}
// GetChanges returns up to pageSize changes starting from changeID.
func (db *SQLStorage) GetChanges(changeID string, records int, filterName string) ([]Change, error) {
var (
changes []Change
query = db.DB
id int64
err error
)
if changeID == "" {
id = 0
} else {
id, err = strconv.ParseInt(changeID, 10, 32)
if err != nil {
return nil, ErrBadQuery{msg: fmt.Sprintf("change ID expected to be integer, provided ID was: %s", changeID)}
}
}
// do what I mean, not what I said, i.e. if I passed a negative number for the ID
// it's assumed I mean "start from latest and go backwards"
reversed := id < 0
if records < 0 {
reversed = true
records = -records
}
if filterName != "" {
query = query.Where("gun = ?", filterName)
}
if reversed {
if id > 0 {
// only set the id check if we're not starting from "latest"
query = query.Where("id < ?", id)
}
query = query.Order("id desc")
} else {
query = query.Where("id > ?", id).Order("id asc")
}
res := query.Limit(records).Find(&changes)
if res.Error != nil {
return nil, res.Error
}
if reversed {
// results are currently newest to oldest, should be oldest to newest
for i, j := 0, len(changes)-1; i < j; i, j = i+1, j-1 {
changes[i], changes[j] = changes[j], changes[i]
}
}
return changes, nil
}
package storage
import (
"crypto/sha256"
"encoding/hex"
"fmt"
"testing"
"time"
"github.com/stretchr/testify/require"
"github.com/theupdateframework/notary/tuf/data"
)
type StoredTUFMeta struct {
Gun data.GUN
Role data.RoleName
SHA256 string
Data []byte
Version int
}
func SampleCustomTUFObj(gun data.GUN, role data.RoleName, version int, tufdata []byte) StoredTUFMeta {
if tufdata == nil {
tufdata = []byte(fmt.Sprintf("%s_%s_%d", gun, role, version))
}
checksum := sha256.Sum256(tufdata)
hexChecksum := hex.EncodeToString(checksum[:])
return StoredTUFMeta{
Gun: gun,
Role: role,
Version: version,
SHA256: hexChecksum,
Data: tufdata,
}
}
func MakeUpdate(tufObj StoredTUFMeta) MetaUpdate {
return MetaUpdate{
Role: tufObj.Role,
Version: tufObj.Version,
Data: tufObj.Data,
}
}
func assertExpectedTUFMetaInStore(t *testing.T, s MetaStore, expected []StoredTUFMeta, current bool) {
for _, tufObj := range expected {
var prevTime *time.Time
if current {
cDate, tufdata, err := s.GetCurrent(tufObj.Gun, tufObj.Role)
require.NoError(t, err)
require.Equal(t, tufObj.Data, tufdata)
// the update date was sometime within the last minute
require.True(t, cDate.After(time.Now().Add(-1*time.Minute)))
require.True(t, cDate.Before(time.Now().Add(5*time.Second)))
prevTime = cDate
}
checksumBytes := sha256.Sum256(tufObj.Data)
checksum := hex.EncodeToString(checksumBytes[:])
cDate, tufdata, err := s.GetChecksum(tufObj.Gun, tufObj.Role, checksum)
require.NoError(t, err)
require.Equal(t, tufObj.Data, tufdata)
if current {
require.True(t, prevTime.Equal(*cDate), "%s should be equal to %s", prevTime, cDate)
} else {
// the update date was sometime within the last minute
require.True(t, cDate.After(time.Now().Add(-1*time.Minute)))
require.True(t, cDate.Before(time.Now().Add(5*time.Second)))
}
}
}
// UpdateCurrent should succeed if there is no previous metadata for the same
// gun and role. The new metadata should then be gettable.
func testUpdateCurrentEmptyStore(t *testing.T, s MetaStore) []StoredTUFMeta {
expected := make([]StoredTUFMeta, 0, 10)
for _, role := range append(data.BaseRoles, "targets/a") {
for _, gun := range []data.GUN{"gun1", "gun2"} {
// Adding a new TUF file should succeed
tufObj := SampleCustomTUFObj(gun, role, 1, nil)
require.NoError(t, s.UpdateCurrent(tufObj.Gun, MakeUpdate(tufObj)))
expected = append(expected, tufObj)
}
}
assertExpectedTUFMetaInStore(t, s, expected, true)
return expected
}
// UpdateCurrent will successfully add a new (higher) version of an existing TUF file,
// but will return an error if the version being added already exists, or is lower
// than the current version. oldVersionExists specifies whether the conflicting
// version should already exist in the DB or not.
func testUpdateCurrentVersionCheck(t *testing.T, s MetaStore, oldVersionExists bool) []StoredTUFMeta {
role, gun := data.CanonicalRootRole, data.GUN("testGUN")
expected := []StoredTUFMeta{
SampleCustomTUFObj(gun, role, 1, nil),
SampleCustomTUFObj(gun, role, 2, nil),
SampleCustomTUFObj(gun, role, 4, nil),
}
// starting meta is version 1
require.NoError(t, s.UpdateCurrent(gun, MakeUpdate(expected[0])))
// inserting meta version immediately above it and skipping ahead will succeed
require.NoError(t, s.UpdateCurrent(gun, MakeUpdate(expected[1])))
require.NoError(t, s.UpdateCurrent(gun, MakeUpdate(expected[2])))
// Inserting a version that already exists, or that is lower than the current version, will fail
version := 3
if oldVersionExists {
version = 4
}
tufObj := SampleCustomTUFObj(gun, role, version, nil)
err := s.UpdateCurrent(gun, MakeUpdate(tufObj))
require.Error(t, err, "Error should not be nil")
require.IsType(t, ErrOldVersion{}, err,
"Expected ErrOldVersion error type, got: %v", err)
assertExpectedTUFMetaInStore(t, s, expected[:2], false)
assertExpectedTUFMetaInStore(t, s, expected[2:], true)
return expected
}
// GetVersion should successfully retrieve a version of an existing TUF file,
// but will return an error if the requested version does not exist.
func testGetVersion(t *testing.T, s MetaStore) {
_, _, err := s.GetVersion("gun", "role", 2)
require.IsType(t, ErrNotFound{}, err, "Expected error to be ErrNotFound")
s.UpdateCurrent("gun", MetaUpdate{"role", 2, []byte("version2")})
_, d, err := s.GetVersion("gun", "role", 2)
require.Nil(t, err, "Expected error to be nil")
require.Equal(t, []byte("version2"), d, "Data was incorrect")
// Getting a newer version fails
_, _, err = s.GetVersion("gun", "role", 3)
require.IsType(t, ErrNotFound{}, err, "Expected error to be ErrNotFound")
// Getting another gun/role fails
_, _, err = s.GetVersion("badgun", "badrole", 2)
require.IsType(t, ErrNotFound{}, err, "Expected error to be ErrNotFound")
}
// UpdateMany succeeds if the updates do not conflict with each other or with what's
// already in the DB
func testUpdateManyNoConflicts(t *testing.T, s MetaStore) []StoredTUFMeta {
var gun data.GUN = "testGUN"
firstBatch := make([]StoredTUFMeta, 4)
updates := make([]MetaUpdate, 4)
for i, role := range data.BaseRoles {
firstBatch[i] = SampleCustomTUFObj(gun, role, 1, nil)
updates[i] = MakeUpdate(firstBatch[i])
}
require.NoError(t, s.UpdateMany(gun, updates))
assertExpectedTUFMetaInStore(t, s, firstBatch, true)
secondBatch := make([]StoredTUFMeta, 4)
// no conflicts with what's in DB or with itself
for i, role := range data.BaseRoles {
secondBatch[i] = SampleCustomTUFObj(gun, role, 2, nil)
updates[i] = MakeUpdate(secondBatch[i])
}
require.NoError(t, s.UpdateMany(gun, updates))
// the first batch is still there, but its files are no longer the current ones
assertExpectedTUFMetaInStore(t, s, firstBatch, false)
assertExpectedTUFMetaInStore(t, s, secondBatch, true)
// and there are no conflicts if the same role and gun with different versions
// are included in the same update, even if they're out of order.
thirdBatch := make([]StoredTUFMeta, 2)
role := data.CanonicalRootRole
updates = updates[:2]
for i, version := range []int{4, 3} {
thirdBatch[i] = SampleCustomTUFObj(gun, role, version, nil)
updates[i] = MakeUpdate(thirdBatch[i])
}
require.NoError(t, s.UpdateMany(gun, updates))
// all the other data is still there, but is no longer current
assertExpectedTUFMetaInStore(t, s, append(firstBatch, secondBatch...), false)
assertExpectedTUFMetaInStore(t, s, thirdBatch[:1], true)
assertExpectedTUFMetaInStore(t, s, thirdBatch[1:], false)
return append(append(firstBatch, secondBatch...), thirdBatch...)
}
// UpdateMany does not insert any rows (or at least rolls them back) if there
// are any conflicts.
func testUpdateManyConflictRollback(t *testing.T, s MetaStore) []StoredTUFMeta {
blackoutTime = 0
var gun data.GUN = "testGUN"
successBatch := make([]StoredTUFMeta, 4)
updates := make([]MetaUpdate, 4)
for i, role := range data.BaseRoles {
successBatch[i] = SampleCustomTUFObj(gun, role, 1, nil)
updates[i] = MakeUpdate(successBatch[i])
}
require.NoError(t, s.UpdateMany(gun, updates))
before, err := s.GetChanges("0", 1000, "")
require.NoError(t, err)
// conflicts with what's in DB
badBatch := make([]StoredTUFMeta, 4)
for i, role := range data.BaseRoles {
version := 2
if role == data.CanonicalTargetsRole {
version = 1
}
tufdata := []byte(fmt.Sprintf("%s_%s_%d_bad", gun, role, version))
badBatch[i] = SampleCustomTUFObj(gun, role, version, tufdata)
updates[i] = MakeUpdate(badBatch[i])
}
err = s.UpdateMany(gun, updates)
require.Error(t, err)
require.IsType(t, ErrOldVersion{}, err)
// check no changes were written when there was a conflict+rollback
after, err := s.GetChanges("0", 1000, "")
require.NoError(t, err)
require.Equal(t, len(before), len(after))
// self-conflicting, in that it's a duplicate, but otherwise no DB conflicts
duplicate := SampleCustomTUFObj(gun, data.CanonicalTimestampRole, 3, []byte("duplicate"))
duplicateUpdate := MakeUpdate(duplicate)
err = s.UpdateMany(gun, []MetaUpdate{duplicateUpdate, duplicateUpdate})
require.Error(t, err)
require.IsType(t, ErrOldVersion{}, err)
assertExpectedTUFMetaInStore(t, s, successBatch, true)
for _, tufObj := range append(badBatch, duplicate) {
checksumBytes := sha256.Sum256(tufObj.Data)
checksum := hex.EncodeToString(checksumBytes[:])
_, _, err = s.GetChecksum(tufObj.Gun, tufObj.Role, checksum)
require.Error(t, err)
require.IsType(t, ErrNotFound{}, err)
}
return successBatch
}
// Delete will remove all TUF metadata (all versions) associated with a gun
func testDeleteSuccess(t *testing.T, s MetaStore) {
var gun data.GUN = "testGUN"
// If there is nothing in the DB, delete is a no-op success
require.NoError(t, s.Delete(gun))
// If there is data in the DB, all versions are deleted
unexpected := make([]StoredTUFMeta, 0, 10)
updates := make([]MetaUpdate, 0, 10)
for version := 1; version < 3; version++ {
for _, role := range append(data.BaseRoles, "targets/a") {
tufObj := SampleCustomTUFObj(gun, role, version, nil)
unexpected = append(unexpected, tufObj)
updates = append(updates, MakeUpdate(tufObj))
}
}
require.NoError(t, s.UpdateMany(gun, updates))
assertExpectedTUFMetaInStore(t, s, unexpected[:5], false)
assertExpectedTUFMetaInStore(t, s, unexpected[5:], true)
require.NoError(t, s.Delete(gun))
for _, tufObj := range unexpected {
_, _, err := s.GetCurrent(tufObj.Gun, tufObj.Role)
require.IsType(t, ErrNotFound{}, err)
checksumBytes := sha256.Sum256(tufObj.Data)
checksum := hex.EncodeToString(checksumBytes[:])
_, _, err = s.GetChecksum(tufObj.Gun, tufObj.Role, checksum)
require.Error(t, err)
require.IsType(t, ErrNotFound{}, err)
}
// We can now write the same files without conflicts to the DB
require.NoError(t, s.UpdateMany(gun, updates))
assertExpectedTUFMetaInStore(t, s, unexpected[:5], false)
assertExpectedTUFMetaInStore(t, s, unexpected[5:], true)
// And delete them again successfully
require.NoError(t, s.Delete(gun))
}
func testGetChanges(t *testing.T, s MetaStore) {
blackoutTime = 0
// non-int changeID
c, err := s.GetChanges("foo", 10, "")
require.Error(t, err)
require.Len(t, c, 0)
// add some records
require.NoError(t, s.UpdateMany("alpine", []MetaUpdate{
{
Role: data.CanonicalTimestampRole,
Version: 1,
Data: []byte{'1'},
},
}))
require.NoError(t, s.UpdateMany("alpine", []MetaUpdate{
{
Role: data.CanonicalTimestampRole,
Version: 2,
Data: []byte{'2'},
},
}))
require.NoError(t, s.UpdateMany("alpine", []MetaUpdate{
{
Role: data.CanonicalTimestampRole,
Version: 3,
Data: []byte{'3'},
},
}))
require.NoError(t, s.UpdateMany("alpine", []MetaUpdate{
{
Role: data.CanonicalTimestampRole,
Version: 4,
Data: []byte{'4'},
},
}))
require.NoError(t, s.UpdateMany("busybox", []MetaUpdate{
{
Role: data.CanonicalTimestampRole,
Version: 1,
Data: []byte{'5'},
},
}))
require.NoError(t, s.UpdateMany("busybox", []MetaUpdate{
{
Role: data.CanonicalTimestampRole,
Version: 2,
Data: []byte{'6'},
},
}))
require.NoError(t, s.UpdateMany("busybox", []MetaUpdate{
{
Role: data.CanonicalTimestampRole,
Version: 3,
Data: []byte{'7'},
},
}))
require.NoError(t, s.UpdateMany("busybox", []MetaUpdate{
{
Role: data.CanonicalTimestampRole,
Version: 4,
Data: []byte{'8'},
},
}))
// check non-error cases
c, err = s.GetChanges("0", 8, "")
require.NoError(t, err)
require.Len(t, c, 8)
for i := 0; i < 4; i++ {
require.Equal(t, "alpine", c[i].GUN)
require.Equal(t, i+1, c[i].Version)
}
for i := 4; i < 8; i++ {
require.Equal(t, "busybox", c[i].GUN)
require.Equal(t, i-3, c[i].Version)
}
full := c
c, err = s.GetChanges("-1", 4, "")
require.NoError(t, err)
require.Len(t, c, 4)
for i := 0; i < 4; i++ {
require.Equal(t, "busybox", c[i].GUN)
require.Equal(t, i+1, c[i].Version)
}
c, err = s.GetChanges(full[7].ID, 4, "")
require.NoError(t, err)
require.Len(t, c, 0)
c, err = s.GetChanges(full[6].ID, -4, "")
require.NoError(t, err)
require.Len(t, c, 4)
for i := 0; i < 2; i++ {
require.Equal(t, "alpine", c[i].GUN)
require.Equal(t, i+3, c[i].Version)
}
for i := 2; i < 4; i++ {
require.Equal(t, "busybox", c[i].GUN)
require.Equal(t, i-1, c[i].Version)
}
c, err = s.GetChanges("0", 8, "busybox")
require.NoError(t, err)
require.Len(t, c, 4)
for i := 0; i < 4; i++ {
require.Equal(t, "busybox", c[i].GUN)
require.Equal(t, i+1, c[i].Version)
}
c, err = s.GetChanges("-1", -8, "busybox")
require.NoError(t, err)
require.Len(t, c, 4)
for i := 0; i < 4; i++ {
require.Equal(t, "busybox", c[i].GUN)
require.Equal(t, i+1, c[i].Version)
}
// update a snapshot and confirm the most recent item of the changelist
// hasn't changed (only timestamps should create changes)
before, err := s.GetChanges("-1", -1, "")
require.NoError(t, err)
require.NoError(t, s.UpdateMany("alpine", []MetaUpdate{
{
Role: data.CanonicalSnapshotRole,
Version: 1,
Data: []byte{'1'},
},
}))
after, err := s.GetChanges("-1", -1, "")
require.NoError(t, err)
require.Equal(t, before, after)
_, err1 := s.GetChanges("1000", 0, "")
_, err2 := s.GetChanges("doesn't exist", 0, "")
if _, ok := s.(RethinkDB); ok {
require.Error(t, err1)
require.Error(t, err2)
} else {
require.NoError(t, err1)
require.Error(t, err2)
require.IsType(t, ErrBadQuery{}, err2)
}
// do a deletion and check it shows up.
require.NoError(t, s.Delete("alpine"))
c, err = s.GetChanges("-1", -1, "")
require.NoError(t, err)
require.Len(t, c, 1)
require.Equal(t, changeCategoryDeletion, c[0].Category)
require.Equal(t, "alpine", c[0].GUN)
// do another deletion and check it doesn't show up because no records were deleted
// after the first one
require.NoError(t, s.Delete("alpine"))
c, err = s.GetChanges("-1", -2, "")
require.NoError(t, err)
require.Len(t, c, 2)
require.NotEqual(t, changeCategoryDeletion, c[0].Category)
require.NotEqual(t, "alpine", c[0].GUN)
}
package storage
import (
"encoding/hex"
"fmt"
"time"
"github.com/docker/go/canonical/json"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/tuf/data"
)
// TUFMetaStorage wraps a MetaStore in order to walk the TUF tree for GetCurrent in a consistent manner,
// by always starting from a current timestamp and then looking up other data by hash
type TUFMetaStorage struct {
MetaStore
}
// NewTUFMetaStorage instantiates a TUFMetaStorage instance
func NewTUFMetaStorage(m MetaStore) *TUFMetaStorage {
return &TUFMetaStorage{
MetaStore: m,
}
}
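// Illustrative sketch, not in the original source: wrapping the in-memory
// store from earlier in this package so that reads walk the TUF tree. The
// GUN is a hypothetical placeholder.
func exampleConsistentRead() (*time.Time, []byte, error) {
tms := NewTUFMetaStorage(NewMemStorage())
// every non-timestamp lookup starts from the current timestamp and is then
// resolved by checksum, so readers see a consistent set of metadata
return tms.GetCurrent("example.com/collection", data.CanonicalRootRole)
}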
// GetCurrent gets a specific TUF record, by walking from the current Timestamp to other metadata by checksum
func (tms TUFMetaStorage) GetCurrent(gun data.GUN, tufRole data.RoleName) (*time.Time, []byte, error) {
timestampTime, timestampJSON, err := tms.MetaStore.GetCurrent(gun, data.CanonicalTimestampRole)
if err != nil {
return nil, nil, err
}
// If we wanted data for the timestamp role, we're done here
if tufRole == data.CanonicalTimestampRole {
return timestampTime, timestampJSON, nil
}
// If we want to look up another role, walk to it via current timestamp --> snapshot by checksum --> desired role
timestampMeta := &data.SignedTimestamp{}
if err := json.Unmarshal(timestampJSON, timestampMeta); err != nil {
return nil, nil, fmt.Errorf("could not parse current timestamp")
}
snapshotChecksums, err := timestampMeta.GetSnapshot()
if err != nil || snapshotChecksums == nil {
return nil, nil, fmt.Errorf("could not retrieve latest snapshot checksum")
}
snapshotSHA256Bytes, ok := snapshotChecksums.Hashes[notary.SHA256]
if !ok {
return nil, nil, fmt.Errorf("could not retrieve latest snapshot sha256")
}
snapshotSHA256Hex := hex.EncodeToString(snapshotSHA256Bytes[:])
// Get the snapshot from the underlying store by checksum
snapshotTime, snapshotJSON, err := tms.GetChecksum(gun, data.CanonicalSnapshotRole, snapshotSHA256Hex)
if err != nil {
return nil, nil, err
}
// If we wanted data for the snapshot role, we're done here
if tufRole == data.CanonicalSnapshotRole {
return snapshotTime, snapshotJSON, nil
}
// If it's a different role, we should have the checksum in snapshot metadata, and we can use it to GetChecksum()
snapshotMeta := &data.SignedSnapshot{}
if err := json.Unmarshal(snapshotJSON, snapshotMeta); err != nil {
return nil, nil, fmt.Errorf("could not parse current snapshot")
}
roleMeta, err := snapshotMeta.GetMeta(tufRole)
if err != nil {
return nil, nil, err
}
roleSHA256Bytes, ok := roleMeta.Hashes[notary.SHA256]
if !ok {
return nil, nil, fmt.Errorf("could not retrieve latest %s sha256", tufRole)
}
roleSHA256Hex := hex.EncodeToString(roleSHA256Bytes[:])
roleTime, roleJSON, err := tms.GetChecksum(gun, tufRole, roleSHA256Hex)
if err != nil {
return nil, nil, err
}
return roleTime, roleJSON, nil
}
// Bootstrap the store with tables if possible
func (tms TUFMetaStorage) Bootstrap() error {
if s, ok := tms.MetaStore.(storage.Bootstrapper); ok {
return s.Bootstrap()
}
return fmt.Errorf("store does not support bootstrapping")
}
package keydbstore
import (
"sync"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
)
type cachedKeyService struct {
signed.CryptoService
lock *sync.RWMutex
cachedKeys map[string]*cachedKey
}
type cachedKey struct {
role data.RoleName
key data.PrivateKey
}
// NewCachedKeyService returns a new signed.CryptoService that includes caching
func NewCachedKeyService(baseKeyService signed.CryptoService) signed.CryptoService {
return &cachedKeyService{
CryptoService: baseKeyService,
lock: &sync.RWMutex{},
cachedKeys: make(map[string]*cachedKey),
}
}
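// Illustrative sketch (hypothetical helper): the first lookup hits the
// underlying service and fills the cache; later lookups for the same key ID
// are served from the cache.
func exampleCachedLookup(base signed.CryptoService, keyID string) (data.PrivateKey, data.RoleName, error) {
cached := NewCachedKeyService(base)
if _, _, err := cached.GetPrivateKey(keyID); err != nil {
return nil, "", err
}
// served from the in-memory cache, even if the base store has since lost
// or re-encrypted the key
return cached.GetPrivateKey(keyID)
}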
// AddKey stores the contents of a private key. Both role and gun are ignored;
// we always use key IDs as the name, and don't support aliases
func (s *cachedKeyService) AddKey(role data.RoleName, gun data.GUN, privKey data.PrivateKey) error {
if err := s.CryptoService.AddKey(role, gun, privKey); err != nil {
return err
}
// Add the private key to our cache
s.lock.Lock()
defer s.lock.Unlock()
s.cachedKeys[privKey.ID()] = &cachedKey{
role: role,
key: privKey,
}
return nil
}
// GetPrivateKey returns the PrivateKey and its role given a KeyID, checking the cache first
func (s *cachedKeyService) GetPrivateKey(keyID string) (data.PrivateKey, data.RoleName, error) {
s.lock.RLock()
cachedKeyEntry, ok := s.cachedKeys[keyID]
s.lock.RUnlock()
if ok {
return cachedKeyEntry.key, cachedKeyEntry.role, nil
}
// retrieve the key from the underlying store and put it into the cache
privKey, role, err := s.CryptoService.GetPrivateKey(keyID)
if err == nil {
s.lock.Lock()
defer s.lock.Unlock()
// Add the key to cache
s.cachedKeys[privKey.ID()] = &cachedKey{key: privKey, role: role}
return privKey, role, nil
}
return nil, "", err
}
// RemoveKey removes the key from the cache and the underlying keystore
func (s *cachedKeyService) RemoveKey(keyID string) error {
s.lock.Lock()
defer s.lock.Unlock()
delete(s.cachedKeys, keyID)
return s.CryptoService.RemoveKey(keyID)
}
package keydbstore
import (
"crypto/rand"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/theupdateframework/notary/cryptoservice"
"github.com/theupdateframework/notary/trustmanager"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
"github.com/theupdateframework/notary/tuf/utils"
)
// gets a key from the DB store, and asserts that the key is the expected key
func requireGetKeySuccess(t *testing.T, dbKeyService signed.CryptoService, expectedRole string, expectedKey data.PrivateKey) {
retrKey, retrRole, err := dbKeyService.GetPrivateKey(expectedKey.ID())
require.NoError(t, err)
require.Equal(t, retrKey.ID(), expectedKey.ID())
require.Equal(t, retrKey.Algorithm(), expectedKey.Algorithm())
require.Equal(t, retrKey.Public(), expectedKey.Public())
require.Equal(t, retrKey.Private(), expectedKey.Private())
require.EqualValues(t, retrRole, expectedRole)
}
func requireGetPubKeySuccess(t *testing.T, dbKeyService signed.CryptoService, expectedRole string, expectedPubKey data.PublicKey) {
retrPubKey := dbKeyService.GetKey(expectedPubKey.ID())
require.Equal(t, retrPubKey.Public(), expectedPubKey.Public())
require.Equal(t, retrPubKey.ID(), expectedPubKey.ID())
require.Equal(t, retrPubKey.Algorithm(), expectedPubKey.Algorithm())
}
// removes the key from the underlying store first so we can test that the
// successful get was from the cache
func requireGetKeySuccessFromCache(t *testing.T, cachedStore, underlyingStore signed.CryptoService, expectedRole string, expectedKey data.PrivateKey) {
require.NoError(t, underlyingStore.RemoveKey(expectedKey.ID()))
requireGetKeySuccess(t, cachedStore, expectedRole, expectedKey)
}
func requireGetKeyFailure(t *testing.T, dbStore signed.CryptoService, keyID string) {
_, _, err := dbStore.GetPrivateKey(keyID)
require.Error(t, err)
k := dbStore.GetKey(keyID)
require.Nil(t, k)
}
type unAddableKeyService struct {
signed.CryptoService
}
func (u unAddableKeyService) AddKey(_ data.RoleName, _ data.GUN, _ data.PrivateKey) error {
return fmt.Errorf("can't add to keyservice")
}
type unRemoveableKeyService struct {
signed.CryptoService
failToRemove bool
}
func (u unRemoveableKeyService) RemoveKey(keyID string) error {
if u.failToRemove {
return fmt.Errorf("can't remove from keystore")
}
return u.CryptoService.RemoveKey(keyID)
}
// Getting a key, on success, populates the cache.
func TestGetSuccessPopulatesCache(t *testing.T) {
underlying := cryptoservice.NewCryptoService(trustmanager.NewKeyMemoryStore(constRetriever))
cached := NewCachedKeyService(underlying)
testKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
// nothing there yet
requireGetKeyFailure(t, cached, testKey.ID())
// Add key to underlying store only
err = underlying.AddKey(data.CanonicalTimestampRole, "gun", testKey)
require.NoError(t, err)
// getting for the first time is successful, and after that getting from cache should be too
requireGetKeySuccess(t, cached, data.CanonicalTimestampRole.String(), testKey)
requireGetKeySuccessFromCache(t, cached, underlying, data.CanonicalTimestampRole.String(), testKey)
}
// Adding a key, on success, populates the cache, but does not do so on failure
func TestAddKeyPopulatesCacheIfSuccessful(t *testing.T) {
underlying := cryptoservice.NewCryptoService(trustmanager.NewKeyMemoryStore(constRetriever))
cached := NewCachedKeyService(underlying)
testKeys := make([]data.PrivateKey, 2)
for i := 0; i < 2; i++ {
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
testKeys[i] = privKey
}
// Writing in the key service succeeds
err := cached.AddKey(data.CanonicalTimestampRole, "gun", testKeys[0])
require.NoError(t, err)
// Now even if it's deleted from the underlying database, it's fine because it's cached
requireGetKeySuccessFromCache(t, cached, underlying, data.CanonicalTimestampRole.String(), testKeys[0])
// Writing in the key service fails
cached = NewCachedKeyService(unAddableKeyService{underlying})
err = cached.AddKey(data.CanonicalTimestampRole, "gun", testKeys[1])
require.Error(t, err)
// And now it can't be found in either DB
requireGetKeyFailure(t, cached, testKeys[1].ID())
}
// Deleting a key, no matter whether we succeed in the underlying layer or not, evicts the cached key.
func TestDeleteKeyRemovesKeyFromCache(t *testing.T) {
underlying := cryptoservice.NewCryptoService(trustmanager.NewKeyMemoryStore(constRetriever))
cached := NewCachedKeyService(underlying)
testKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
// Write the key, which puts it in the cache
err = cached.AddKey(data.CanonicalTimestampRole, "gun", testKey)
require.NoError(t, err)
// Deleting removes the key from the cache and the underlying store
err = cached.RemoveKey(testKey.ID())
require.NoError(t, err)
requireGetKeyFailure(t, cached, testKey.ID())
// Now set up an underlying store where the key can't be deleted
failingUnderlying := unRemoveableKeyService{CryptoService: underlying, failToRemove: true}
cached = NewCachedKeyService(failingUnderlying)
err = cached.AddKey(data.CanonicalTimestampRole, "gun", testKey)
require.NoError(t, err)
// Deleting fails to remove the key from the underlying store
err = cached.RemoveKey(testKey.ID())
require.Error(t, err)
requireGetKeySuccess(t, failingUnderlying, data.CanonicalTimestampRole.String(), testKey)
// now actually remove the key from the underlying store to test that it's gone from the cache
failingUnderlying.failToRemove = false
require.NoError(t, failingUnderlying.RemoveKey(testKey.ID()))
// and it's not in the cache
requireGetKeyFailure(t, cached, testKey.ID())
}
// Copyright 2023 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package keydbstore
import (
"bytes"
"fmt"
"io/ioutil"
"os"
"path/filepath"
"testing"
"time"
fuzz "github.com/AdaLogics/go-fuzz-headers"
_ "github.com/mattn/go-sqlite3"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
var gormActiveTimeFuzz = time.Date(2016, 12, 31, 1, 1, 1, 0, time.UTC)
var (
roleNames = []data.RoleName{data.CanonicalRootRole,
data.CanonicalTargetsRole,
data.CanonicalSnapshotRole,
data.CanonicalTimestampRole}
)
func sqlite3Setup_Fuzz() (*SQLKeyDBStore, func(), error) {
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
if err != nil {
panic("Could not create tempdir")
}
dbStore, err := SetupSQLDB_Fuzz("sqlite3", filepath.Join(tempBaseDir, "test_db"))
if err != nil {
return nil, func() {}, err
}
var cleanup = func() {
dbStore.db.Close()
os.RemoveAll(tempBaseDir)
}
if dbStore.Name() != "sqlite3" {
panic("Failed creating an sqlite3 db")
}
return dbStore, cleanup, nil
}
func SetupSQLDB_Fuzz(dbtype, dburl string) (*SQLKeyDBStore, error) {
dbStore, err := NewSQLKeyDBStore(multiAliasRetriever, validAliases[0], dbtype, dburl)
if err != nil {
return nil, err
}
dbStore.nowFunc = func() time.Time { return gormActiveTimeFuzz }
// Create the DB tables if they don't exist
dbStore.db.CreateTable(&GormPrivateKey{})
// verify that the table is empty
var count int
query := dbStore.db.Model(&GormPrivateKey{}).Count(&count)
if query.Error != nil {
return nil, query.Error
}
if count != 0 {
panic("count should not be nil. This is an error in the fuzzer.")
}
return dbStore, nil
}
func FuzzKeyDBStore(f *testing.F) {
f.Fuzz(func(t *testing.T, fuzzData []byte) {
ff := fuzz.NewConsumer(fuzzData)
testKeys := make([]data.PrivateKey, 0)
noOfTestKeys, err := ff.GetInt()
if err != nil {
t.Skip()
}
for i := 0; i < noOfTestKeys%20; i++ {
readerData, err := ff.GetBytes()
if err != nil {
t.Skip()
}
testKey, err := utils.GenerateECDSAKey(bytes.NewReader(readerData))
if err != nil {
t.Skip()
}
testKeys = append(testKeys, testKey)
}
if len(testKeys) == 0 {
t.Skip()
}
dbStore, cleanup, err := sqlite3Setup_Fuzz()
if err != nil {
fmt.Println(err)
t.Fatal("Could not create the db. This is not a fuzz issue.")
}
defer cleanup()
for i := 0; i < len(testKeys); i++ {
testKey := testKeys[i]
// Add keys to the DB
roleInd, err := ff.GetInt()
if err != nil {
t.Skip()
}
err = dbStore.AddKey(roleNames[roleInd%len(roleNames)], "gun", testKey)
if err != nil {
t.Skip()
}
}
noOfCalls, err := ff.GetInt()
if err != nil {
t.Skip()
}
for i := 0; i < noOfCalls%10; i++ {
typeOfCall, err := ff.GetInt()
if err != nil {
t.Skip()
}
switch typeOfCall % 4 {
case 0:
keyInd, err := ff.GetInt()
if err != nil {
t.Skip()
}
if len(testKeys) != 0 {
dbStore.RemoveKey(testKeys[keyInd%len(testKeys)].ID())
}
case 1:
keyInd, err := ff.GetInt()
if err != nil {
t.Skip()
}
if len(testKeys) != 0 {
_, _, _ = dbStore.GetPrivateKey(testKeys[keyInd%len(testKeys)].ID())
}
case 2:
keyInd, err := ff.GetInt()
if err != nil {
t.Skip()
}
if len(testKeys) != 0 {
newValidAlias, err := ff.GetString()
if err != nil {
t.Skip()
}
_ = dbStore.RotateKeyPassphrase(testKeys[keyInd%len(testKeys)].ID(), newValidAlias)
}
case 3:
dbStore.HealthCheck()
}
}
})
}
package keydbstore
import (
"crypto"
"crypto/rand"
"fmt"
"io"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
type activatingPrivateKey struct {
data.PrivateKey
activationFunc func(keyID string) error
}
func (a activatingPrivateKey) Sign(rand io.Reader, digest []byte, opts crypto.SignerOpts) (signature []byte, err error) {
keyID := a.PrivateKey.ID()
sig, err := a.PrivateKey.Sign(rand, digest, opts)
if err == nil {
if activationErr := a.activationFunc(keyID); activationErr != nil {
logrus.Errorf("Key %s was just used to sign hash %s, error when trying to mark key as active: %s",
keyID, digest, activationErr.Error())
}
}
return sig, err
}
// helper function to generate private keys for the signer databases - does not implement RSA since that is not
// supported by the signer
func generatePrivateKey(algorithm string) (data.PrivateKey, error) {
var privKey data.PrivateKey
var err error
switch algorithm {
case data.ECDSAKey:
privKey, err = utils.GenerateECDSAKey(rand.Reader)
if err != nil {
return nil, fmt.Errorf("failed to generate EC key: %v", err)
}
case data.ED25519Key:
privKey, err = utils.GenerateED25519Key(rand.Reader)
if err != nil {
return nil, fmt.Errorf("failed to generate ED25519 key: %v", err)
}
default:
return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm)
}
return privKey, nil
}
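// A hedged usage sketch, not part of the original source: generatePrivateKey
// only understands the two signer-supported algorithms; RSA (and anything
// else) is rejected with an error.
func exampleGeneratePrivateKey() {
for _, alg := range []string{data.ECDSAKey, data.ED25519Key, data.RSAKey} {
if _, err := generatePrivateKey(alg); err != nil {
fmt.Printf("%s: %v\n", alg, err) // only data.RSAKey lands here
}
}
}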
package keydbstore
import (
"crypto/rand"
"errors"
"fmt"
"testing"
"github.com/stretchr/testify/require"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/signed"
"github.com/theupdateframework/notary/tuf/utils"
)
func constRetriever(string, string, bool, int) (string, bool, error) {
return "constantPass", false, nil
}
var validAliases = []string{"validAlias1", "validAlias2"}
var validAliasesAndPasswds = map[string]string{
"validAlias1": "passphrase_1",
"validAlias2": "passphrase_2",
}
func multiAliasRetriever(_, alias string, _ bool, _ int) (string, bool, error) {
if passwd, ok := validAliasesAndPasswds[alias]; ok {
return passwd, false, nil
}
return "", false, errors.New("password alias not found")
}
type keyRotator interface {
signed.CryptoService
RotateKeyPassphrase(keyID, newPassphraseAlias string) error
}
// A key can only be added to the DB once. Returns a list of expected keys, and which keys are expected to exist.
func testKeyCanOnlyBeAddedOnce(t *testing.T, dbStore signed.CryptoService) []data.PrivateKey {
expectedKeys := make([]data.PrivateKey, 2)
for i := 0; i < len(expectedKeys); i++ {
testKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
expectedKeys[i] = testKey
}
// Test writing new key in database alone, not cache
err := dbStore.AddKey(data.CanonicalTimestampRole, "gun", expectedKeys[0])
require.NoError(t, err)
requireGetKeySuccess(t, dbStore, data.CanonicalTimestampRole.String(), expectedKeys[0])
// Test writing the same key in the database. Should fail.
err = dbStore.AddKey(data.CanonicalTimestampRole, "gun", expectedKeys[0])
require.Error(t, err, "failed to add private key to database:")
// Test writing new key succeeds
err = dbStore.AddKey(data.CanonicalTimestampRole, "gun", expectedKeys[1])
require.NoError(t, err)
return expectedKeys
}
// a key can be deleted - returns a list of expected keys
func testCreateDelete(t *testing.T, dbStore signed.CryptoService) []data.PrivateKey {
testKeys := make([]data.PrivateKey, 2)
for i := 0; i < len(testKeys); i++ {
testKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
testKeys[i] = testKey
// Add them to the DB
err = dbStore.AddKey(data.CanonicalTimestampRole, "gun", testKey)
require.NoError(t, err)
requireGetKeySuccess(t, dbStore, data.CanonicalTimestampRole.String(), testKey)
}
// Deleting the key should succeed and only remove the key that was deleted
require.NoError(t, dbStore.RemoveKey(testKeys[0].ID()))
requireGetKeyFailure(t, dbStore, testKeys[0].ID())
requireGetKeySuccess(t, dbStore, data.CanonicalTimestampRole.String(), testKeys[1])
// Deleting the key again should succeed even though it's not in the DB
require.NoError(t, dbStore.RemoveKey(testKeys[0].ID()))
requireGetKeyFailure(t, dbStore, testKeys[0].ID())
return testKeys[1:]
}
// key rotation is successful provided the other alias is valid.
// Returns the key that was rotated and one that was not rotated
func testKeyRotation(t *testing.T, dbStore keyRotator, newValidAlias string) (data.PrivateKey, data.PrivateKey) {
testKeys := make([]data.PrivateKey, 2)
for i := 0; i < len(testKeys); i++ {
testKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
testKeys[i] = testKey
// Add them to the DB
err = dbStore.AddKey(data.CanonicalTimestampRole, "gun", testKey)
require.NoError(t, err)
}
// Try rotating the key to a valid alias
err := dbStore.RotateKeyPassphrase(testKeys[0].ID(), newValidAlias)
require.NoError(t, err)
// Try rotating the key to an invalid alias
err = dbStore.RotateKeyPassphrase(testKeys[0].ID(), "invalidAlias")
require.Error(t, err, "there should be no password for invalidAlias so rotation should fail")
return testKeys[0], testKeys[1]
}
type badReader struct{}
func (b badReader) Read([]byte) (n int, err error) {
return 0, fmt.Errorf("Nope, not going to read")
}
// Signing with a key marks it as active if the signing is successful. Marking as active is successful no matter what,
// but should only activate a key that exists in the DB.
// Returns the key that was used and one that was not
func testSigningWithKeyMarksAsActive(t *testing.T, dbStore signed.CryptoService) (data.PrivateKey, data.PrivateKey) {
testKeys := make([]data.PrivateKey, 3)
for i := 0; i < len(testKeys); i++ {
testKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
// Add them to the DB
err = dbStore.AddKey(data.CanonicalTimestampRole, "gun", testKey)
require.NoError(t, err)
requireGetKeySuccess(t, dbStore, data.CanonicalTimestampRole.String(), testKey)
// store the key retrieved from the DB, since it is wrapped to mark itself active on signing
gottenKey, _, err := dbStore.GetPrivateKey(testKey.ID())
require.NoError(t, err)
testKeys[i] = gottenKey
}
// sign successfully with the first key - this key will become active
msg := []byte("successful")
sig, err := testKeys[0].Sign(rand.Reader, msg, nil)
require.NoError(t, err)
require.NoError(t, signed.Verifiers[data.ECDSASignature].Verify(
data.PublicKeyFromPrivate(testKeys[0]), sig, msg))
// sign unsuccessfully with the second key - this key should remain inactive
sig, err = testKeys[1].Sign(badReader{}, []byte("unsuccessful"), nil)
require.Error(t, err)
require.Equal(t, "Nope, not going to read", err.Error())
require.Nil(t, sig)
// delete the third key from the DB - sign should still succeed, even though
// this key cannot be marked as active anymore due to it not existing
// (this probably won't return an error)
require.NoError(t, dbStore.RemoveKey(testKeys[2].ID()))
requireGetKeyFailure(t, dbStore, testKeys[2].ID())
msg = []byte("successful, not active")
sig, err = testKeys[2].Sign(rand.Reader, msg, nil)
require.NoError(t, err)
require.NoError(t, signed.Verifiers[data.ECDSASignature].Verify(
data.PublicKeyFromPrivate(testKeys[2]), sig, msg))
return testKeys[0], testKeys[1] // testKeys[2] should no longer exist in the DB
}
func testCreateKey(t *testing.T, dbStore signed.CryptoService) (data.PrivateKey, data.PrivateKey, data.PrivateKey) {
// Create a test key, and check that it is successfully added to the database
role := data.CanonicalSnapshotRole
var gun data.GUN = "gun"
// First create an ECDSA key
createdECDSAKey, err := dbStore.Create(role, gun, data.ECDSAKey)
require.NoError(t, err)
require.NotNil(t, createdECDSAKey)
require.Equal(t, data.ECDSAKey, createdECDSAKey.Algorithm())
// Retrieve the key from the database by ID, and check that it is correct
requireGetPubKeySuccess(t, dbStore, role.String(), createdECDSAKey)
// Calling Create with the same parameters will return the same key because it is inactive
createdSameECDSAKey, err := dbStore.Create(role, gun, data.ECDSAKey)
require.NoError(t, err)
require.Equal(t, createdECDSAKey.Algorithm(), createdSameECDSAKey.Algorithm())
require.Equal(t, createdECDSAKey.Public(), createdSameECDSAKey.Public())
require.Equal(t, createdECDSAKey.ID(), createdSameECDSAKey.ID())
// Calling Create with the same role and gun but a different algorithm will create a new key
createdED25519Key, err := dbStore.Create(role, gun, data.ED25519Key)
require.NoError(t, err)
require.NotEqual(t, createdECDSAKey.Algorithm(), createdED25519Key.Algorithm())
require.NotEqual(t, createdECDSAKey.Public(), createdED25519Key.Public())
require.NotEqual(t, createdECDSAKey.ID(), createdED25519Key.ID())
// Retrieve the key from the database by ID, and check that it is correct
requireGetPubKeySuccess(t, dbStore, role.String(), createdED25519Key)
// Sign with the ED25519 key from the DB to mark it as active
activeED25519Key, _, err := dbStore.GetPrivateKey(createdED25519Key.ID())
require.NoError(t, err)
_, err = activeED25519Key.Sign(rand.Reader, []byte("msg"), nil)
require.NoError(t, err)
// Calling Create for the same role, gun and ED25519 algorithm will now create a new key
createdNewED25519Key, err := dbStore.Create(role, gun, data.ED25519Key)
require.NoError(t, err)
require.Equal(t, activeED25519Key.Algorithm(), createdNewED25519Key.Algorithm())
require.NotEqual(t, activeED25519Key.Public(), createdNewED25519Key.Public())
require.NotEqual(t, activeED25519Key.ID(), createdNewED25519Key.ID())
// Get the inactive ED25519 key from the database explicitly to return
inactiveED25519Key, _, err := dbStore.GetPrivateKey(createdNewED25519Key.ID())
require.NoError(t, err)
// Get the inactive ECDSA key from the database explicitly to return
inactiveECDSAKey, _, err := dbStore.GetPrivateKey(createdSameECDSAKey.ID())
require.NoError(t, err)
// Calling Create with an invalid algorithm gives an error
_, err = dbStore.Create(role, gun, "invalid")
require.Error(t, err)
return activeED25519Key, inactiveED25519Key, inactiveECDSAKey
}
func testUnimplementedInterfaceMethods(t *testing.T, dbStore signed.CryptoService) {
// add one key to the db
testKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
err = dbStore.AddKey(data.CanonicalTimestampRole, "gun", testKey)
require.NoError(t, err)
requireGetKeySuccess(t, dbStore, data.CanonicalTimestampRole.String(), testKey)
// these are unimplemented/unused, and return nil
require.Nil(t, dbStore.ListAllKeys())
require.Nil(t, dbStore.ListKeys(data.CanonicalTimestampRole))
}
package keydbstore
import (
"encoding/json"
"fmt"
"time"
jose "github.com/dvsekhvalnov/jose2go"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/storage/rethinkdb"
"github.com/theupdateframework/notary/trustmanager"
"github.com/theupdateframework/notary/tuf/data"
gorethink "gopkg.in/rethinkdb/rethinkdb-go.v6"
)
// RethinkDBKeyStore persists and manages private keys on a RethinkDB database
type RethinkDBKeyStore struct {
sess *gorethink.Session
dbName string
defaultPassAlias string
retriever notary.PassRetriever
user string
password string
nowFunc func() time.Time
}
// RDBPrivateKey represents a PrivateKey in the rethink database
type RDBPrivateKey struct {
rethinkdb.Timing
KeyID string `gorethink:"key_id"`
EncryptionAlg string `gorethink:"encryption_alg"`
KeywrapAlg string `gorethink:"keywrap_alg"`
Algorithm string `gorethink:"algorithm"`
PassphraseAlias string `gorethink:"passphrase_alias"`
Gun data.GUN `gorethink:"gun"`
Role data.RoleName `gorethink:"role"`
// gorethink specifically supports binary types, and says to pass it in as
// a byteslice. Currently our encryption method for the private key bytes
// produces a base64-encoded string, but for future compatibility in case
// we change how we encrypt, use a byteslice for the encrypted private key
// too
Public []byte `gorethink:"public"`
Private []byte `gorethink:"private"`
// the time this key was last used to sign; the zero value means the key has never been used (inactive)
LastUsed time.Time `gorethink:"last_used"`
}
// gorethink can't handle an UnmarshalJSON function (see https://github.com/gorethink/gorethink/issues/201),
// so do this here in an anonymous struct
func rdbPrivateKeyFromJSON(jsonData []byte) (interface{}, error) {
a := struct {
CreatedAt time.Time `json:"created_at"`
UpdatedAt time.Time `json:"updated_at"`
DeletedAt time.Time `json:"deleted_at"`
KeyID string `json:"key_id"`
EncryptionAlg string `json:"encryption_alg"`
KeywrapAlg string `json:"keywrap_alg"`
Algorithm string `json:"algorithm"`
PassphraseAlias string `json:"passphrase_alias"`
Gun data.GUN `json:"gun"`
Role data.RoleName `json:"role"`
Public []byte `json:"public"`
Private []byte `json:"private"`
LastUsed time.Time `json:"last_used"`
}{}
if err := json.Unmarshal(jsonData, &a); err != nil {
return RDBPrivateKey{}, err
}
return RDBPrivateKey{
Timing: rethinkdb.Timing{
CreatedAt: a.CreatedAt,
UpdatedAt: a.UpdatedAt,
DeletedAt: a.DeletedAt,
},
KeyID: a.KeyID,
EncryptionAlg: a.EncryptionAlg,
KeywrapAlg: a.KeywrapAlg,
Algorithm: a.Algorithm,
PassphraseAlias: a.PassphraseAlias,
Gun: a.Gun,
Role: a.Role,
Public: a.Public,
Private: a.Private,
LastUsed: a.LastUsed,
}, nil
}
// PrivateKeysRethinkTable is the table definition for notary signer's key information
var PrivateKeysRethinkTable = rethinkdb.Table{
Name: RDBPrivateKey{}.TableName(),
PrimaryKey: "key_id",
JSONUnmarshaller: rdbPrivateKeyFromJSON,
}
// TableName sets a specific table name for our RDBPrivateKey
func (g RDBPrivateKey) TableName() string {
return "private_keys"
}
// NewRethinkDBKeyStore returns a new RethinkDBKeyStore backed by a RethinkDB database
func NewRethinkDBKeyStore(dbName, username, password string, passphraseRetriever notary.PassRetriever, defaultPassAlias string, rethinkSession *gorethink.Session) *RethinkDBKeyStore {
return &RethinkDBKeyStore{
sess: rethinkSession,
defaultPassAlias: defaultPassAlias,
dbName: dbName,
retriever: passphraseRetriever,
user: username,
password: password,
nowFunc: time.Now,
}
}
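// A hedged construction sketch, not part of the original source: connect to a
// local RethinkDB instance and bootstrap the key store. The address, database
// name, credentials, and passphrase alias are illustrative placeholders.
func exampleNewRethinkDBKeyStore(retriever notary.PassRetriever) (*RethinkDBKeyStore, error) {
sess, err := gorethink.Connect(gorethink.ConnectOpts{Address: "localhost:28015"})
if err != nil {
return nil, err
}
store := NewRethinkDBKeyStore("notarysigner", "signer", "password", retriever, "alias_1", sess)
// Bootstrap creates the database, the private_keys table, and the DB user.
return store, store.Bootstrap()
}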
// Name returns a user friendly name for the storage location
func (rdb *RethinkDBKeyStore) Name() string {
return "RethinkDB"
}
// AddKey stores the contents of a private key. Both role and gun are ignored;
// we always use key IDs as names and don't support aliases
func (rdb *RethinkDBKeyStore) AddKey(role data.RoleName, gun data.GUN, privKey data.PrivateKey) error {
passphrase, _, err := rdb.retriever(privKey.ID(), rdb.defaultPassAlias, false, 1)
if err != nil {
return err
}
encryptedKey, err := jose.Encrypt(string(privKey.Private()), KeywrapAlg, EncryptionAlg, passphrase)
if err != nil {
return err
}
now := rdb.nowFunc()
rethinkPrivKey := RDBPrivateKey{
Timing: rethinkdb.Timing{
CreatedAt: now,
UpdatedAt: now,
},
KeyID: privKey.ID(),
EncryptionAlg: EncryptionAlg,
KeywrapAlg: KeywrapAlg,
PassphraseAlias: rdb.defaultPassAlias,
Algorithm: privKey.Algorithm(),
Gun: gun,
Role: role,
Public: privKey.Public(),
Private: []byte(encryptedKey),
}
// Add encrypted private key to the database
_, err = gorethink.DB(rdb.dbName).Table(rethinkPrivKey.TableName()).Insert(rethinkPrivKey).RunWrite(rdb.sess)
if err != nil {
return fmt.Errorf("failed to add private key %s to database: %s", privKey.ID(), err.Error())
}
return nil
}
// getKey returns the RDBPrivateKey given a keyID, as well as the decrypted private bytes
func (rdb *RethinkDBKeyStore) getKey(keyID string) (*RDBPrivateKey, string, error) {
// Retrieve the RethinkDB private key from the database
dbPrivateKey := RDBPrivateKey{}
res, err := gorethink.DB(rdb.dbName).Table(dbPrivateKey.TableName()).Filter(gorethink.Row.Field("key_id").Eq(keyID)).Run(rdb.sess)
if err != nil {
return nil, "", err
}
defer res.Close()
err = res.One(&dbPrivateKey)
if err != nil {
return nil, "", trustmanager.ErrKeyNotFound{}
}
// Get the passphrase to use for this key
passphrase, _, err := rdb.retriever(dbPrivateKey.KeyID, dbPrivateKey.PassphraseAlias, false, 1)
if err != nil {
return nil, "", err
}
// Decrypt the private bytes from the stored record
decryptedPrivKey, _, err := jose.Decode(string(dbPrivateKey.Private), passphrase)
if err != nil {
return nil, "", err
}
return &dbPrivateKey, decryptedPrivKey, nil
}
// GetPrivateKey returns the PrivateKey given a KeyID
func (rdb *RethinkDBKeyStore) GetPrivateKey(keyID string) (data.PrivateKey, data.RoleName, error) {
dbPrivateKey, decryptedPrivKey, err := rdb.getKey(keyID)
if err != nil {
return nil, "", err
}
pubKey := data.NewPublicKey(dbPrivateKey.Algorithm, dbPrivateKey.Public)
// Create a new PrivateKey with unencrypted bytes
privKey, err := data.NewPrivateKey(pubKey, []byte(decryptedPrivKey))
if err != nil {
return nil, "", err
}
return activatingPrivateKey{PrivateKey: privKey, activationFunc: rdb.markActive}, dbPrivateKey.Role, nil
}
// GetKey returns the PublicKey given a KeyID, and does not activate the key
func (rdb *RethinkDBKeyStore) GetKey(keyID string) data.PublicKey {
dbPrivateKey, _, err := rdb.getKey(keyID)
if err != nil {
return nil
}
return data.NewPublicKey(dbPrivateKey.Algorithm, dbPrivateKey.Public)
}
// ListKeys always returns nil. This method is here to satisfy the CryptoService interface
func (rdb RethinkDBKeyStore) ListKeys(role data.RoleName) []string {
return nil
}
// ListAllKeys always returns nil. This method is here to satisfy the CryptoService interface
func (rdb RethinkDBKeyStore) ListAllKeys() map[string]data.RoleName {
return nil
}
// RemoveKey removes the key from the table
func (rdb RethinkDBKeyStore) RemoveKey(keyID string) error {
// Delete the key from the database
dbPrivateKey := RDBPrivateKey{KeyID: keyID}
_, err := gorethink.DB(rdb.dbName).Table(dbPrivateKey.TableName()).Filter(gorethink.Row.Field("key_id").Eq(keyID)).Delete().RunWrite(rdb.sess)
if err != nil {
return fmt.Errorf("unable to delete private key %s from database: %s", keyID, err.Error())
}
return nil
}
// RotateKeyPassphrase rotates the key-encryption-key
func (rdb RethinkDBKeyStore) RotateKeyPassphrase(keyID, newPassphraseAlias string) error {
dbPrivateKey, decryptedPrivKey, err := rdb.getKey(keyID)
if err != nil {
return err
}
// Get the new passphrase to use for this key
newPassphrase, _, err := rdb.retriever(dbPrivateKey.KeyID, newPassphraseAlias, false, 1)
if err != nil {
return err
}
// Re-encrypt the private bytes with the new passphrase
newEncryptedKey, err := jose.Encrypt(decryptedPrivKey, KeywrapAlg, EncryptionAlg, newPassphrase)
if err != nil {
return err
}
// Update the database object
dbPrivateKey.Private = []byte(newEncryptedKey)
dbPrivateKey.PassphraseAlias = newPassphraseAlias
if _, err := gorethink.DB(rdb.dbName).Table(dbPrivateKey.TableName()).Get(keyID).Update(dbPrivateKey).RunWrite(rdb.sess); err != nil {
return err
}
return nil
}
// markActive marks a particular key as active
func (rdb RethinkDBKeyStore) markActive(keyID string) error {
_, err := gorethink.DB(rdb.dbName).Table(PrivateKeysRethinkTable.Name).Get(keyID).Update(map[string]interface{}{
"last_used": rdb.nowFunc(),
}).RunWrite(rdb.sess)
return err
}
// Create will attempt to first re-use an inactive key for the same role, gun, and algorithm.
// If one isn't found, it will create a private key and add it to the DB as an inactive key
func (rdb RethinkDBKeyStore) Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error) {
dbPrivateKey := RDBPrivateKey{}
res, err := gorethink.DB(rdb.dbName).Table(dbPrivateKey.TableName()).
Filter(gorethink.Row.Field("gun").Eq(gun.String())).
Filter(gorethink.Row.Field("role").Eq(role.String())).
Filter(gorethink.Row.Field("algorithm").Eq(algorithm)).
Filter(gorethink.Row.Field("last_used").Eq(time.Time{})).
OrderBy(gorethink.Row.Field("key_id")).
Run(rdb.sess)
if err != nil {
return nil, err
}
defer res.Close()
err = res.One(&dbPrivateKey)
if err == nil {
return data.NewPublicKey(dbPrivateKey.Algorithm, dbPrivateKey.Public), nil
}
privKey, err := generatePrivateKey(algorithm)
if err != nil {
return nil, err
}
if err = rdb.AddKey(role, gun, privKey); err != nil {
return nil, fmt.Errorf("failed to store key: %v", err)
}
return privKey, nil
}
// Bootstrap sets up the database and tables, also creating the notary signer user with appropriate db permission
func (rdb RethinkDBKeyStore) Bootstrap() error {
if err := rethinkdb.SetupDB(rdb.sess, rdb.dbName, []rethinkdb.Table{
PrivateKeysRethinkTable,
}); err != nil {
return err
}
return rethinkdb.CreateAndGrantDBUser(rdb.sess, rdb.dbName, rdb.user, rdb.password)
}
// CheckHealth verifies that DB exists and is query-able
func (rdb RethinkDBKeyStore) CheckHealth() error {
res, err := gorethink.DB(rdb.dbName).Table(PrivateKeysRethinkTable.Name).Info().Run(rdb.sess)
if err != nil {
return fmt.Errorf("%s is unavailable, or missing one or more tables, or permissions are incorrectly set", rdb.dbName)
}
defer res.Close()
return nil
}
package keydbstore
import (
"fmt"
"time"
jose "github.com/dvsekhvalnov/jose2go"
"github.com/jinzhu/gorm"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/trustmanager"
"github.com/theupdateframework/notary/tuf/data"
)
// Constants: the algorithms used to encrypt private keys at rest
const (
EncryptionAlg = jose.A256GCM
KeywrapAlg = jose.PBES2_HS256_A128KW
)
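// A hedged round-trip sketch, not part of the original source: private key
// bytes are wrapped with jose2go using the constants above, with exactly the
// call shapes AddKey and getKey use below.
func exampleJoseRoundTrip(plaintext, passphrase string) (string, error) {
token, err := jose.Encrypt(plaintext, KeywrapAlg, EncryptionAlg, passphrase)
if err != nil {
return "", err
}
decrypted, _, err := jose.Decode(token, passphrase)
return decrypted, err
}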
// SQLKeyDBStore persists and manages private keys on a SQL database
type SQLKeyDBStore struct {
db gorm.DB
dbType string
defaultPassAlias string
retriever notary.PassRetriever
nowFunc func() time.Time
}
// GormPrivateKey represents a PrivateKey in the database
type GormPrivateKey struct {
gorm.Model
KeyID string `sql:"type:varchar(255);not null;unique;index:key_id_idx"`
EncryptionAlg string `sql:"type:varchar(255);not null"`
KeywrapAlg string `sql:"type:varchar(255);not null"`
Algorithm string `sql:"type:varchar(50);not null"`
PassphraseAlias string `sql:"type:varchar(50);not null"`
Gun string `sql:"type:varchar(255);not null"`
Role string `sql:"type:varchar(255);not null"`
Public string `sql:"type:blob;not null"`
Private string `sql:"type:blob;not null"`
LastUsed time.Time `sql:"type:datetime;null;default:null"`
}
// TableName sets a specific table name for our GormPrivateKey
func (g GormPrivateKey) TableName() string {
return "private_keys"
}
// NewSQLKeyDBStore returns a new SQLKeyDBStore backed by a SQL database
func NewSQLKeyDBStore(passphraseRetriever notary.PassRetriever, defaultPassAlias string,
dbDialect string, dbArgs ...interface{}) (*SQLKeyDBStore, error) {
db, err := gorm.Open(dbDialect, dbArgs...)
if err != nil {
return nil, err
}
return &SQLKeyDBStore{
db: *db,
dbType: dbDialect,
defaultPassAlias: defaultPassAlias,
retriever: passphraseRetriever,
nowFunc: time.Now,
}, nil
}
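// A hedged usage sketch, not part of the original source: open a
// sqlite3-backed store (the binary is assumed to register a "sqlite3" gorm
// dialect) and create the backing table, mirroring the fuzzer setup above.
func exampleNewSQLKeyDBStore(retriever notary.PassRetriever) (*SQLKeyDBStore, error) {
store, err := NewSQLKeyDBStore(retriever, "alias_1", "sqlite3", "/tmp/test_keys.db")
if err != nil {
return nil, err
}
store.db.CreateTable(&GormPrivateKey{})
return store, nil
}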
// Name returns a user friendly name for the storage location
func (s *SQLKeyDBStore) Name() string {
return s.dbType
}
// AddKey stores the contents of a private key. Both role and gun are ignored;
// we always use key IDs as names and don't support aliases
func (s *SQLKeyDBStore) AddKey(role data.RoleName, gun data.GUN, privKey data.PrivateKey) error {
passphrase, _, err := s.retriever(privKey.ID(), s.defaultPassAlias, false, 1)
if err != nil {
return err
}
encryptedKey, err := jose.Encrypt(string(privKey.Private()), KeywrapAlg, EncryptionAlg, passphrase)
if err != nil {
return err
}
gormPrivKey := GormPrivateKey{
KeyID: privKey.ID(),
EncryptionAlg: EncryptionAlg,
KeywrapAlg: KeywrapAlg,
PassphraseAlias: s.defaultPassAlias,
Algorithm: privKey.Algorithm(),
Gun: gun.String(),
Role: role.String(),
Public: string(privKey.Public()),
Private: encryptedKey,
}
// Add encrypted private key to the database
s.db.Create(&gormPrivKey)
// NewRecord returns false if the row was persisted, i.e. Create succeeded
failure := s.db.NewRecord(gormPrivKey)
if failure {
return fmt.Errorf("failed to add private key to database: %s", privKey.ID())
}
return nil
}
func (s *SQLKeyDBStore) getKey(keyID string, markActive bool) (*GormPrivateKey, string, error) {
// Retrieve the GORM private key from the database
dbPrivateKey := GormPrivateKey{}
if s.db.Where(&GormPrivateKey{KeyID: keyID}).First(&dbPrivateKey).RecordNotFound() {
return nil, "", trustmanager.ErrKeyNotFound{KeyID: keyID}
}
// Get the passphrase to use for this key
passphrase, _, err := s.retriever(dbPrivateKey.KeyID, dbPrivateKey.PassphraseAlias, false, 1)
if err != nil {
return nil, "", err
}
// Decrypt private bytes from the gorm key
decryptedPrivKey, _, err := jose.Decode(dbPrivateKey.Private, passphrase)
if err != nil {
return nil, "", err
}
return &dbPrivateKey, decryptedPrivKey, nil
}
// GetPrivateKey returns the PrivateKey given a KeyID
func (s *SQLKeyDBStore) GetPrivateKey(keyID string) (data.PrivateKey, data.RoleName, error) {
// Retrieve the GORM private key from the database
dbPrivateKey, decryptedPrivKey, err := s.getKey(keyID, true)
if err != nil {
return nil, "", err
}
pubKey := data.NewPublicKey(dbPrivateKey.Algorithm, []byte(dbPrivateKey.Public))
// Create a new PrivateKey with unencrypted bytes
privKey, err := data.NewPrivateKey(pubKey, []byte(decryptedPrivKey))
if err != nil {
return nil, "", err
}
return activatingPrivateKey{PrivateKey: privKey, activationFunc: s.markActive}, data.RoleName(dbPrivateKey.Role), nil
}
// ListKeys always returns nil. This method is here to satisfy the CryptoService interface
func (s *SQLKeyDBStore) ListKeys(role data.RoleName) []string {
return nil
}
// ListAllKeys always returns nil. This method is here to satisfy the CryptoService interface
func (s *SQLKeyDBStore) ListAllKeys() map[string]data.RoleName {
return nil
}
// RemoveKey removes the key from the keyfilestore
func (s *SQLKeyDBStore) RemoveKey(keyID string) error {
// Delete the key from the database
s.db.Where(&GormPrivateKey{KeyID: keyID}).Delete(&GormPrivateKey{})
return nil
}
// RotateKeyPassphrase rotates the key-encryption-key
func (s *SQLKeyDBStore) RotateKeyPassphrase(keyID, newPassphraseAlias string) error {
// Retrieve the GORM private key from the database
dbPrivateKey, decryptedPrivKey, err := s.getKey(keyID, false)
if err != nil {
return err
}
// Get the new passphrase to use for this key
newPassphrase, _, err := s.retriever(dbPrivateKey.KeyID, newPassphraseAlias, false, 1)
if err != nil {
return err
}
// Re-encrypt the private bytes with the new passphrase
newEncryptedKey, err := jose.Encrypt(decryptedPrivKey, KeywrapAlg, EncryptionAlg, newPassphrase)
if err != nil {
return err
}
// want to only update 2 fields, not save the whole row - we have to use the where clause because key_id is not
// the primary key
return s.db.Model(GormPrivateKey{}).Where("key_id = ?", keyID).Updates(GormPrivateKey{
Private: newEncryptedKey,
PassphraseAlias: newPassphraseAlias,
}).Error
}
// markActive marks a particular key as active
func (s *SQLKeyDBStore) markActive(keyID string) error {
// we have to use the where clause because key_id is not the primary key
return s.db.Model(GormPrivateKey{}).Where("key_id = ?", keyID).Updates(GormPrivateKey{LastUsed: s.nowFunc()}).Error
}
// Create will attempt to first re-use an inactive key for the same role, gun, and algorithm.
// If one isn't found, it will create a private key and add it to the DB as an inactive key
func (s *SQLKeyDBStore) Create(role data.RoleName, gun data.GUN, algorithm string) (data.PublicKey, error) {
// If an unused key of this role, gun, and algorithm exists, simply return it; otherwise generate a new key and store it as inactive
dbPrivateKey := GormPrivateKey{}
if !s.db.Model(GormPrivateKey{}).Where("role = ? AND gun = ? AND algorithm = ? AND last_used IS NULL", role.String(), gun.String(), algorithm).Order("key_id").First(&dbPrivateKey).RecordNotFound() {
// Just return the public key component if we found one
return data.NewPublicKey(dbPrivateKey.Algorithm, []byte(dbPrivateKey.Public)), nil
}
privKey, err := generatePrivateKey(algorithm)
if err != nil {
return nil, err
}
if err = s.AddKey(role, gun, privKey); err != nil {
return nil, fmt.Errorf("failed to store key: %v", err)
}
return privKey, nil
}
// GetKey performs the same get as GetPrivateKey, but does not mark the key as active and only returns the public bytes
func (s *SQLKeyDBStore) GetKey(keyID string) data.PublicKey {
privKey, _, err := s.getKey(keyID, false)
if err != nil {
return nil
}
return data.NewPublicKey(privKey.Algorithm, []byte(privKey.Public))
}
// HealthCheck verifies that DB exists and is query-able
func (s *SQLKeyDBStore) HealthCheck() (err error) {
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("panic checking db health: %v", r)
}
}()
dbPrivateKey := GormPrivateKey{}
tableOk := s.db.HasTable(&dbPrivateKey)
switch {
case s.db.Error != nil:
return s.db.Error
case !tableOk:
return fmt.Errorf(
"cannot access table: %s", dbPrivateKey.TableName())
}
return nil
}
package storage
import (
"errors"
"fmt"
)
var (
// ErrPathOutsideStore indicates that the returned path would be
// outside the store
ErrPathOutsideStore = errors.New("path outside file store")
)
// ErrMetaNotFound indicates we did not find a particular piece
// of metadata in the store
type ErrMetaNotFound struct {
Resource string
}
func (err ErrMetaNotFound) Error() string {
return fmt.Sprintf("%s trust data unavailable. Has a notary repository been initialized?", err.Resource)
}
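// A hedged handling sketch, not part of the original source: callers usually
// distinguish missing metadata from other failures with a type assertion.
func exampleIsMetaNotFound(err error) bool {
_, ok := err.(ErrMetaNotFound)
return ok
}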
package storage
import (
"bytes"
"encoding/pem"
"fmt"
"io"
"io/ioutil"
"os"
"path/filepath"
"strings"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
)
// NewFileStore creates a fully configurable file store
func NewFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
baseDir = filepath.Clean(baseDir)
if err := createDirectory(baseDir, notary.PrivExecPerms); err != nil {
return nil, err
}
if !strings.HasPrefix(fileExt, ".") {
fileExt = "." + fileExt
}
return &FilesystemStore{
baseDir: baseDir,
ext: fileExt,
}, nil
}
// NewPrivateKeyFileStorage initializes a new filestore for private keys, appending
// the notary.PrivDir to the baseDir.
func NewPrivateKeyFileStorage(baseDir, fileExt string) (*FilesystemStore, error) {
baseDir = filepath.Join(baseDir, notary.PrivDir)
myStore, err := NewFileStore(baseDir, fileExt)
if err != nil {
return nil, err
}
myStore.migrateTo0Dot4()
return myStore, nil
}
// NewPrivateSimpleFileStore is a wrapper to create an owner readable/writeable
// _only_ filestore
func NewPrivateSimpleFileStore(baseDir, fileExt string) (*FilesystemStore, error) {
return NewFileStore(baseDir, fileExt)
}
// FilesystemStore is a store in a locally accessible directory
type FilesystemStore struct {
baseDir string
ext string
}
func (f *FilesystemStore) moveKeyTo0Dot4Location(file string) {
keyID := filepath.Base(file)
fileDir := filepath.Dir(file)
d, _ := f.Get(file)
block, _ := pem.Decode(d)
if block == nil {
logrus.Warn("Key data for", file, "could not be decoded as a valid PEM block. The key will not been migrated and may not be available")
return
}
fileDir = strings.TrimPrefix(fileDir, notary.RootKeysSubdir)
fileDir = strings.TrimPrefix(fileDir, notary.NonRootKeysSubdir)
if fileDir != "" {
block.Headers["gun"] = filepath.ToSlash(fileDir[1:])
}
if strings.Contains(keyID, "_") {
role := strings.Split(keyID, "_")[1]
keyID = strings.TrimSuffix(keyID, "_"+role)
block.Headers["role"] = role
}
var keyPEM bytes.Buffer
// since block came from decoding the PEM bytes in the first place, and all we're doing is adding some headers, we ignore the possibility of an error while encoding the block
pem.Encode(&keyPEM, block)
f.Set(keyID, keyPEM.Bytes())
}
func (f *FilesystemStore) migrateTo0Dot4() {
rootKeysSubDir := filepath.Clean(filepath.Join(f.Location(), notary.RootKeysSubdir))
nonRootKeysSubDir := filepath.Clean(filepath.Join(f.Location(), notary.NonRootKeysSubdir))
if _, err := os.Stat(rootKeysSubDir); !os.IsNotExist(err) && f.Location() != rootKeysSubDir {
if rootKeysSubDir == "" || rootKeysSubDir == "/" {
// making sure we don't remove a user's homedir
logrus.Warn("The directory for root keys is an unsafe value, we are not going to delete the directory. Please delete it manually")
} else {
// root_keys exists, migrate things from it
listOnlyRootKeysDirStore, _ := NewFileStore(rootKeysSubDir, f.ext)
for _, file := range listOnlyRootKeysDirStore.ListFiles() {
f.moveKeyTo0Dot4Location(filepath.Join(notary.RootKeysSubdir, file))
}
// delete the old directory
os.RemoveAll(rootKeysSubDir)
}
}
if _, err := os.Stat(nonRootKeysSubDir); !os.IsNotExist(err) && f.Location() != nonRootKeysSubDir {
if nonRootKeysSubDir == "" || nonRootKeysSubDir == "/" {
// making sure we don't remove a user's homedir
logrus.Warn("The directory for non root keys is an unsafe value, we are not going to delete the directory. Please delete it manually")
} else {
// tuf_keys exists, migrate things from it
listOnlyNonRootKeysDirStore, _ := NewFileStore(nonRootKeysSubDir, f.ext)
for _, file := range listOnlyNonRootKeysDirStore.ListFiles() {
f.moveKeyTo0Dot4Location(filepath.Join(notary.NonRootKeysSubdir, file))
}
// delete the old directory
os.RemoveAll(nonRootKeysSubDir)
}
}
// if we have a trusted_certificates folder, delete it for a complete migration since it is unused by new clients
certsSubDir := filepath.Join(f.Location(), "trusted_certificates")
if certsSubDir == "" || certsSubDir == "/" {
logrus.Warn("The directory for trusted certificates is an unsafe value; we are not going to delete it. Please delete it manually")
} else {
os.RemoveAll(certsSubDir)
}
}
func (f *FilesystemStore) getPath(name string) (string, error) {
fileName := fmt.Sprintf("%s%s", name, f.ext)
fullPath := filepath.Join(f.baseDir, fileName)
if !strings.HasPrefix(fullPath, f.baseDir) {
return "", ErrPathOutsideStore
}
return fullPath, nil
}
// GetSized returns the meta for the given name (a role) up to size bytes
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize". If the file is larger than size
// we return ErrMaliciousServer for consistency with the HTTPStore
func (f *FilesystemStore) GetSized(name string, size int64) ([]byte, error) {
p, err := f.getPath(name)
if err != nil {
return nil, err
}
file, err := os.Open(p)
if err != nil {
if os.IsNotExist(err) {
err = ErrMetaNotFound{Resource: name}
}
return nil, err
}
defer func() {
_ = file.Close()
}()
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
stat, err := file.Stat()
if err != nil {
return nil, err
}
if stat.Size() > size {
return nil, ErrMaliciousServer{}
}
l := io.LimitReader(file, size)
return ioutil.ReadAll(l)
}
// Get returns the meta for the given name.
func (f *FilesystemStore) Get(name string) ([]byte, error) {
p, err := f.getPath(name)
if err != nil {
return nil, err
}
meta, err := ioutil.ReadFile(p)
if err != nil {
if os.IsNotExist(err) {
err = ErrMetaNotFound{Resource: name}
}
return nil, err
}
return meta, nil
}
// SetMulti sets the metadata for multiple roles in one operation
func (f *FilesystemStore) SetMulti(metas map[string][]byte) error {
for role, blob := range metas {
err := f.Set(role, blob)
if err != nil {
return err
}
}
return nil
}
// Set sets the meta for a single role
func (f *FilesystemStore) Set(name string, meta []byte) error {
fp, err := f.getPath(name)
if err != nil {
return err
}
// Ensures the parent directories of the file we are about to write exist
err = os.MkdirAll(filepath.Dir(fp), notary.PrivExecPerms)
if err != nil {
return err
}
// if something already exists, just delete it and re-write it
os.RemoveAll(fp)
// Write the file to disk
return ioutil.WriteFile(fp, meta, notary.PrivNoExecPerms)
}
// RemoveAll clears the existing filestore by removing its base directory
func (f *FilesystemStore) RemoveAll() error {
return os.RemoveAll(f.baseDir)
}
// Remove removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (f *FilesystemStore) Remove(name string) error {
p, err := f.getPath(name)
if err != nil {
return err
}
return os.RemoveAll(p) // RemoveAll succeeds if path doesn't exist
}
// Location returns a human readable name for the storage location
func (f FilesystemStore) Location() string {
return f.baseDir
}
// ListFiles returns a list of all the filenames that can be used with Get*
// to retrieve content from this filestore
func (f FilesystemStore) ListFiles() []string {
files := make([]string, 0)
filepath.Walk(f.baseDir, func(fp string, fi os.FileInfo, err error) error {
// If there are errors, ignore this particular file
if err != nil {
return nil
}
// Ignore if it is a directory
if fi.IsDir() {
return nil
}
// If this is a symlink, ignore it
if fi.Mode()&os.ModeSymlink == os.ModeSymlink {
return nil
}
// Only allow matches that end with our file extension (e.g. *.json)
matched, _ := filepath.Match("*"+f.ext, fi.Name())
if matched {
// Find the relative path for this file relative to the base path.
fp, err = filepath.Rel(f.baseDir, fp)
if err != nil {
return err
}
trimmed := strings.TrimSuffix(fp, f.ext)
files = append(files, trimmed)
}
return nil
})
return files
}
// createDirectory receives a string of the path to a directory.
// It does not support passing files, so the caller has to remove
// the filename by doing filepath.Dir(full_path_to_file)
func createDirectory(dir string, perms os.FileMode) error {
// This prevents someone passing /path/to/dir and 'dir' not being created
// If two '//' exist, MkdirAll deals it with correctly
dir = dir + "/"
return os.MkdirAll(dir, perms)
}
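// A hedged round-trip sketch, not part of the original source: a
// FilesystemStore keyed by role name with a ".json" extension, which is how
// TUF metadata is cached on disk.
func exampleFileStoreRoundTrip(baseDir string) ([]byte, error) {
store, err := NewFileStore(baseDir, "json")
if err != nil {
return nil, err
}
if err := store.Set("root", []byte(`{"signed":{}}`)); err != nil {
return nil, err
}
// ListFiles would now include "root", and the name works directly with Get.
return store.Get("root")
}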
// A Store that can fetch and set metadata on a remote server.
// Some API constraints:
// - Response bodies for error codes should be unmarshallable as:
// {"errors": [{..., "detail": <serialized validation error>}]}
// else validation error details, etc. will be unparsable. The errors
// should have a github.com/theupdateframework/notary/tuf/validation/SerializableError
// in the Details field.
// If writing your own server, please have a look at
// github.com/docker/distribution/registry/api/errcode
package storage
import (
"bytes"
"encoding/json"
"errors"
"fmt"
"io"
"io/ioutil"
"mime/multipart"
"net/http"
"net/url"
"path"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/validation"
)
const (
// MaxErrorResponseSize is the maximum size for an error message - 1KiB
MaxErrorResponseSize int64 = 1 << 10
// MaxKeySize is the maximum size for a stored TUF key - 256KiB
MaxKeySize = 256 << 10
)
// ErrServerUnavailable indicates an error from the server. The code field
// records the HTTP status we received
type ErrServerUnavailable struct {
code int
}
// NetworkError represents any kind of network error when attempting to make a request
type NetworkError struct {
Wrapped error
}
func (n NetworkError) Error() string {
if _, ok := n.Wrapped.(*url.Error); ok {
// QueryUnescape does the inverse transformation of QueryEscape,
// converting %AB into the byte 0xAB and '+' into ' ' (space).
// It returns an error if any % is not followed by two hexadecimal digits.
//
// If this happens, we log out the QueryUnescape error and return the
// original error to client.
res, err := url.QueryUnescape(n.Wrapped.Error())
if err != nil {
logrus.Errorf("unescape network error message failed: %s", err)
return n.Wrapped.Error()
}
return res
}
return n.Wrapped.Error()
}
func (err ErrServerUnavailable) Error() string {
if err.code == 401 {
return "you are not authorized to perform this operation: server returned 401."
}
return fmt.Sprintf("unable to reach trust server at this time: %d.", err.code)
}
// ErrMaliciousServer indicates the server returned a response that is highly suspected
// of being malicious. i.e. it attempted to send us more data than the known size of a
// particular role metadata.
type ErrMaliciousServer struct{}
func (err ErrMaliciousServer) Error() string {
return "trust server returned a bad response."
}
// ErrInvalidOperation indicates that the server returned a 400 response and
// propagate any body we received.
type ErrInvalidOperation struct {
msg string
}
func (err ErrInvalidOperation) Error() string {
if err.msg != "" {
return fmt.Sprintf("trust server rejected operation: %s", err.msg)
}
return "trust server rejected operation."
}
// HTTPStore manages pulling and pushing metadata from and to a remote
// service over HTTP. It assumes the URL structure of the remote service
// maps identically to the structure of the TUF repo:
// <baseURL>/<metaPrefix>/(root|targets|snapshot|timestamp).json
// <baseURL>/<targetsPrefix>/foo.sh
//
// If consistent snapshots are disabled, it is advised that caching is not
// enabled. Simply set a cachePath (and ensure it's writable) to enable
// caching.
type HTTPStore struct {
baseURL url.URL
metaPrefix string
metaExtension string
keyExtension string
roundTrip http.RoundTripper
}
// NewNotaryServerStore returns a new HTTPStore against a URL which should represent a notary
// server
func NewNotaryServerStore(serverURL string, gun data.GUN, roundTrip http.RoundTripper) (RemoteStore, error) {
return NewHTTPStore(
serverURL+"/v2/"+gun.String()+"/_trust/tuf/",
"",
"json",
"key",
roundTrip,
)
}
// NewHTTPStore initializes a new store against a URL and a number of configuration options.
//
// In case of a nil `roundTrip`, a default offline store is used instead.
func NewHTTPStore(baseURL, metaPrefix, metaExtension, keyExtension string, roundTrip http.RoundTripper) (RemoteStore, error) {
base, err := url.Parse(baseURL)
if err != nil {
return nil, err
}
if !base.IsAbs() {
return nil, errors.New("HTTPStore requires an absolute baseURL")
}
if roundTrip == nil {
return &OfflineStore{}, nil
}
return &HTTPStore{
baseURL: *base,
metaPrefix: metaPrefix,
metaExtension: metaExtension,
keyExtension: keyExtension,
roundTrip: roundTrip,
}, nil
}
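// A hedged construction sketch, not part of the original source: point a
// store at a notary server for one GUN using the default transport. The
// server URL and GUN are illustrative. Passing a nil transport would yield
// an OfflineStore instead.
func exampleNewNotaryServerStore() (RemoteStore, error) {
return NewNotaryServerStore("https://notary.example.com", "example.com/app", http.DefaultTransport)
}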
func tryUnmarshalError(resp *http.Response, defaultError error) error {
b := io.LimitReader(resp.Body, MaxErrorResponseSize)
bodyBytes, err := ioutil.ReadAll(b)
if err != nil {
return defaultError
}
var parsedErrors struct {
Errors []struct {
Detail validation.SerializableError `json:"detail"`
} `json:"errors"`
}
if err := json.Unmarshal(bodyBytes, &parsedErrors); err != nil {
return defaultError
}
if len(parsedErrors.Errors) != 1 {
return defaultError
}
err = parsedErrors.Errors[0].Detail.Error
if err == nil {
return defaultError
}
return err
}
func translateStatusToError(resp *http.Response, resource string) error {
switch resp.StatusCode {
case http.StatusOK:
return nil
case http.StatusNotFound:
return ErrMetaNotFound{Resource: resource}
case http.StatusBadRequest:
return tryUnmarshalError(resp, ErrInvalidOperation{})
default:
return ErrServerUnavailable{code: resp.StatusCode}
}
}
// GetSized downloads the named meta file with the given size. A short body
// is acceptable because in the case of timestamp.json, the size is a cap,
// not an exact length.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize".
func (s HTTPStore) GetSized(name string, size int64) ([]byte, error) {
url, err := s.buildMetaURL(name)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, NetworkError{Wrapped: err}
}
defer resp.Body.Close()
if err := translateStatusToError(resp, name); err != nil {
logrus.Debugf("received HTTP status %d when requesting %s.", resp.StatusCode, name)
return nil, err
}
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
if resp.ContentLength > size {
return nil, ErrMaliciousServer{}
}
logrus.Debugf("%d when retrieving metadata for %s", resp.StatusCode, name)
b := io.LimitReader(resp.Body, size)
body, err := ioutil.ReadAll(b)
if err != nil {
return nil, err
}
return body, nil
}
// Set sends a single piece of metadata to the TUF server
func (s HTTPStore) Set(name string, blob []byte) error {
return s.SetMulti(map[string][]byte{name: blob})
}
// Remove always fails, because we should never be able to delete metadata
// remotely
func (s HTTPStore) Remove(name string) error {
return ErrInvalidOperation{msg: "cannot delete individual metadata files"}
}
// NewMultiPartMetaRequest builds a request with the provided metadata updates
// in multipart form
func NewMultiPartMetaRequest(url string, metas map[string][]byte) (*http.Request, error) {
body := &bytes.Buffer{}
writer := multipart.NewWriter(body)
for role, blob := range metas {
part, err := writer.CreateFormFile("files", role)
if err != nil {
return nil, err
}
_, err = io.Copy(part, bytes.NewBuffer(blob))
if err != nil {
return nil, err
}
}
err := writer.Close()
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url, body)
if err != nil {
return nil, err
}
req.Header.Set("Content-Type", writer.FormDataContentType())
return req, nil
}
// SetMulti does a single batch upload of multiple pieces of TUF metadata.
// This should be preferred for updating a remote server as it enables the server
// to remain consistent, either accepting or rejecting the complete update.
func (s HTTPStore) SetMulti(metas map[string][]byte) error {
url, err := s.buildMetaURL("")
if err != nil {
return err
}
req, err := NewMultiPartMetaRequest(url.String(), metas)
if err != nil {
return err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return NetworkError{Wrapped: err}
}
defer resp.Body.Close()
// if this 404's something is pretty wrong
return translateStatusToError(resp, "POST metadata endpoint")
}
// RemoveAll will attempt to delete all TUF metadata for a GUN
func (s HTTPStore) RemoveAll() error {
url, err := s.buildMetaURL("")
if err != nil {
return err
}
req, err := http.NewRequest("DELETE", url.String(), nil)
if err != nil {
return err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return NetworkError{Wrapped: err}
}
defer resp.Body.Close()
return translateStatusToError(resp, "DELETE metadata for GUN endpoint")
}
func (s HTTPStore) buildMetaURL(name string) (*url.URL, error) {
var filename string
if name != "" {
filename = fmt.Sprintf("%s.%s", name, s.metaExtension)
}
uri := path.Join(s.metaPrefix, filename)
return s.buildURL(uri)
}
func (s HTTPStore) buildKeyURL(name data.RoleName) (*url.URL, error) {
filename := fmt.Sprintf("%s.%s", name.String(), s.keyExtension)
uri := path.Join(s.metaPrefix, filename)
return s.buildURL(uri)
}
func (s HTTPStore) buildURL(uri string) (*url.URL, error) {
sub, err := url.Parse(uri)
if err != nil {
return nil, err
}
return s.baseURL.ResolveReference(sub), nil
}
// GetKey retrieves a public key from the remote server
func (s HTTPStore) GetKey(role data.RoleName) ([]byte, error) {
url, err := s.buildKeyURL(role)
if err != nil {
return nil, err
}
req, err := http.NewRequest("GET", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, NetworkError{Wrapped: err}
}
defer resp.Body.Close()
if err := translateStatusToError(resp, role.String()+" key"); err != nil {
return nil, err
}
b := io.LimitReader(resp.Body, MaxKeySize)
body, err := ioutil.ReadAll(b)
if err != nil {
return nil, err
}
return body, nil
}
// RotateKey rotates a private key and returns the public component from the remote server
func (s HTTPStore) RotateKey(role data.RoleName) ([]byte, error) {
url, err := s.buildKeyURL(role)
if err != nil {
return nil, err
}
req, err := http.NewRequest("POST", url.String(), nil)
if err != nil {
return nil, err
}
resp, err := s.roundTrip.RoundTrip(req)
if err != nil {
return nil, NetworkError{Wrapped: err}
}
defer resp.Body.Close()
if err := translateStatusToError(resp, role.String()+" key"); err != nil {
return nil, err
}
b := io.LimitReader(resp.Body, MaxKeySize)
body, err := ioutil.ReadAll(b)
if err != nil {
return nil, err
}
return body, nil
}
// Location returns a human readable name for the storage location
func (s HTTPStore) Location() string {
return s.baseURL.Host
}
package storage
import (
"crypto/sha256"
"encoding/json"
"fmt"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
// NewMemoryStore returns a MetadataStore that operates entirely in memory.
// Very useful for testing
func NewMemoryStore(seed map[data.RoleName][]byte) *MemoryStore {
var (
consistent = make(map[string][]byte)
initial = make(map[string][]byte)
)
// add all seed meta to consistent
for name, d := range seed {
checksum := sha256.Sum256(d)
path := utils.ConsistentName(name.String(), checksum[:])
initial[name.String()] = d
consistent[path] = d
}
return &MemoryStore{
data: initial,
consistent: consistent,
}
}
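// A hedged usage sketch, not part of the original source: seed a MemoryStore
// with timestamp metadata; the blob becomes retrievable both by its plain
// role name and by its consistent (checksum-prefixed) name.
func exampleSeededMemoryStore(tsJSON []byte) *MemoryStore {
return NewMemoryStore(map[data.RoleName][]byte{
data.CanonicalTimestampRole: tsJSON,
})
}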
// MemoryStore implements a mock RemoteStore entirely in memory.
// For testing purposes only.
type MemoryStore struct {
data map[string][]byte
consistent map[string][]byte
}
// GetSized returns up to size bytes of data referenced by name.
// If size is "NoSizeLimit", this corresponds to "infinite," but we cut off at a
// predefined threshold "notary.MaxDownloadSize", as we will always know the
// size for everything but a timestamp and sometimes a root,
// neither of which should be exceptionally large
func (m MemoryStore) GetSized(name string, size int64) ([]byte, error) {
d, ok := m.data[name]
if ok {
if size == NoSizeLimit {
size = notary.MaxDownloadSize
}
if int64(len(d)) < size {
return d, nil
}
return d[:size], nil
}
d, ok = m.consistent[name]
if ok {
if int64(len(d)) < size {
return d, nil
}
return d[:size], nil
}
return nil, ErrMetaNotFound{Resource: name}
}
// Get returns the data associated with name
func (m MemoryStore) Get(name string) ([]byte, error) {
if d, ok := m.data[name]; ok {
return d, nil
}
if d, ok := m.consistent[name]; ok {
return d, nil
}
return nil, ErrMetaNotFound{Resource: name}
}
// Set sets the metadata value for the given name
func (m *MemoryStore) Set(name string, meta []byte) error {
m.data[name] = meta
parsedMeta := &data.SignedMeta{}
err := json.Unmarshal(meta, parsedMeta)
if err == nil {
// no parse error means this is metadata and not a key, so store by version
version := parsedMeta.Signed.Version
versionedName := fmt.Sprintf("%d.%s", version, name)
m.data[versionedName] = meta
}
checksum := sha256.Sum256(meta)
path := utils.ConsistentName(name, checksum[:])
m.consistent[path] = meta
return nil
}
// SetMulti sets multiple pieces of metadata for multiple names
// in a single operation.
func (m *MemoryStore) SetMulti(metas map[string][]byte) error {
for role, blob := range metas {
m.Set(role, blob)
}
return nil
}
// Remove removes the metadata for a single role - if the metadata doesn't
// exist, no error is returned
func (m *MemoryStore) Remove(name string) error {
if meta, ok := m.data[name]; ok {
checksum := sha256.Sum256(meta)
path := utils.ConsistentName(name, checksum[:])
delete(m.data, name)
delete(m.consistent, path)
}
return nil
}
// RemoveAll clears the existing memory store by replacing it with a new empty one
func (m *MemoryStore) RemoveAll() error {
*m = *NewMemoryStore(nil)
return nil
}
// Location provides a human readable name for the storage location
func (m MemoryStore) Location() string {
return "memory"
}
// ListFiles returns a list of all files. The names returned should be
// usable with Get directly, with no modification.
func (m *MemoryStore) ListFiles() []string {
names := make([]string, 0, len(m.data))
for n := range m.data {
names = append(names, n)
}
return names
}
package storage
import (
"github.com/theupdateframework/notary/tuf/data"
)
// ErrOffline is used to indicate we are operating offline
type ErrOffline struct{}
func (e ErrOffline) Error() string {
return "client is offline"
}
var err = ErrOffline{}
// OfflineStore is to be used as a placeholder for a nil store. It simply
// returns ErrOffline for every operation
type OfflineStore struct{}
// GetSized returns ErrOffline
func (es OfflineStore) GetSized(name string, size int64) ([]byte, error) {
return nil, err
}
// Set returns ErrOffline
func (es OfflineStore) Set(name string, blob []byte) error {
return err
}
// SetMulti returns ErrOffline
func (es OfflineStore) SetMulti(map[string][]byte) error {
return err
}
// Remove returns ErrOffline
func (es OfflineStore) Remove(name string) error {
return err
}
// GetKey returns ErrOffline
func (es OfflineStore) GetKey(role data.RoleName) ([]byte, error) {
return nil, err
}
// RotateKey returns ErrOffline
func (es OfflineStore) RotateKey(role data.RoleName) ([]byte, error) {
return nil, err
}
// RemoveAll returns ErrOffline
func (es OfflineStore) RemoveAll() error {
return err
}
// Location returns a human readable name for the storage location
func (es OfflineStore) Location() string {
return "offline"
}
package trustmanager
import "fmt"
// ErrAttemptsExceeded is returned when too many attempts have been made to decrypt a key
type ErrAttemptsExceeded struct{}
// Error is returned when too many attempts have been made to decrypt a key
func (err ErrAttemptsExceeded) Error() string {
return "maximum number of passphrase attempts exceeded"
}
// ErrPasswordInvalid is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
type ErrPasswordInvalid struct{}
// Error is returned when signing fails. It could also mean the signing
// key file was corrupted, but we have no way to distinguish.
func (err ErrPasswordInvalid) Error() string {
return "password invalid, operation has failed."
}
// ErrKeyNotFound is returned when the keystore fails to retrieve a specific key.
type ErrKeyNotFound struct {
KeyID string
}
// Error is returned when the keystore fails to retrieve a specific key.
func (err ErrKeyNotFound) Error() string {
return fmt.Sprintf("signing key not found: %s", err.KeyID)
}
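// A hedged classification sketch, not part of the original source: a caller
// deciding whether to stop trying further key stores after an error.
func exampleIsFatalKeystoreError(err error) bool {
switch err.(type) {
case ErrPasswordInvalid, ErrAttemptsExceeded:
return true
default:
return false
}
}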
package trustmanager
import (
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"path/filepath"
"sort"
"strings"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
tufdata "github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
// Exporter is a simple interface for the two functions we need from the Storage interface
type Exporter interface {
Get(string) ([]byte, error)
ListFiles() []string
}
// Importer is a simple interface for the one function we need from the Storage interface
type Importer interface {
Set(string, []byte) error
}
// ExportKeysByGUN exports all keys filtered to a GUN
func ExportKeysByGUN(to io.Writer, s Exporter, gun string) error {
keys := s.ListFiles()
sort.Strings(keys) // ensure consistency. ListFiles has no order guarantee
for _, loc := range keys {
keyFile, err := s.Get(loc)
if err != nil {
logrus.Warn("Could not parse key file at ", loc)
continue
}
block, _ := pem.Decode(keyFile)
if block == nil {
logrus.Warn("Could not decode PEM block at ", loc)
continue
}
keyGun := block.Headers["gun"]
if keyGun == gun { // must be full GUN match
if err := ExportKeys(to, s, loc); err != nil {
return err
}
}
}
return nil
}
// ExportKeysByID exports all keys matching the given ID
func ExportKeysByID(to io.Writer, s Exporter, ids []string) error {
want := make(map[string]struct{})
for _, id := range ids {
want[id] = struct{}{}
}
keys := s.ListFiles()
for _, k := range keys {
id := filepath.Base(k)
if _, ok := want[id]; ok {
if err := ExportKeys(to, s, k); err != nil {
return err
}
}
}
return nil
}
// ExportKeys copies a key from the store to the io.Writer
func ExportKeys(to io.Writer, s Exporter, from string) error {
// get PEM block
k, err := s.Get(from)
if err != nil {
return err
}
// parse PEM blocks if there are more than one
for block, rest := pem.Decode(k); block != nil; block, rest = pem.Decode(rest) {
// add from path in a header for later import
block.Headers["path"] = from
// write serialized PEM
err = pem.Encode(to, block)
if err != nil {
return err
}
}
return nil
}
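// A hedged usage sketch, not part of the original source: export keys from a
// store into any io.Writer. The GUN and key ID below are placeholders.
func exampleExport(w io.Writer, s Exporter) error {
if err := ExportKeysByGUN(w, s, "example.com/app"); err != nil {
return err
}
return ExportKeysByID(w, s, []string{"someKeyID"})
}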
// ImportKeys expects an io.Reader containing one or more PEM blocks.
// It reads PEM blocks one at a time until pem.Decode returns a nil
// block.
// Each block is written to the subpath indicated in the "path" PEM
// header. If the file already exists, the file is truncated. Multiple
// adjacent PEMs with the same "path" header are appended together.
func ImportKeys(from io.Reader, to []Importer, fallbackRole string, fallbackGUN string, passRet notary.PassRetriever) error {
// importLogic.md contains a small flowchart I made to clear up my understanding while writing the cases in this function
// it is very rough, but it may help while reading this piece of code
data, err := ioutil.ReadAll(from)
if err != nil {
return err
}
var (
writeTo string
toWrite []byte
errBlocks []string
)
for block, rest := pem.Decode(data); block != nil; block, rest = pem.Decode(rest) {
handleLegacyPath(block)
setFallbacks(block, fallbackGUN, fallbackRole)
loc, err := checkValidity(block)
if err != nil {
// already logged in checkValidity
errBlocks = append(errBlocks, err.Error())
continue
}
// the path header is not of any use once we've imported the key so strip it away
delete(block.Headers, "path")
// we are now all set for import but let's first encrypt the key
blockBytes := pem.EncodeToMemory(block)
// check if key is encrypted, note: if it is encrypted at this point, it will have had a path header
if privKey, err := utils.ParsePEMPrivateKey(blockBytes, ""); err == nil {
// Key is not encrypted; ask for a passphrase and encrypt this key
var chosenPassphrase string
for attempts := 0; ; attempts++ {
var giveup bool
chosenPassphrase, giveup, err = passRet(loc, block.Headers["role"], true, attempts)
if err == nil {
break
}
if giveup || attempts > 10 {
return errors.New("maximum number of passphrase attempts exceeded")
}
}
blockBytes, err = utils.ConvertPrivateKeyToPKCS8(privKey, tufdata.RoleName(block.Headers["role"]), tufdata.GUN(block.Headers["gun"]), chosenPassphrase)
if err != nil {
return errors.New("failed to encrypt key with given passphrase")
}
}
if loc != writeTo {
// next location is different from previous one. We've finished aggregating
// data for the previous file. If we have data, write the previous file,
// clear toWrite and set writeTo to the next path we're going to write
if toWrite != nil {
if err = importToStores(to, writeTo, toWrite); err != nil {
return err
}
}
// set up for aggregating next file's data
toWrite = nil
writeTo = loc
}
toWrite = append(toWrite, blockBytes...)
}
if toWrite != nil { // close out final iteration if there's data left
if err := importToStores(to, writeTo, toWrite); err != nil {
return err
}
}
if len(errBlocks) > 0 {
return fmt.Errorf("failed to import all keys: %s", strings.Join(errBlocks, ", "))
}
return nil
}
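// Illustrative sketch (not part of the original source): re-importing a bundle
// such as the one exported above. The fallback role and GUN apply only to
// blocks whose PEM headers lack "role"/"gun" values; "fileStore" and
// "retriever" are hypothetical caller-side values.
//
//	f, err := os.Open("backup.pem")
//	if err != nil {
//		return err
//	}
//	defer f.Close()
//	// unencrypted keys are encrypted with a passphrase from the retriever
//	// before being written to the first store that accepts them
//	err = ImportKeys(f, []Importer{fileStore}, "targets", "docker.com/notary", retriever)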
func handleLegacyPath(block *pem.Block) {
// if there is a legacy path then we set the gun header from this path
// this is the case when a user attempts to import a key bundle generated by an older client
if rawPath := block.Headers["path"]; rawPath != "" && rawPath != filepath.Base(rawPath) {
// this is a legacy filepath and we should try to deduce the gun name from it
pathWOFileName := filepath.Dir(rawPath)
if strings.HasPrefix(pathWOFileName, notary.NonRootKeysSubdir) {
// remove the notary keystore-specific segment of the path, and any potential leading or trailing slashes
gunName := strings.Trim(strings.TrimPrefix(pathWOFileName, notary.NonRootKeysSubdir), "/")
if gunName != "" {
block.Headers["gun"] = gunName
}
}
block.Headers["path"] = filepath.Base(rawPath)
}
}
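// For example, assuming notary.NonRootKeysSubdir is "tuf_keys", a legacy
// header of
//
//	path: tuf_keys/docker.com/notary/abc123.key
//
// is rewritten by handleLegacyPath to gun "docker.com/notary" and path
// "abc123.key".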
func setFallbacks(block *pem.Block, fallbackGUN, fallbackRole string) {
if block.Headers["gun"] == "" {
if fallbackGUN != "" {
block.Headers["gun"] = fallbackGUN
}
}
if block.Headers["role"] == "" {
if fallbackRole == "" {
block.Headers["role"] = notary.DefaultImportRole
} else {
block.Headers["role"] = fallbackRole
}
}
}
// checkValidity ensures the fields in the pem headers are valid and parses out the location.
// While importing a collection of keys, errors from this function should result in only the
// current pem block being skipped.
func checkValidity(block *pem.Block) (string, error) {
// A root key or a delegations key should not have a gun
// Note that a key that is not any of the canonical roles (except root) is a delegations key and should not have a gun
switch block.Headers["role"] {
case tufdata.CanonicalSnapshotRole.String(), tufdata.CanonicalTargetsRole.String(), tufdata.CanonicalTimestampRole.String():
// check if the key is missing a gun header or has an empty gun and error out since we don't know what gun it belongs to
if block.Headers["gun"] == "" {
logrus.Warnf("failed to import key (%s) to store: Cannot have canonical role key without a gun, don't know what gun it belongs to", block.Headers["path"])
return "", errors.New("invalid key pem block")
}
default:
delete(block.Headers, "gun")
}
loc, ok := block.Headers["path"]
// we only fall into this inference logic when no path is specified
if !ok || loc == "" {
// with no path specified, we try to infer the path relative to the trust dir from the role (and then the gun)
// parse the key to recover the key ID, which we will save it under.
// if the key is still encrypted at this point, return an error so the caller skips this block, since we cannot learn the ID to save it by
decodedKey, err := utils.ParsePEMPrivateKey(pem.EncodeToMemory(block), "")
if err != nil {
logrus.Warn("failed to import key to store: Invalid key generated, key may be encrypted and does not contain path header")
return "", errors.New("invalid key pem block")
}
loc = decodedKey.ID()
}
return loc, nil
}
func importToStores(to []Importer, path string, bytes []byte) error {
var err error
for _, i := range to {
if err = i.Set(path, bytes); err != nil {
logrus.Errorf("failed to import key to store: %s", err.Error())
continue
}
break
}
return err
}
package trustmanager
import (
"bytes"
"crypto/rand"
"encoding/pem"
"errors"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
type TestImportStore struct {
data map[string][]byte
}
func NewTestImportStore() *TestImportStore {
return &TestImportStore{
data: make(map[string][]byte),
}
}
func (s *TestImportStore) Set(name string, data []byte) error {
s.data[name] = data
return nil
}
type TestExportStore struct {
data map[string][]byte
}
func NewTestExportStore() *TestExportStore {
return &TestExportStore{
data: make(map[string][]byte),
}
}
func (s *TestExportStore) Get(name string) ([]byte, error) {
if data, ok := s.data[name]; ok {
return data, nil
}
return nil, errors.New("Not Found")
}
func (s *TestExportStore) ListFiles() []string {
files := make([]string, 0, len(s.data))
for k := range s.data {
files = append(files, k)
}
return files
}
func TestExportKeys(t *testing.T) {
s := NewTestExportStore()
b := &pem.Block{}
b.Bytes = make([]byte, 1000)
rand.Read(b.Bytes)
c := &pem.Block{}
c.Bytes = make([]byte, 1000)
rand.Read(c.Bytes)
bBytes := pem.EncodeToMemory(b)
cBytes := pem.EncodeToMemory(c)
s.data["ankh"] = bBytes
s.data["morpork"] = cBytes
buf := bytes.NewBuffer(nil)
err := ExportKeys(buf, s, "ankh")
require.NoError(t, err)
err = ExportKeys(buf, s, "morpork")
require.NoError(t, err)
out, err := ioutil.ReadAll(buf)
require.NoError(t, err)
bFinal, rest := pem.Decode(out)
require.Equal(t, b.Bytes, bFinal.Bytes)
require.Equal(t, "ankh", bFinal.Headers["path"])
cFinal, rest := pem.Decode(rest)
require.Equal(t, c.Bytes, cFinal.Bytes)
require.Equal(t, "morpork", cFinal.Headers["path"])
require.Len(t, rest, 0)
}
func TestExportKeysByGUN(t *testing.T) {
s := NewTestExportStore()
keyHeaders := make(map[string]string)
keyHeaders["gun"] = "ankh"
keyHeaders["role"] = "snapshot"
b := &pem.Block{
Headers: keyHeaders,
}
b.Bytes = make([]byte, 1000)
rand.Read(b.Bytes)
b2 := &pem.Block{
Headers: keyHeaders,
}
b2.Bytes = make([]byte, 1000)
rand.Read(b2.Bytes)
otherHeaders := make(map[string]string)
otherHeaders["gun"] = "morpork"
otherHeaders["role"] = "snapshot"
c := &pem.Block{
Headers: otherHeaders,
}
c.Bytes = make([]byte, 1000)
rand.Read(c.Bytes)
bBytes := pem.EncodeToMemory(b)
b2Bytes := pem.EncodeToMemory(b2)
cBytes := pem.EncodeToMemory(c)
s.data["one"] = bBytes
s.data["two"] = b2Bytes
s.data["three"] = cBytes
buf := bytes.NewBuffer(nil)
err := ExportKeysByGUN(buf, s, "ankh")
require.NoError(t, err)
out, err := ioutil.ReadAll(buf)
require.NoError(t, err)
bFinal, rest := pem.Decode(out)
require.Equal(t, b.Bytes, bFinal.Bytes)
require.Equal(t, "one", bFinal.Headers["path"])
b2Final, rest := pem.Decode(rest)
require.Equal(t, b2.Bytes, b2Final.Bytes)
require.Equal(t, "two", b2Final.Headers["path"])
require.Len(t, rest, 0)
}
func TestExportKeysByID(t *testing.T) {
s := NewTestExportStore()
b := &pem.Block{}
b.Bytes = make([]byte, 1000)
rand.Read(b.Bytes)
c := &pem.Block{}
c.Bytes = make([]byte, 1000)
rand.Read(c.Bytes)
bBytes := pem.EncodeToMemory(b)
cBytes := pem.EncodeToMemory(c)
s.data["ankh"] = bBytes
s.data["morpork/identifier"] = cBytes
buf := bytes.NewBuffer(nil)
err := ExportKeysByID(buf, s, []string{"identifier"})
require.NoError(t, err)
out, err := ioutil.ReadAll(buf)
require.NoError(t, err)
cFinal, rest := pem.Decode(out)
require.Equal(t, c.Bytes, cFinal.Bytes)
require.Equal(t, "morpork/identifier", cFinal.Headers["path"])
require.Len(t, rest, 0)
}
func TestExport2InOneFile(t *testing.T) {
s := NewTestExportStore()
b := &pem.Block{}
b.Bytes = make([]byte, 1000)
rand.Read(b.Bytes)
b2 := &pem.Block{}
b2.Bytes = make([]byte, 1000)
rand.Read(b2.Bytes)
c := &pem.Block{}
c.Bytes = make([]byte, 1000)
rand.Read(c.Bytes)
bBytes := pem.EncodeToMemory(b)
b2Bytes := pem.EncodeToMemory(b2)
bBytes = append(bBytes, b2Bytes...)
cBytes := pem.EncodeToMemory(c)
s.data["ankh"] = bBytes
s.data["morpork"] = cBytes
buf := bytes.NewBuffer(nil)
err := ExportKeys(buf, s, "ankh")
require.NoError(t, err)
err = ExportKeys(buf, s, "morpork")
require.NoError(t, err)
out, err := ioutil.ReadAll(buf)
require.NoError(t, err)
bFinal, rest := pem.Decode(out)
require.Equal(t, b.Bytes, bFinal.Bytes)
require.Equal(t, "ankh", bFinal.Headers["path"])
b2Final, rest := pem.Decode(rest)
require.Equal(t, b2.Bytes, b2Final.Bytes)
require.Equal(t, "ankh", b2Final.Headers["path"])
cFinal, rest := pem.Decode(rest)
require.Equal(t, c.Bytes, cFinal.Bytes)
require.Equal(t, "morpork", cFinal.Headers["path"])
require.Len(t, rest, 0)
}
func TestImportKeys(t *testing.T) {
s := NewTestImportStore()
from, _ := os.Open("../fixtures/secure.example.com.key")
defer from.Close()
b := &pem.Block{
Headers: make(map[string]string),
}
b.Bytes, _ = ioutil.ReadAll(from)
rand.Read(b.Bytes)
b.Headers["path"] = "ankh"
c := &pem.Block{
Headers: make(map[string]string),
}
c.Bytes, _ = ioutil.ReadAll(from)
rand.Read(c.Bytes)
c.Headers["path"] = "morpork"
c.Headers["role"] = data.CanonicalSnapshotRole.String()
c.Headers["gun"] = "somegun"
bBytes := pem.EncodeToMemory(b)
cBytes := pem.EncodeToMemory(c)
byt := append(bBytes, cBytes...)
in := bytes.NewBuffer(byt)
err := ImportKeys(in, []Importer{s}, "", "", passphraseRetriever)
require.NoError(t, err)
bFinal, bRest := pem.Decode(s.data["ankh"])
require.Equal(t, b.Bytes, bFinal.Bytes)
_, ok := bFinal.Headers["path"]
require.False(t, ok, "expected no path header, should have been removed at import")
require.Equal(t, notary.DefaultImportRole, bFinal.Headers["role"]) // if no role is specified we assume it is a delegation key
_, ok = bFinal.Headers["gun"]
require.False(t, ok, "expected no gun header, should have been removed at import")
require.Len(t, bRest, 0)
cFinal, cRest := pem.Decode(s.data["morpork"])
require.Equal(t, c.Bytes, cFinal.Bytes)
_, ok = cFinal.Headers["path"]
require.False(t, ok, "expected no path header, should have been removed at import")
require.EqualValues(t, data.CanonicalSnapshotRole, cFinal.Headers["role"])
require.Equal(t, "somegun", cFinal.Headers["gun"])
require.Len(t, cRest, 0)
}
func TestImportNoPath(t *testing.T) {
s := NewTestImportStore()
from, _ := os.Open("../fixtures/secure.example.com.key")
defer from.Close()
fromBytes, _ := ioutil.ReadAll(from)
in := bytes.NewBuffer(fromBytes)
err := ImportKeys(in, []Importer{s}, data.CanonicalRootRole.String(), "", passphraseRetriever)
require.NoError(t, err)
for key := range s.data {
// no path but role included should work
require.Equal(t, "12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497", key)
}
s = NewTestImportStore()
err = ImportKeys(in, []Importer{s}, "", "", passphraseRetriever)
require.NoError(t, err)
// nothing should have been imported: the buffer was fully drained by the
// first ImportKeys call, so there is no data left to read
require.Len(t, s.data, 0)
}
func TestNonRootPathInference(t *testing.T) {
s := NewTestImportStore()
from, _ := os.Open("../fixtures/secure.example.com.key")
defer from.Close()
fromBytes, _ := ioutil.ReadAll(from)
in := bytes.NewBuffer(fromBytes)
err := ImportKeys(in, []Importer{s}, data.CanonicalSnapshotRole.String(), "somegun", passphraseRetriever)
require.NoError(t, err)
for key := range s.data {
// no path but role included should work and 12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497 is the key ID of the fixture
require.Equal(t, "12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497", key)
}
}
func TestBlockHeaderPrecedenceRoleAndGun(t *testing.T) {
s := NewTestImportStore()
from, _ := os.Open("../fixtures/secure.example.com.key")
defer from.Close()
fromBytes, _ := ioutil.ReadAll(from)
b, _ := pem.Decode(fromBytes)
b.Headers["role"] = data.CanonicalSnapshotRole.String()
b.Headers["gun"] = "anothergun"
bBytes := pem.EncodeToMemory(b)
in := bytes.NewBuffer(bBytes)
err := ImportKeys(in, []Importer{s}, "somerole", "somegun", passphraseRetriever)
require.NoError(t, err)
require.Len(t, s.data, 1)
for key := range s.data {
// the role=snapshot block header should take precedence over the command line flag
require.Equal(t, "12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497", key)
final, rest := pem.Decode(s.data[key])
require.Len(t, rest, 0)
require.Equal(t, final.Headers["role"], "snapshot")
require.Equal(t, final.Headers["gun"], "anothergun")
}
}
func TestBlockHeaderPrecedenceGunFromPath(t *testing.T) {
// this is a proof of concept that if we have legacy fixtures with nested paths, we infer the gun from them correctly
s := NewTestImportStore()
from, _ := os.Open("../fixtures/secure.example.com.key")
defer from.Close()
fromBytes, _ := ioutil.ReadAll(from)
b, _ := pem.Decode(fromBytes)
b.Headers["role"] = data.CanonicalSnapshotRole.String()
b.Headers["path"] = filepath.Join(notary.NonRootKeysSubdir, "anothergun", "12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497")
bBytes := pem.EncodeToMemory(b)
in := bytes.NewBuffer(bBytes)
err := ImportKeys(in, []Importer{s}, "somerole", "somegun", passphraseRetriever)
require.NoError(t, err)
require.Len(t, s.data, 1)
for key := range s.data {
// the role=snapshot block header should take precedence over the command line flag
require.Equal(t, "12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497", key)
final, rest := pem.Decode(s.data[key])
require.Len(t, rest, 0)
require.Equal(t, "snapshot", final.Headers["role"])
require.Equal(t, "anothergun", final.Headers["gun"])
}
}
func TestImportKeys2InOneFile(t *testing.T) {
s := NewTestImportStore()
b := &pem.Block{
Headers: make(map[string]string),
}
b.Bytes = make([]byte, 1000)
rand.Read(b.Bytes)
b.Headers["path"] = "ankh"
b2 := &pem.Block{
Headers: make(map[string]string),
}
b2.Bytes = make([]byte, 1000)
rand.Read(b2.Bytes)
b2.Headers["path"] = "ankh"
c := &pem.Block{
Headers: make(map[string]string),
}
c.Bytes = make([]byte, 1000)
rand.Read(c.Bytes)
c.Headers["path"] = "morpork"
bBytes := pem.EncodeToMemory(b)
b2Bytes := pem.EncodeToMemory(b2)
bBytes = append(bBytes, b2Bytes...)
cBytes := pem.EncodeToMemory(c)
byt := append(bBytes, cBytes...)
in := bytes.NewBuffer(byt)
err := ImportKeys(in, []Importer{s}, "", "", passphraseRetriever)
require.NoError(t, err)
bFinal, bRest := pem.Decode(s.data["ankh"])
require.Equal(t, b.Bytes, bFinal.Bytes)
_, ok := bFinal.Headers["path"]
require.False(t, ok, "expected no path header, should have been removed at import")
role := bFinal.Headers["role"]
require.Equal(t, notary.DefaultImportRole, role)
b2Final, b2Rest := pem.Decode(bRest)
require.Equal(t, b2.Bytes, b2Final.Bytes)
_, ok = b2Final.Headers["path"]
require.False(t, ok, "expected no path header, should have been removed at import")
require.Len(t, b2Rest, 0)
cFinal, cRest := pem.Decode(s.data["morpork"])
require.Equal(t, c.Bytes, cFinal.Bytes)
_, ok = cFinal.Headers["path"]
require.False(t, ok, "expected no path header, should have been removed at import")
require.Len(t, cRest, 0)
}
func TestImportKeys2InOneFileNoPath(t *testing.T) {
s := NewTestImportStore()
from, _ := os.Open("../fixtures/secure.example.com.key")
defer from.Close()
fromBytes, _ := ioutil.ReadAll(from)
b, _ := pem.Decode(fromBytes)
b.Headers["gun"] = "testgun"
b.Headers["role"] = data.CanonicalSnapshotRole.String()
bBytes := pem.EncodeToMemory(b)
b2, _ := pem.Decode(fromBytes)
b2.Headers["gun"] = "testgun"
b2.Headers["role"] = data.CanonicalSnapshotRole.String()
b2Bytes := pem.EncodeToMemory(b2)
c := &pem.Block{
Headers: make(map[string]string),
}
c.Bytes = make([]byte, 1000)
rand.Read(c.Bytes)
c.Headers["path"] = "morpork"
bBytes = append(bBytes, b2Bytes...)
cBytes := pem.EncodeToMemory(c)
byt := append(bBytes, cBytes...)
in := bytes.NewBuffer(byt)
err := ImportKeys(in, []Importer{s}, "", "", passphraseRetriever)
require.NoError(t, err)
bFinal, bRest := pem.Decode(s.data["12ba0e0a8e05e177bc2c3489bdb6d28836879469f078e68a4812fc8a2d521497"])
require.Equal(t, b.Headers["gun"], bFinal.Headers["gun"])
require.Equal(t, b.Headers["role"], bFinal.Headers["role"])
b2Final, b2Rest := pem.Decode(bRest)
require.Equal(t, b2.Headers["gun"], b2Final.Headers["gun"])
require.Equal(t, b2.Headers["role"], b2Final.Headers["role"])
require.Len(t, b2Rest, 0)
cFinal, cRest := pem.Decode(s.data["morpork"])
require.Equal(t, c.Bytes, cFinal.Bytes)
_, ok := cFinal.Headers["path"]
require.False(t, ok, "expected no path header, should have been removed at import")
require.Len(t, cRest, 0)
}
// no path and encrypted key import should fail
func TestEncryptedKeyImportFail(t *testing.T) {
s := NewTestImportStore()
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
pemBytes, err := utils.ConvertPrivateKeyToPKCS8(privKey, data.CanonicalRootRole, "", cannedPassphrase)
require.NoError(t, err)
in := bytes.NewBuffer(pemBytes)
_ = ImportKeys(in, []Importer{s}, "", "", passphraseRetriever)
require.Len(t, s.data, 0)
}
// path and encrypted key should succeed, tests gun inference from path as well
func TestEncryptedKeyImportSuccess(t *testing.T) {
s := NewTestImportStore()
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
originalKey := privKey.Private()
pemBytes, err := utils.ConvertPrivateKeyToPKCS8(privKey, data.CanonicalSnapshotRole, "somegun", cannedPassphrase)
require.NoError(t, err)
b, _ := pem.Decode(pemBytes)
b.Headers["path"] = privKey.ID()
pemBytes = pem.EncodeToMemory(b)
in := bytes.NewBuffer(pemBytes)
_ = ImportKeys(in, []Importer{s}, "", "", passphraseRetriever)
require.Len(t, s.data, 1)
keyBytes := s.data[privKey.ID()]
bFinal, bRest := pem.Decode(keyBytes)
require.Equal(t, "somegun", bFinal.Headers["gun"])
require.Len(t, bRest, 0)
// we should fail to parse it without the passphrase
privKey, err = utils.ParsePEMPrivateKey(keyBytes, "")
require.Equal(t, err, errors.New("could not decrypt private key"))
require.Nil(t, privKey)
// we should succeed to parse it with the passphrase
privKey, err = utils.ParsePEMPrivateKey(keyBytes, cannedPassphrase)
require.NoError(t, err)
require.Equal(t, originalKey, privKey.Private())
}
func TestEncryption(t *testing.T) {
s := NewTestImportStore()
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err)
originalKey := privKey.Private()
pemBytes, err := utils.ConvertPrivateKeyToPKCS8(privKey, "", "", "")
require.NoError(t, err)
in := bytes.NewBuffer(pemBytes)
_ = ImportKeys(in, []Importer{s}, "", "", passphraseRetriever)
require.Len(t, s.data, 1)
shouldBeEnc, ok := s.data[privKey.ID()]
// we should have got a key imported to this location
require.True(t, ok)
// we should fail to parse it without the passphrase
privKey, err = utils.ParsePEMPrivateKey(shouldBeEnc, "")
require.Equal(t, err, errors.New("could not decrypt private key"))
require.Nil(t, privKey)
// we should succeed to parse it with the passphrase
privKey, err = utils.ParsePEMPrivateKey(shouldBeEnc, cannedPassphrase)
require.NoError(t, err)
require.Equal(t, originalKey, privKey.Private())
}
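// ExampleImportKeys is an illustrative sketch added for exposition; it is not
// part of the original test suite. It round-trips a block from an export store
// to an import store: the bytes below are not a real key, so ImportKeys skips
// the encryption step and simply re-encodes the block, minus its "path"
// header, at the path that ExportKeys recorded.
func ExampleImportKeys() {
src := NewTestExportStore()
src.data["ankh"] = pem.EncodeToMemory(&pem.Block{
Type: "FAKE KEY",
Headers: map[string]string{"role": "targets/releases"},
Bytes: []byte("example bytes, not a real key"),
})
buf := bytes.NewBuffer(nil)
if err := ExportKeys(buf, src, "ankh"); err != nil {
return
}
dst := NewTestImportStore()
// the exported block carries path=ankh, so it lands at dst.data["ankh"]
_ = ImportKeys(buf, []Importer{dst}, "", "", passphraseRetriever)
}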
package trustmanager
import (
"fmt"
"path/filepath"
"strings"
"sync"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
store "github.com/theupdateframework/notary/storage"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
type keyInfoMap map[string]KeyInfo
type cachedKey struct {
role data.RoleName
key data.PrivateKey
}
// GenericKeyStore is a wrapper for Storage instances that provides
// translation between the []byte form and Public/PrivateKey objects
type GenericKeyStore struct {
store Storage
sync.Mutex
notary.PassRetriever
cachedKeys map[string]*cachedKey
keyInfoMap
}
// NewKeyFileStore returns a new KeyFileStore creating a private directory to
// hold the keys.
func NewKeyFileStore(baseDir string, p notary.PassRetriever) (*GenericKeyStore, error) {
fileStore, err := store.NewPrivateKeyFileStorage(baseDir, notary.KeyExtension)
if err != nil {
return nil, err
}
return NewGenericKeyStore(fileStore, p), nil
}
// NewKeyMemoryStore returns a new KeyMemoryStore which holds keys in memory
func NewKeyMemoryStore(p notary.PassRetriever) *GenericKeyStore {
memStore := store.NewMemoryStore(nil)
return NewGenericKeyStore(memStore, p)
}
// NewGenericKeyStore creates a GenericKeyStore wrapping the provided
// Storage instance, using the PassRetriever to enc/decrypt keys
func NewGenericKeyStore(s Storage, p notary.PassRetriever) *GenericKeyStore {
ks := GenericKeyStore{
store: s,
PassRetriever: p,
cachedKeys: make(map[string]*cachedKey),
keyInfoMap: make(keyInfoMap),
}
ks.loadKeyInfo()
return &ks
}
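// Illustrative sketch (not part of the original source): constructing a
// memory-backed store with a constant-passphrase retriever. The retriever and
// key generation shown here are hypothetical caller-side code.
//
//	retriever := func(keyID, alias string, createNew bool, attempts int) (string, bool, error) {
//		return "passphrase", false, nil
//	}
//	ks := NewKeyMemoryStore(retriever)
//	privKey, _ := utils.GenerateECDSAKey(rand.Reader)
//	_ = ks.AddKey(KeyInfo{Role: data.CanonicalTargetsRole, Gun: "docker.com/notary"}, privKey)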
func generateKeyInfoMap(s Storage) map[string]KeyInfo {
keyInfoMap := make(map[string]KeyInfo)
for _, keyPath := range s.ListFiles() {
d, err := s.Get(keyPath)
if err != nil {
logrus.Error(err)
continue
}
keyID, keyInfo, err := KeyInfoFromPEM(d, keyPath)
if err != nil {
logrus.Error(err)
continue
}
keyInfoMap[keyID] = keyInfo
}
return keyInfoMap
}
func (s *GenericKeyStore) loadKeyInfo() {
s.keyInfoMap = generateKeyInfoMap(s.store)
}
// GetKeyInfo returns the corresponding gun and role key info for a keyID
func (s *GenericKeyStore) GetKeyInfo(keyID string) (KeyInfo, error) {
if info, ok := s.keyInfoMap[keyID]; ok {
return info, nil
}
return KeyInfo{}, fmt.Errorf("could not find info for keyID %s", keyID)
}
// AddKey stores the contents of a PEM-encoded private key as a PEM block
func (s *GenericKeyStore) AddKey(keyInfo KeyInfo, privKey data.PrivateKey) error {
var (
chosenPassphrase string
giveup bool
err error
pemPrivKey []byte
)
s.Lock()
defer s.Unlock()
if keyInfo.Role == data.CanonicalRootRole || data.IsDelegation(keyInfo.Role) || !data.ValidRole(keyInfo.Role) {
keyInfo.Gun = ""
}
keyID := privKey.ID()
for attempts := 0; ; attempts++ {
chosenPassphrase, giveup, err = s.PassRetriever(keyID, keyInfo.Role.String(), true, attempts)
if err == nil {
break
}
if giveup || attempts > 10 {
return ErrAttemptsExceeded{}
}
}
pemPrivKey, err = utils.ConvertPrivateKeyToPKCS8(privKey, keyInfo.Role, keyInfo.Gun, chosenPassphrase)
if err != nil {
return err
}
s.cachedKeys[keyID] = &cachedKey{role: keyInfo.Role, key: privKey}
err = s.store.Set(keyID, pemPrivKey)
if err != nil {
return err
}
s.keyInfoMap[privKey.ID()] = keyInfo
return nil
}
// GetKey returns the PrivateKey given a KeyID
func (s *GenericKeyStore) GetKey(keyID string) (data.PrivateKey, data.RoleName, error) {
s.Lock()
defer s.Unlock()
cachedKeyEntry, ok := s.cachedKeys[keyID]
if ok {
return cachedKeyEntry.key, cachedKeyEntry.role, nil
}
role, err := getKeyRole(s.store, keyID)
if err != nil {
return nil, "", err
}
keyBytes, err := s.store.Get(keyID)
if err != nil {
return nil, "", err
}
// See if the key is encrypted. If it's encrypted, parsing it without a passphrase will fail
privKey, err := utils.ParsePEMPrivateKey(keyBytes, "")
if err != nil {
privKey, _, err = GetPasswdDecryptBytes(s.PassRetriever, keyBytes, keyID, string(role))
if err != nil {
return nil, "", err
}
}
s.cachedKeys[keyID] = &cachedKey{role: role, key: privKey}
return privKey, role, nil
}
// ListKeys returns a map of key IDs to KeyInfo (role and gun) for every key in the store, by returning a copy of the keyInfoMap
func (s *GenericKeyStore) ListKeys() map[string]KeyInfo {
return copyKeyInfoMap(s.keyInfoMap)
}
// RemoveKey removes the key from the keyfilestore
func (s *GenericKeyStore) RemoveKey(keyID string) error {
s.Lock()
defer s.Unlock()
delete(s.cachedKeys, keyID)
err := s.store.Remove(keyID)
if err != nil {
return err
}
delete(s.keyInfoMap, keyID)
return nil
}
// Name returns a user-friendly name for the location where this store
// keeps its data
func (s *GenericKeyStore) Name() string {
return s.store.Location()
}
// copyKeyInfoMap returns a deep copy of the passed-in keyInfoMap
func copyKeyInfoMap(keyInfoMap map[string]KeyInfo) map[string]KeyInfo {
copyMap := make(map[string]KeyInfo)
for keyID, keyInfo := range keyInfoMap {
copyMap[keyID] = KeyInfo{Role: keyInfo.Role, Gun: keyInfo.Gun}
}
return copyMap
}
// KeyInfoFromPEM attempts to get a keyID and KeyInfo from the filename and PEM bytes of a key
func KeyInfoFromPEM(pemBytes []byte, filename string) (string, KeyInfo, error) {
keyID := filepath.Base(filename)
role, gun, err := utils.ExtractPrivateKeyAttributes(pemBytes)
if err != nil {
return "", KeyInfo{}, err
}
return keyID, KeyInfo{Gun: gun, Role: role}, nil
}
// getKeyRole finds the role for the given keyID. It attempts to look
// both in the newer format PEM headers, and also in the legacy filename
// format. It returns: the role, and an error
func getKeyRole(s Storage, keyID string) (data.RoleName, error) {
name := strings.TrimSpace(strings.TrimSuffix(filepath.Base(keyID), filepath.Ext(keyID)))
for _, file := range s.ListFiles() {
filename := filepath.Base(file)
if strings.HasPrefix(filename, name) {
d, err := s.Get(file)
if err != nil {
return "", err
}
role, _, err := utils.ExtractPrivateKeyAttributes(d)
if err != nil {
return "", err
}
return role, nil
}
}
return "", ErrKeyNotFound{KeyID: keyID}
}
// GetPasswdDecryptBytes gets the password to decrypt the given pem bytes.
// Returns the decrypted private key and the password that was used
func GetPasswdDecryptBytes(passphraseRetriever notary.PassRetriever, pemBytes []byte, name, alias string) (data.PrivateKey, string, error) {
var (
passwd string
privKey data.PrivateKey
)
for attempts := 0; ; attempts++ {
var (
giveup bool
err error
)
if attempts > 10 {
return nil, "", ErrAttemptsExceeded{}
}
passwd, giveup, err = passphraseRetriever(name, alias, false, attempts)
// Check if the passphrase retriever got an error or if it is telling us to give up
if giveup || err != nil {
return nil, "", ErrPasswordInvalid{}
}
// Try to convert PEM encoded bytes back to a PrivateKey using the passphrase
privKey, err = utils.ParsePEMPrivateKey(pemBytes, passwd)
if err == nil {
// We managed to parse the PrivateKey. We've succeeded!
break
}
}
return privKey, passwd, nil
}
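// decryptWithRetriever is an illustrative sketch added for exposition; it is
// not part of the original source. It shows the intended call pattern around
// GetPasswdDecryptBytes: first try a passphrase-less parse (as GetKey does
// above), and only fall back to prompting when the key is actually encrypted.
func decryptWithRetriever(retriever notary.PassRetriever, pemBytes []byte, keyID string, role data.RoleName) (data.PrivateKey, error) {
privKey, err := utils.ParsePEMPrivateKey(pemBytes, "")
if err == nil {
return privKey, nil
}
// a retriever error or giveup surfaces as ErrPasswordInvalid; more than 10
// wrong passphrases surface as ErrAttemptsExceeded
privKey, _, err = GetPasswdDecryptBytes(retriever, pemBytes, keyID, role.String())
return privKey, err
}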
package trustmanager
import (
"crypto/rand"
"encoding/pem"
"errors"
"io/ioutil"
"os"
"path/filepath"
"testing"
"github.com/stretchr/testify/require"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/tuf/data"
"github.com/theupdateframework/notary/tuf/utils"
)
const cannedPassphrase = "passphrase"
var passphraseRetriever = func(keyID string, alias string, createNew bool, numAttempts int) (string, bool, error) {
if numAttempts > 5 {
giveup := true
return "", giveup, errors.New("passPhraseRetriever failed after too many requests")
}
return cannedPassphrase, false, nil
}
func TestAddKey(t *testing.T) {
testAddKeyWithRole(t, data.CanonicalRootRole)
testAddKeyWithRole(t, data.CanonicalTargetsRole)
testAddKeyWithRole(t, data.CanonicalSnapshotRole)
testAddKeyWithRole(t, "targets/a/b/c")
testAddKeyWithRole(t, "invalidRole")
}
func testAddKeyWithRole(t *testing.T, role data.RoleName) {
var gun data.GUN = "docker.com/notary"
testExt := "key"
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
// Create our store
store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Since we're constructing this file path manually, we need to add the '.' separator and the extension ourselves
expectedFilePath := filepath.Join(tempBaseDir, notary.PrivDir, privKey.ID()+"."+testExt)
// Call the AddKey function
err = store.AddKey(KeyInfo{Role: role, Gun: gun}, privKey)
require.NoError(t, err, "failed to add key to store")
// Check to see if file exists
b, err := ioutil.ReadFile(expectedFilePath)
require.NoError(t, err, "expected file not found")
require.Contains(t, string(b), "-----BEGIN ENCRYPTED PRIVATE KEY-----")
// Check that we have the role and gun info for this key's ID
keyInfo, ok := store.keyInfoMap[privKey.ID()]
require.True(t, ok)
require.Equal(t, role, keyInfo.Role)
if role == data.CanonicalRootRole || data.IsDelegation(role) || !data.ValidRole(role) {
require.Empty(t, keyInfo.Gun.String())
} else {
require.EqualValues(t, gun, keyInfo.Gun.String())
}
}
func TestKeyStoreInternalState(t *testing.T) {
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
var gun data.GUN = "docker.com/notary"
// Mimic a notary repo setup, and test that bringing up a keyfilestore creates the correct keyInfoMap
roles := []data.RoleName{data.CanonicalRootRole, data.CanonicalTargetsRole, data.CanonicalSnapshotRole, data.RoleName("targets/delegation")}
// Keep track of the key IDs for each role, so we can validate later against the keystore state
roleToID := make(map[string]string)
for _, role := range roles {
// generate a key for the role
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
var privKeyPEM []byte
// generate the correct PEM role header
if role == data.CanonicalRootRole || data.IsDelegation(role) || !data.ValidRole(role) {
privKeyPEM, err = utils.ConvertPrivateKeyToPKCS8(privKey, role, "", "")
} else {
privKeyPEM, err = utils.ConvertPrivateKeyToPKCS8(privKey, role, gun, "")
}
require.NoError(t, err, "could not generate PEM")
// write the key file to the correct location
keyPath := filepath.Join(tempBaseDir, notary.PrivDir)
keyPath = filepath.Join(keyPath, privKey.ID())
require.NoError(t, os.MkdirAll(filepath.Dir(keyPath), 0755))
require.NoError(t, ioutil.WriteFile(keyPath+".key", privKeyPEM, 0755))
roleToID[role.String()] = privKey.ID()
}
store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever)
require.NoError(t, err)
require.Len(t, store.keyInfoMap, 4)
for _, role := range roles {
keyID := roleToID[role.String()]
// make sure this keyID is the right length
require.Len(t, keyID, notary.SHA256HexSize)
require.Equal(t, role, store.keyInfoMap[keyID].Role)
// targets and snapshot keys should have a gun set, root and delegation keys should not
if role == data.CanonicalTargetsRole || role == data.CanonicalSnapshotRole {
require.EqualValues(t, gun, store.keyInfoMap[keyID].Gun.String())
} else {
require.Empty(t, store.keyInfoMap[keyID].Gun.String())
}
}
// Try removing the targets key only by ID (no gun provided)
require.NoError(t, store.RemoveKey(roleToID[data.CanonicalTargetsRole.String()]))
// The key file itself should have been removed
_, err = os.Stat(filepath.Join(tempBaseDir, notary.PrivDir, roleToID[data.CanonicalTargetsRole.String()]+".key"))
require.Error(t, err)
// The keyInfoMap should have also updated by deleting the key
_, ok := store.keyInfoMap[roleToID[data.CanonicalTargetsRole.String()]]
require.False(t, ok)
// Try removing the delegation key only by ID (no gun provided)
require.NoError(t, store.RemoveKey(roleToID["targets/delegation"]))
// The key file itself should have been removed
_, err = os.Stat(filepath.Join(tempBaseDir, notary.PrivDir, roleToID["targets/delegation"]+".key"))
require.Error(t, err)
// The keyInfoMap should have also updated
_, ok = store.keyInfoMap[roleToID["targets/delegation"]]
require.False(t, ok)
// Try removing the root key only by ID (no gun provided)
require.NoError(t, store.RemoveKey(roleToID[data.CanonicalRootRole.String()]))
// The key file itself should have been removed
_, err = os.Stat(filepath.Join(tempBaseDir, notary.PrivDir, roleToID[data.CanonicalRootRole.String()]+".key"))
require.Error(t, err)
// The keyInfoMap should have also updated
_, ok = store.keyInfoMap[roleToID[data.CanonicalRootRole.String()]]
require.False(t, ok)
// Generate a new targets key and add it with its gun, check that the map gets updated back
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
require.NoError(t, store.AddKey(KeyInfo{Role: data.CanonicalTargetsRole, Gun: gun}, privKey))
require.Equal(t, gun, store.keyInfoMap[privKey.ID()].Gun)
require.Equal(t, data.CanonicalTargetsRole, store.keyInfoMap[privKey.ID()].Role)
}
func TestGet(t *testing.T) {
nonRootRolesToTest := []data.RoleName{
data.CanonicalTargetsRole,
data.CanonicalSnapshotRole,
"targets/a/b/c",
"invalidRole",
}
var gun data.GUN = "docker.io/notary"
testGetKeyWithRole(t, "", data.CanonicalRootRole)
for _, role := range nonRootRolesToTest {
testGetKeyWithRole(t, data.GUN(""), role)
testGetKeyWithRole(t, gun, role)
}
}
func testGetKeyWithRole(t *testing.T, gun data.GUN, role data.RoleName) {
testPEM := []byte(`-----BEGIN PRIVATE KEY-----
MIIEvgIBADANBgkqhkiG9w0BAQEFAASCBKgwggSkAgEAAoIBAQC2cL8WamG24ihl
JSVG8ZVel05lPqYD0S8ol1L+zzwsHkim2DS+a5BLX5+QJtCfZrR+Pzo+4pCrjU+N
R/71aYNm/M95h/JSJxdEoTgYCCHNJD8IYpTc6lXyy49lSQh7svLpZ2dQwHoGB5VC
tpsh8xvLLbXfk/G7ihEeZqG7/Tnoe+uotkiODOTjxiTGvQQjoAc4hQgzGH4sjC7U
8E8zB0j1BQWM/fhRX/ww3V/SRB2T1u0aAurF1BnUdDazZMBxWQ7DxmY3FNbeNXqf
KKeQMN1Rodu8hJw0gxL1hbOWmcYksmGZfPDzYXiHBdscCFr/wimOl9BO/o2xbV5+
phbph9cFAgMBAAECggEBAIAcA9L1uM/3V25O+zIqCj11+jLWHzWm+nqCaGFNnG9O
hK3EPKVKWvTSnPVYjD6inDPaqkfmSLhubmJDICGsif0ToY0xjVNq58flfcJCU5n9
zdVRhD7svpXTo0n4UuCp9DE5zy7BOe5p/MHwAFeCow21d3UcKi8K8KJsZz3ev38j
9Y8ASd24NcyZfE4mnjDjA/MuzlPoQYMwAh4f3mrEKu5v9dCT+m70lJTzSNAc4gD0
93mMkGRsUKjvZyCu/IlXncBczaSVovX5IGdiGPa7Qk+CP9r+PGQUasb+e5o7VMzh
xyjIrCV1u48vRyJsc7xrZ+PUkVk74u9mQ3wxQXNzi7ECgYEA5BftyMlzv2oqAzQg
isS0f616qX5YmRK/riC/4+HRaXEsA/LiI8tuW04vdgcelUqxo1TFpv+J4z16ItF5
kscb6ev9wsFa0VInsvI3hqZ8e4AuqlvU8Rii1anxkbwE5mstRgeR9p410+0T2GiW
JaWVy8mxsneVI0sdR5ooJ+ZBQpcCgYEAzMLtV52aQvnCLPejPI+fBnOjoLXTVaaB
xqZWfOzuozjYVlqSUsKbKbMVtIy+rPIJt26/qw8i6V8Dx2HlUcySU5fAumpWigK4
Dh64eZ+yJrQeqgRJoLoZhTbgxe4fv7+f649WcipwD0ptEaqjD11Wdr0973tw0wdc
Pqn9SlPoksMCgYBqUKj5xMRZvQ82DQ75/3Oua1rYM9byCmYjsIogmrn0Ltb4RDaZ
vpGCp2/B0NG1fmpMGhBCpatMqvQJ1J+ZBYuCPgg6xcsh8+wjIXk2HtW47udRappX
gkcr1hmN9xhFmkEw+ghT7ixiyodMgHszsvmeUjWsXMa7+5/7JuR+rHlQowKBgE0T
Lr3lMDT3yJSeno5kTWrjSntrFeLOq1j4MeQSV32PHzfaHewTHs7if1AYDooRDYFD
qdgc+Xo47rY1blmNFKNsovpInsySW2/NNolpiGizMjuzI3fhtUuErbUzfjXyTqMf
sF2HBelrjYSx43EcJDjL4S1tHLoCskFQQWyiCxB7AoGBANSohPiPmJLvCEmZTdHm
KcRNz9jE0wO5atCZADIfuOrYHYTQk3YTI5V3GviUNLdmbw4TQChwAgAYVNth1rpL
5jSqfF3RtNBePZixG2WzxYd2ZwvJxvKa33i1E8UfM+yEZH4Gc5ukDt28m0fyFBmi
QvS5quTEllrvrVuWfhpsjl/l
-----END PRIVATE KEY-----
`)
testBlock, _ := pem.Decode(testPEM)
require.NotEmpty(t, testBlock, "could not decode pem")
testPrivKey, err := utils.ParsePKCS8ToTufKey(testBlock.Bytes, nil)
require.NoError(t, err, "could not parse pkcs8 key")
testData, err := utils.ConvertPrivateKeyToPKCS8(testPrivKey, role, gun, "")
require.NoError(t, err, "could not wrap pkcs8 key")
testName := "keyID"
testExt := "key"
perms := os.FileMode(0755)
emptyPassphraseRetriever := func(string, string, bool, int) (string, bool, error) { return "", false, nil }
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
// Since we're constructing this file path manually, we need to add the '.' separator and the extension ourselves
filePath := filepath.Join(tempBaseDir, notary.PrivDir, testName+"."+testExt)
os.MkdirAll(filepath.Dir(filePath), perms)
err = ioutil.WriteFile(filePath, testData, perms)
require.NoError(t, err, "failed to write test file")
// Create our store
store, err := NewKeyFileStore(tempBaseDir, emptyPassphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
// Call the GetKey function
privKey, _, err := store.GetKey(testName)
require.NoError(t, err, "failed to get %s key from store (it's in %s)", role, filepath.Join(tempBaseDir, notary.PrivDir))
pemPrivKey, err := utils.ConvertPrivateKeyToPKCS8(privKey, role, gun, "")
require.NoError(t, err, "failed to convert key to PEM")
require.Equal(t, testData, pemPrivKey)
}
// TestGetLegacyKey ensures we can still load keys where the role
// is stored as part of the filename (i.e. <hexID>_<role>.key)
func TestGetLegacyKey(t *testing.T) {
if notary.FIPSEnabled() {
t.Skip("skip backward compatibility test in FIPS mode")
}
testData := []byte(`-----BEGIN RSA PRIVATE KEY-----
MIIEogIBAAKCAQEAyUIXjsrWRrvPa4Bzp3VJ6uOUGPay2fUpSV8XzNxZxIG/Opdr
+k3EQi1im6WOqF3Y5AS1UjYRxNuRN+cAZeo3uS1pOTuoSupBXuchVw8s4hZJ5vXn
TRmGb+xY7tZ1ZVgPfAZDib9sRSUsL/gC+aSyprAjG/YBdbF06qKbfOfsoCEYW1OQ
82JqHzQH514RFYPTnEGpvfxWaqmFQLmv0uMxV/cAYvqtrGkXuP0+a8PknlD2obw5
0rHE56Su1c3Q42S7L51K38tpbgWOSRcTfDUWEj5v9wokkNQvyKBwbS996s4EJaZd
7r6M0h1pHnuRxcSaZLYRwgOe1VNGg2VfWzgd5QIDAQABAoIBAF9LGwpygmj1jm3R
YXGd+ITugvYbAW5wRb9G9mb6wspnwNsGTYsz/UR0ZudZyaVw4jx8+jnV/i3e5PC6
QRcAgqf8l4EQ/UuThaZg/AlT1yWp9g4UyxNXja87EpTsGKQGwTYxZRM4/xPyWOzR
mt8Hm8uPROB9aA2JG9npaoQG8KSUj25G2Qot3ukw/IOtqwN/Sx1EqF0EfCH1K4KU
a5TrqlYDFmHbqT1zTRec/BTtVXNsg8xmF94U1HpWf3Lpg0BPYT7JiN2DPoLelRDy
a/A+a3ZMRNISL5wbq/jyALLOOyOkIqa+KEOeW3USuePd6RhDMzMm/0ocp5FCwYfo
k4DDeaECgYEA0eSMD1dPGo+u8UTD8i7ZsZCS5lmXLNuuAg5f5B/FGghD8ymPROIb
dnJL5QSbUpmBsYJ+nnO8RiLrICGBe7BehOitCKi/iiZKJO6edrfNKzhf4XlU0HFl
jAOMa975pHjeCoZ1cXJOEO9oW4SWTCyBDBSqH3/ZMgIOiIEk896lSmkCgYEA9Xf5
Jqv3HtQVvjugV/axAh9aI8LMjlfFr9SK7iXpY53UdcylOSWKrrDok3UnrSEykjm7
UL3eCU5jwtkVnEXesNn6DdYo3r43E6iAiph7IBkB5dh0yv3vhIXPgYqyTnpdz4pg
3yPGBHMPnJUBThg1qM7k6a2BKHWySxEgC1DTMB0CgYAGvdmF0J8Y0k6jLzs/9yNE
4cjmHzCM3016gW2xDRgumt9b2xTf+Ic7SbaIV5qJj6arxe49NqhwdESrFohrKaIP
kM2l/o2QaWRuRT/Pvl2Xqsrhmh0QSOQjGCYVfOb10nAHVIRHLY22W4o1jk+piLBo
a+1+74NRaOGAnu1J6/fRKQKBgAF180+dmlzemjqFlFCxsR/4G8s2r4zxTMXdF+6O
3zKuj8MbsqgCZy7e8qNeARxwpCJmoYy7dITNqJ5SOGSzrb2Trn9ClP+uVhmR2SH6
AlGQlIhPn3JNzI0XVsLIloMNC13ezvDE/7qrDJ677EQQtNEKWiZh1/DrsmHr+irX
EkqpAoGAJWe8PC0XK2RE9VkbSPg9Ehr939mOLWiHGYTVWPttUcum/rTKu73/X/mj
WxnPWGtzM1pHWypSokW90SP4/xedMxludvBvmz+CTYkNJcBGCrJumy11qJhii9xp
EMl3eFOJXjIch/wIesRSN+2dGOsl7neercjMh1i9RvpCwHDx/E0=
-----END RSA PRIVATE KEY-----
`)
testName := "docker.com/notary/root"
testExt := "key"
testAlias := "root"
perms := os.FileMode(0755)
emptyPassphraseRetriever := func(string, string, bool, int) (string, bool, error) { return "", false, nil }
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
// Since we're constructing this file path manually, we need to add the '.' separator and the extension ourselves
filePath := filepath.Join(tempBaseDir, notary.PrivDir, notary.RootKeysSubdir, testName+"_"+testAlias+"."+testExt)
os.MkdirAll(filepath.Dir(filePath), perms)
err = ioutil.WriteFile(filePath, testData, perms)
require.NoError(t, err, "failed to write test file")
// Create our store
store, err := NewKeyFileStore(tempBaseDir, emptyPassphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
// Call the GetKey function
_, role, err := store.GetKey(testAlias)
require.NoError(t, err, "failed to get key from store")
require.EqualValues(t, testAlias, role)
}
func TestListKeys(t *testing.T) {
testName := "docker.com/notary/root"
perms := os.FileMode(0755)
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
// Create our store
store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
roles := append(data.BaseRoles, "targets/a", "invalidRoleName")
for i, role := range roles {
// Make a new key for each role
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Call the AddKey function
gun := data.GUN(filepath.Dir(testName))
err = store.AddKey(KeyInfo{Role: role, Gun: gun}, privKey)
require.NoError(t, err, "failed to add key to store")
// Check to see if the keystore lists this key
keyMap := store.ListKeys()
// Expect to see i+1 keys in the map, one per key added so far
require.Len(t, keyMap, i+1)
// Expect to see privKeyID inside of the map
listedInfo, ok := keyMap[privKey.ID()]
require.True(t, ok)
require.Equal(t, role, listedInfo.Role)
}
// Write an invalid filename to the directory
filePath := filepath.Join(tempBaseDir, notary.PrivDir, "fakekeyname.key")
err = ioutil.WriteFile(filePath, []byte("data"), perms)
require.NoError(t, err, "failed to write test file")
// Check that the keystore still lists exactly the keys we added: the file written directly to disk is not picked up
keyMap := store.ListKeys()
require.Len(t, keyMap, len(roles))
// Check that ListKeys() returns a copy of the state
// so modifying its returned information does not change the underlying store's keyInfo
for keyID := range keyMap {
delete(keyMap, keyID)
_, err = store.GetKeyInfo(keyID)
require.NoError(t, err)
}
}
func TestAddGetKeyMemStore(t *testing.T) {
testAlias := data.CanonicalRootRole
// Create our store
store := NewKeyMemoryStore(passphraseRetriever)
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Call the AddKey function
err = store.AddKey(KeyInfo{Role: testAlias, Gun: ""}, privKey)
require.NoError(t, err, "failed to add key to store")
// Check to see if file exists
retrievedKey, retrievedAlias, err := store.GetKey(privKey.ID())
require.NoError(t, err, "failed to get key from store")
require.Equal(t, retrievedAlias, testAlias)
require.Equal(t, retrievedKey.Public(), privKey.Public())
require.Equal(t, retrievedKey.Private(), privKey.Private())
}
func TestAddGetKeyInfoMemStore(t *testing.T) {
var gun data.GUN = "docker.com/notary"
// Create our store
store := NewKeyMemoryStore(passphraseRetriever)
rootKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Call the AddKey function
err = store.AddKey(KeyInfo{Role: data.CanonicalRootRole, Gun: ""}, rootKey)
require.NoError(t, err, "failed to add key to store")
// Get and validate key info
rootInfo, err := store.GetKeyInfo(rootKey.ID())
require.NoError(t, err)
require.Equal(t, data.CanonicalRootRole, rootInfo.Role)
require.EqualValues(t, "", rootInfo.Gun)
targetsKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Call the AddKey function
err = store.AddKey(KeyInfo{Role: data.CanonicalTargetsRole, Gun: gun}, targetsKey)
require.NoError(t, err, "failed to add key to store")
// Get and validate key info
targetsInfo, err := store.GetKeyInfo(targetsKey.ID())
require.NoError(t, err)
require.Equal(t, data.CanonicalTargetsRole, targetsInfo.Role)
require.Equal(t, gun, targetsInfo.Gun)
delgKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Call the AddKey function
err = store.AddKey(KeyInfo{Role: "targets/delegation", Gun: gun}, delgKey)
require.NoError(t, err, "failed to add key to store")
// Get and validate key info
delgInfo, err := store.GetKeyInfo(delgKey.ID())
require.NoError(t, err)
require.EqualValues(t, "targets/delegation", delgInfo.Role)
require.EqualValues(t, "", delgInfo.Gun)
}
func TestGetDecryptedWithTamperedCipherText(t *testing.T) {
testExt := "key"
testAlias := data.CanonicalRootRole
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
// Create our FileStore
store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
// Generate a new Private Key
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Call the AddKey function
err = store.AddKey(KeyInfo{Role: testAlias, Gun: ""}, privKey)
require.NoError(t, err, "failed to add key to store")
// Since we're constructing this file path manually, we need to add the '.' separator and the extension ourselves
expectedFilePath := filepath.Join(tempBaseDir, notary.PrivDir, privKey.ID()+"."+testExt)
// Get file description, open file
fp, err := os.OpenFile(expectedFilePath, os.O_WRONLY, 0600)
require.NoError(t, err, "expected file not found")
// Tamper the file
fp.WriteAt([]byte("a"), int64(1))
// Recreate the KeyFileStore to avoid caching
store, err = NewKeyFileStore(tempBaseDir, passphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
// Try to decrypt the file
_, _, err = store.GetKey(privKey.ID())
require.Error(t, err, "expected error while decrypting the content due to invalid cipher text")
}
func TestGetDecryptedWithInvalidPassphrase(t *testing.T) {
// Make a passphraseRetriever that returns a different (wrong) passphrase on each
// attempt and gives up after 5 attempts, in order to test decryption failure
a := "a"
var invalidPassphraseRetriever = func(keyId string, alias string, createNew bool, numAttempts int) (string, bool, error) {
if numAttempts > 5 {
giveup := true
return "", giveup, nil
}
a = a + a
return a, false, nil
}
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
// Test with KeyFileStore
fileStore, err := NewKeyFileStore(tempBaseDir, invalidPassphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
newFileStore, err := NewKeyFileStore(tempBaseDir, invalidPassphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
testGetDecryptedWithInvalidPassphrase(t, fileStore, newFileStore, ErrPasswordInvalid{})
// Can't test with KeyMemoryStore because we cache the decrypted version of
// the key forever
}
func TestGetDecryptedWithConsistentlyInvalidPassphrase(t *testing.T) {
// Make a passphraseRetriever that returns a different (wrong) passphrase on each
// attempt and never gives up, in order to exhaust the attempt limit
a := "aaaaaaaaaaaaa"
var consistentlyInvalidPassphraseRetriever = func(keyID string, alias string, createNew bool, numAttempts int) (string, bool, error) {
a = a + "a"
return a, false, nil
}
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
// Test with KeyFileStore
fileStore, err := NewKeyFileStore(tempBaseDir, consistentlyInvalidPassphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
newFileStore, err := NewKeyFileStore(tempBaseDir, consistentlyInvalidPassphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
testGetDecryptedWithInvalidPassphrase(t, fileStore, newFileStore, ErrAttemptsExceeded{})
// Can't test with KeyMemoryStore because we cache the decrypted version of
// the key forever
}
// testGetDecryptedWithInvalidPassphrase takes two keystores so it can add to
// one and get from the other (to work around caching)
func testGetDecryptedWithInvalidPassphrase(t *testing.T, store KeyStore, newStore KeyStore, expectedFailureType interface{}) {
testAlias := data.CanonicalRootRole
// Generate a new random ECDSA key
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Call the AddKey function
err = store.AddKey(KeyInfo{Role: testAlias, Gun: data.GUN("")}, privKey)
require.NoError(t, err, "failed to add key to store")
// Try to decrypt the file with an invalid passphrase
_, _, err = newStore.GetKey(privKey.ID())
require.Error(t, err, "expected error while decrypting the content due to invalid passphrase")
require.IsType(t, err, expectedFailureType)
}
func TestRemoveKey(t *testing.T) {
testRemoveKeyWithRole(t, data.CanonicalRootRole)
testRemoveKeyWithRole(t, data.CanonicalTargetsRole)
testRemoveKeyWithRole(t, data.CanonicalSnapshotRole)
testRemoveKeyWithRole(t, "targets/a/b/c")
testRemoveKeyWithRole(t, "invalidRole")
}
func testRemoveKeyWithRole(t *testing.T, role data.RoleName) {
var gun data.GUN = "docker.com/notary"
testExt := "key"
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
// Create our store
store, err := NewKeyFileStore(tempBaseDir, passphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Since we're constructing this file path manually, we need to add the '.' separator and the extension ourselves
expectedFilePath := filepath.Join(tempBaseDir, notary.PrivDir, privKey.ID()+"."+testExt)
err = store.AddKey(KeyInfo{Role: role, Gun: gun}, privKey)
require.NoError(t, err, "failed to add key to store")
// Check to see if file exists
_, err = ioutil.ReadFile(expectedFilePath)
require.NoError(t, err, "expected file not found")
// Call remove key
err = store.RemoveKey(privKey.ID())
require.NoError(t, err, "unable to remove key")
// Check to see if file still exists
_, err = ioutil.ReadFile(expectedFilePath)
require.Error(t, err, "file should not exist")
}
func TestKeysAreCached(t *testing.T) {
var (
gun data.GUN = "docker.com/notary"
testAlias data.RoleName = "alias"
)
// Temporary directory where test files will be created
tempBaseDir, err := ioutil.TempDir("", "notary-test-")
require.NoError(t, err, "failed to create a temporary directory")
defer os.RemoveAll(tempBaseDir)
var countingPassphraseRetriever notary.PassRetriever
numTimesCalled := 0
countingPassphraseRetriever = func(keyId, alias string, createNew bool, attempts int) (passphrase string, giveup bool, err error) {
numTimesCalled++
return "password", false, nil
}
// Create our store
store, err := NewKeyFileStore(tempBaseDir, countingPassphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
privKey, err := utils.GenerateECDSAKey(rand.Reader)
require.NoError(t, err, "could not generate private key")
// Call the AddKey function
err = store.AddKey(KeyInfo{Role: testAlias, Gun: gun}, privKey)
require.NoError(t, err, "failed to add key to store")
require.Equal(t, 1, numTimesCalled, "numTimesCalled should have been 1")
// Call the GetKey function
privKey2, _, err := store.GetKey(privKey.ID())
require.NoError(t, err, "failed to get key from store")
require.Equal(t, privKey.Public(), privKey2.Public(), "cachedPrivKey should be the same as the added privKey")
require.Equal(t, privKey.Private(), privKey2.Private(), "cachedPrivKey should be the same as the added privKey")
require.Equal(t, 1, numTimesCalled, "numTimesCalled should be 1 -- no additional call to passphraseRetriever")
// Create a new store
store2, err := NewKeyFileStore(tempBaseDir, countingPassphraseRetriever)
require.NoError(t, err, "failed to create new key filestore")
// Call the GetKey function
privKey3, _, err := store2.GetKey(privKey.ID())
require.NoError(t, err, "failed to get key from store")
require.Equal(t, privKey2.Private(), privKey3.Private(), "privkey from store1 should be the same as privkey from store2")
require.Equal(t, privKey2.Public(), privKey3.Public(), "privkey from store1 should be the same as privkey from store2")
require.Equal(t, 2, numTimesCalled, "numTimesCalled should be 2 -- one additional call to passphraseRetriever")
// Call the GetKey function a bunch of times
for i := 0; i < 10; i++ {
_, _, err := store2.GetKey(privKey.ID())
require.NoError(t, err, "failed to get key from store")
}
require.Equal(t, 2, numTimesCalled, "numTimesCalled should be 2 -- no additional call to passphraseRetriever")
}
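// ExampleNewKeyMemoryStore is an illustrative sketch added for exposition; it
// is not part of the original test suite. It adds a key to a memory-backed
// store and reads it straight back out of the cache.
func ExampleNewKeyMemoryStore() {
store := NewKeyMemoryStore(passphraseRetriever)
privKey, err := utils.GenerateECDSAKey(rand.Reader)
if err != nil {
return
}
// AddKey encrypts the key under the retriever's passphrase and caches it
if err := store.AddKey(KeyInfo{Role: data.CanonicalTargetsRole, Gun: "docker.com/notary"}, privKey); err != nil {
return
}
// GetKey is served from the cache, so the retriever is not consulted again
retrieved, role, err := store.GetKey(privKey.ID())
_, _, _ = retrieved, role, err
}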
package data
import "fmt"
// ErrInvalidMetadata is the error to be returned when metadata is invalid
type ErrInvalidMetadata struct {
role RoleName
msg string
}
func (e ErrInvalidMetadata) Error() string {
return fmt.Sprintf("%s type metadata invalid: %s", e.role.String(), e.msg)
}
// ErrMissingMeta - couldn't find the FileMeta object for the given Role, or
// the FileMeta object contained no supported checksums
type ErrMissingMeta struct {
Role string
}
func (e ErrMissingMeta) Error() string {
return fmt.Sprintf("no checksums for supported algorithms were provided for %s", e.Role)
}
// ErrInvalidChecksum is the error to be returned when checksum is invalid
type ErrInvalidChecksum struct {
alg string
}
func (e ErrInvalidChecksum) Error() string {
return fmt.Sprintf("%s checksum invalid", e.alg)
}
// ErrMismatchedChecksum is the error to be returned when checksum is mismatched
type ErrMismatchedChecksum struct {
alg string
name string
expected string
}
func (e ErrMismatchedChecksum) Error() string {
return fmt.Sprintf("%s checksum for %s did not match: expected %s", e.alg, e.name,
e.expected)
}
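// For example (illustrative, not from the original source):
//
//	ErrMismatchedChecksum{alg: "sha256", name: "targets", expected: "abc123"}.Error()
//	// "sha256 checksum for targets did not match: expected abc123"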
// ErrCertExpired is the error to be returned when a certificate has expired
type ErrCertExpired struct {
CN string
}
func (e ErrCertExpired) Error() string {
return fmt.Sprintf("certificate with CN %s is expired", e.CN)
}
package data
import (
"crypto"
"crypto/ecdsa"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/asn1"
"encoding/hex"
"errors"
"io"
"math/big"
"github.com/docker/go/canonical/json"
"github.com/sirupsen/logrus"
"golang.org/x/crypto/ed25519"
)
// PublicKey is the necessary interface for public keys
type PublicKey interface {
ID() string
Algorithm() string
Public() []byte
}
// PrivateKey adds the ability to access the private key
type PrivateKey interface {
PublicKey
Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error)
Private() []byte
CryptoSigner() crypto.Signer
SignatureAlgorithm() SigAlgorithm
}
// KeyPair holds the public and private key bytes
type KeyPair struct {
Public []byte `json:"public"`
Private []byte `json:"private"`
}
// Keys represents a map of key ID to PublicKey object. It's necessary
// to allow us to unmarshal into an interface via the json.Unmarshaller
// interface
type Keys map[string]PublicKey
// UnmarshalJSON implements the json.Unmarshaller interface
func (ks *Keys) UnmarshalJSON(data []byte) error {
parsed := make(map[string]TUFKey)
err := json.Unmarshal(data, &parsed)
if err != nil {
return err
}
final := make(map[string]PublicKey)
for k, tk := range parsed {
final[k] = typedPublicKey(tk)
}
*ks = final
return nil
}
// KeyList represents a list of keys
type KeyList []PublicKey
// UnmarshalJSON implements the json.Unmarshaller interface
func (ks *KeyList) UnmarshalJSON(data []byte) error {
parsed := make([]TUFKey, 0, 1)
err := json.Unmarshal(data, &parsed)
if err != nil {
return err
}
final := make([]PublicKey, 0, len(parsed))
for _, tk := range parsed {
final = append(final, typedPublicKey(tk))
}
*ks = final
return nil
}
// IDs generates a list of the hex encoded key IDs in the KeyList
func (ks KeyList) IDs() []string {
keyIDs := make([]string, 0, len(ks))
for _, k := range ks {
keyIDs = append(keyIDs, k.ID())
}
return keyIDs
}
func typedPublicKey(tk TUFKey) PublicKey {
switch tk.Algorithm() {
case ECDSAKey:
return &ECDSAPublicKey{TUFKey: tk}
case ECDSAx509Key:
return &ECDSAx509PublicKey{TUFKey: tk}
case RSAKey:
return &RSAPublicKey{TUFKey: tk}
case RSAx509Key:
return &RSAx509PublicKey{TUFKey: tk}
case ED25519Key:
return &ED25519PublicKey{TUFKey: tk}
}
return &UnknownPublicKey{TUFKey: tk}
}
func typedPrivateKey(tk TUFKey) (PrivateKey, error) {
private := tk.Value.Private
tk.Value.Private = nil
switch tk.Algorithm() {
case ECDSAKey:
return NewECDSAPrivateKey(
&ECDSAPublicKey{
TUFKey: tk,
},
private,
)
case ECDSAx509Key:
return NewECDSAPrivateKey(
&ECDSAx509PublicKey{
TUFKey: tk,
},
private,
)
case RSAKey:
return NewRSAPrivateKey(
&RSAPublicKey{
TUFKey: tk,
},
private,
)
case RSAx509Key:
return NewRSAPrivateKey(
&RSAx509PublicKey{
TUFKey: tk,
},
private,
)
case ED25519Key:
return NewED25519PrivateKey(
ED25519PublicKey{
TUFKey: tk,
},
private,
)
}
return &UnknownPrivateKey{
TUFKey: tk,
privateKey: privateKey{private: private},
}, nil
}
// NewPublicKey creates a new, correctly typed PublicKey, using the
// UnknownPublicKey catchall for unsupported ciphers
func NewPublicKey(alg string, public []byte) PublicKey {
tk := TUFKey{
Type: alg,
Value: KeyPair{
Public: public,
},
}
return typedPublicKey(tk)
}
// NewPrivateKey creates a new, correctly typed PrivateKey, using the
// UnknownPrivateKey catchall for unsupported ciphers
func NewPrivateKey(pubKey PublicKey, private []byte) (PrivateKey, error) {
tk := TUFKey{
Type: pubKey.Algorithm(),
Value: KeyPair{
Public: pubKey.Public(),
Private: private, // typedPrivateKey moves this value
},
}
return typedPrivateKey(tk)
}
// UnmarshalPublicKey is used to parse individual public keys in JSON
func UnmarshalPublicKey(data []byte) (PublicKey, error) {
var parsed TUFKey
err := json.Unmarshal(data, &parsed)
if err != nil {
return nil, err
}
return typedPublicKey(parsed), nil
}
// UnmarshalPrivateKey is used to parse individual private keys in JSON
func UnmarshalPrivateKey(data []byte) (PrivateKey, error) {
var parsed TUFKey
err := json.Unmarshal(data, &parsed)
if err != nil {
return nil, err
}
return typedPrivateKey(parsed)
}
// TUFKey is the structure used for both public and private keys in TUF.
// Normally it would make sense to use a different structures for public and
// private keys, but that would change the key ID algorithm (since the canonical
// JSON would be different). This structure should normally be accessed through
// the PublicKey or PrivateKey interfaces.
type TUFKey struct {
id string
Type string `json:"keytype"`
Value KeyPair `json:"keyval"`
}
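// For example (illustrative, not from the original source), an ECDSA public
// key marshals as below, with the key bytes base64-encoded by the JSON encoder
// and the private half omitted as null:
//
//	{"keytype":"ecdsa","keyval":{"public":"MFkwEwYH...","private":null}}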
// Algorithm returns the algorithm of the key
func (k TUFKey) Algorithm() string {
return k.Type
}
// ID generates the ID of the key if necessary, caches it, and returns it
func (k *TUFKey) ID() string {
if k.id == "" {
pubK := TUFKey{
Type: k.Algorithm(),
Value: KeyPair{
Public: k.Public(),
Private: nil,
},
}
data, err := json.MarshalCanonical(&pubK)
if err != nil {
logrus.Error("Error generating key ID:", err)
}
digest := sha256.Sum256(data)
k.id = hex.EncodeToString(digest[:])
}
return k.id
}
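// Illustrative sketch, not part of the original API: ID above hashes only the
// canonical JSON of the key type and public bytes, so a PublicKey derived from
// a PrivateKey shares the private key's ID. The function name is a
// hypothetical example.
func exampleKeyIDStability(priv PrivateKey) bool {
pub := PublicKeyFromPrivate(priv)
// Both calls hash the same {keytype, keyval{public}} canonical JSON.
return priv.ID() == pub.ID()
}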
// Public returns the public bytes
func (k TUFKey) Public() []byte {
return k.Value.Public
}
// Public key types
// ECDSAPublicKey represents an ECDSA key using a raw serialization
// of the public key
type ECDSAPublicKey struct {
TUFKey
}
// ECDSAx509PublicKey represents an ECDSA key using an x509 cert
// as the serialized format of the public key
type ECDSAx509PublicKey struct {
TUFKey
}
// RSAPublicKey represents an RSA key using a raw serialization
// of the public key
type RSAPublicKey struct {
TUFKey
}
// RSAx509PublicKey represents an RSA key using an x509 cert
// as the serialized format of the public key
type RSAx509PublicKey struct {
TUFKey
}
// ED25519PublicKey represents an ED25519 key using a raw serialization
// of the public key
type ED25519PublicKey struct {
TUFKey
}
// UnknownPublicKey is a catchall for key types that are not supported
type UnknownPublicKey struct {
TUFKey
}
// NewECDSAPublicKey initializes a new public key with the ECDSAKey type
func NewECDSAPublicKey(public []byte) *ECDSAPublicKey {
return &ECDSAPublicKey{
TUFKey: TUFKey{
Type: ECDSAKey,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// NewECDSAx509PublicKey initializes a new public key with the ECDSAx509Key type
func NewECDSAx509PublicKey(public []byte) *ECDSAx509PublicKey {
return &ECDSAx509PublicKey{
TUFKey: TUFKey{
Type: ECDSAx509Key,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// NewRSAPublicKey initializes a new public key with the RSAKey type
func NewRSAPublicKey(public []byte) *RSAPublicKey {
return &RSAPublicKey{
TUFKey: TUFKey{
Type: RSAKey,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// NewRSAx509PublicKey initializes a new public key with the RSAx509Key type
func NewRSAx509PublicKey(public []byte) *RSAx509PublicKey {
return &RSAx509PublicKey{
TUFKey: TUFKey{
Type: RSAx509Key,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// NewED25519PublicKey initializes a new public key with the ED25519Key type
func NewED25519PublicKey(public []byte) *ED25519PublicKey {
return &ED25519PublicKey{
TUFKey: TUFKey{
Type: ED25519Key,
Value: KeyPair{
Public: public,
Private: nil,
},
},
}
}
// Private key types
type privateKey struct {
private []byte
}
type signer struct {
signer crypto.Signer
}
// ECDSAPrivateKey represents a private ECDSA key
type ECDSAPrivateKey struct {
PublicKey
privateKey
signer
}
// RSAPrivateKey represents a private RSA key
type RSAPrivateKey struct {
PublicKey
privateKey
signer
}
// ED25519PrivateKey represents a private ED25519 key
type ED25519PrivateKey struct {
ED25519PublicKey
privateKey
}
// UnknownPrivateKey is a catchall for unsupported key types
type UnknownPrivateKey struct {
TUFKey
privateKey
}
// NewECDSAPrivateKey initializes a new ECDSA private key
func NewECDSAPrivateKey(public PublicKey, private []byte) (*ECDSAPrivateKey, error) {
switch public.(type) {
case *ECDSAPublicKey, *ECDSAx509PublicKey:
default:
return nil, errors.New("invalid public key type provided to NewECDSAPrivateKey")
}
ecdsaPrivKey, err := x509.ParseECPrivateKey(private)
if err != nil {
return nil, err
}
return &ECDSAPrivateKey{
PublicKey: public,
privateKey: privateKey{private: private},
signer: signer{signer: ecdsaPrivKey},
}, nil
}
// NewRSAPrivateKey initializes a new RSA private key
func NewRSAPrivateKey(public PublicKey, private []byte) (*RSAPrivateKey, error) {
switch public.(type) {
case *RSAPublicKey, *RSAx509PublicKey:
default:
return nil, errors.New("invalid public key type provided to NewRSAPrivateKey")
}
rsaPrivKey, err := x509.ParsePKCS1PrivateKey(private)
if err != nil {
return nil, err
}
return &RSAPrivateKey{
PublicKey: public,
privateKey: privateKey{private: private},
signer: signer{signer: rsaPrivKey},
}, nil
}
// NewED25519PrivateKey initializes a new ED25519 private key
func NewED25519PrivateKey(public ED25519PublicKey, private []byte) (*ED25519PrivateKey, error) {
return &ED25519PrivateKey{
ED25519PublicKey: public,
privateKey: privateKey{private: private},
}, nil
}
// Private return the serialized private bytes of the key
func (k privateKey) Private() []byte {
return k.private
}
// CryptoSigner returns the underlying crypto.Signer for use cases where we need the default
// signature or public key functionality (like when we generate certificates)
func (s signer) CryptoSigner() crypto.Signer {
return s.signer
}
// CryptoSigner returns nil for ED25519PrivateKey: these keys sign directly
// via Sign rather than exposing an underlying crypto.Signer
func (k ED25519PrivateKey) CryptoSigner() crypto.Signer {
return nil
}
// CryptoSigner returns nil for UnknownPrivateKey, since the client does not
// know how to sign with this key type
func (k UnknownPrivateKey) CryptoSigner() crypto.Signer {
return nil
}
type ecdsaSig struct {
R *big.Int
S *big.Int
}
// Sign creates an ecdsa signature
func (k ECDSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
ecdsaPrivKey, ok := k.CryptoSigner().(*ecdsa.PrivateKey)
if !ok {
return nil, errors.New("signer was based on the wrong key type")
}
hashed := sha256.Sum256(msg)
sigASN1, err := ecdsaPrivKey.Sign(rand, hashed[:], opts)
if err != nil {
return nil, err
}
sig := ecdsaSig{}
_, err = asn1.Unmarshal(sigASN1, &sig)
if err != nil {
return nil, err
}
rBytes, sBytes := sig.R.Bytes(), sig.S.Bytes()
octetLength := (ecdsaPrivKey.Params().BitSize + 7) >> 3
// MUST include leading zeros in the output
rBuf := make([]byte, octetLength-len(rBytes), octetLength)
sBuf := make([]byte, octetLength-len(sBytes), octetLength)
rBuf = append(rBuf, rBytes...)
sBuf = append(sBuf, sBytes...)
return append(rBuf, sBuf...), nil
}
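// Illustrative sketch, not part of the original API: verifying the raw r||s
// signature produced by ECDSAPrivateKey.Sign above. The fixed-width halves are
// rebuilt into big.Ints and checked with the standard library's ecdsa.Verify;
// this assumes the file's existing imports, and the function name is a
// hypothetical example.
func exampleVerifyRawECDSA(pub *ecdsa.PublicKey, msg, sig []byte) bool {
if len(sig) == 0 || len(sig)%2 != 0 {
return false
}
half := len(sig) / 2
r := new(big.Int).SetBytes(sig[:half])
s := new(big.Int).SetBytes(sig[half:])
// Sign hashes the message with SHA-256 before signing, so hash the same way here.
hashed := sha256.Sum256(msg)
return ecdsa.Verify(pub, hashed[:], r, s)
}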
// Sign creates an rsa signature
func (k RSAPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
hashed := sha256.Sum256(msg)
if opts == nil {
opts = &rsa.PSSOptions{
SaltLength: rsa.PSSSaltLengthEqualsHash,
Hash: crypto.SHA256,
}
}
return k.CryptoSigner().Sign(rand, hashed[:], opts)
}
// Sign creates an ed25519 signature
func (k ED25519PrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
priv := make([]byte, ed25519.PrivateKeySize)
// The ed25519 key is serialized as public key then private key, so just use private key here.
copy(priv, k.private[ed25519.PublicKeySize:])
return ed25519.Sign(ed25519.PrivateKey(priv), msg), nil
}
// Sign on an UnknownPrivateKey raises an error because the client does not
// know how to sign with this key type.
func (k UnknownPrivateKey) Sign(rand io.Reader, msg []byte, opts crypto.SignerOpts) (signature []byte, err error) {
return nil, errors.New("unknown key type, cannot sign")
}
// SignatureAlgorithm returns the SigAlgorithm for an ECDSAPrivateKey
func (k ECDSAPrivateKey) SignatureAlgorithm() SigAlgorithm {
return ECDSASignature
}
// SignatureAlgorithm returns the SigAlgorithm for an RSAPrivateKey
func (k RSAPrivateKey) SignatureAlgorithm() SigAlgorithm {
return RSAPSSSignature
}
// SignatureAlgorithm returns the SigAlgorithm for an ED25519PrivateKey
func (k ED25519PrivateKey) SignatureAlgorithm() SigAlgorithm {
return EDDSASignature
}
// SignatureAlgorithm returns the SigAlgorithm for an UnknownPrivateKey
func (k UnknownPrivateKey) SignatureAlgorithm() SigAlgorithm {
return ""
}
// PublicKeyFromPrivate returns a new PublicKey based on a private key, with
// the private key bytes guaranteed to be nil.
func PublicKeyFromPrivate(pk PrivateKey) PublicKey {
return typedPublicKey(TUFKey{
Type: pk.Algorithm(),
Value: KeyPair{
Public: pk.Public(),
Private: nil,
},
})
}
package data
import (
"fmt"
"path"
"regexp"
"strings"
"github.com/sirupsen/logrus"
)
// Canonical base role names
var (
CanonicalRootRole RoleName = "root"
CanonicalTargetsRole RoleName = "targets"
CanonicalSnapshotRole RoleName = "snapshot"
CanonicalTimestampRole RoleName = "timestamp"
)
// BaseRoles is an easy to iterate list of the top level
// roles.
var BaseRoles = []RoleName{
CanonicalRootRole,
CanonicalTargetsRole,
CanonicalSnapshotRole,
CanonicalTimestampRole,
}
// Regex for validating delegation names
var delegationRegexp = regexp.MustCompile("^[-a-z0-9_/]+$")
// ErrNoSuchRole indicates the role doesn't exist
type ErrNoSuchRole struct {
Role RoleName
}
func (e ErrNoSuchRole) Error() string {
return fmt.Sprintf("role does not exist: %s", e.Role)
}
// ErrInvalidRole represents an error regarding a role. Typically
// something like a role for which some of the public keys were
// not found in the TUF repo.
type ErrInvalidRole struct {
Role RoleName
Reason string
}
func (e ErrInvalidRole) Error() string {
if e.Reason != "" {
return fmt.Sprintf("tuf: invalid role %s. %s", e.Role, e.Reason)
}
return fmt.Sprintf("tuf: invalid role %s.", e.Role)
}
// ValidRole only determines whether the name is semantically
// correct. For target delegated roles, it does NOT check
// the appropriate parent roles exist.
func ValidRole(name RoleName) bool {
if IsDelegation(name) {
return true
}
for _, v := range BaseRoles {
if name == v {
return true
}
}
return false
}
// IsDelegation checks if the role is a valid delegation, i.e. a properly
// formatted sub-role of the targets role
func IsDelegation(role RoleName) bool {
strRole := role.String()
targetsBase := CanonicalTargetsRole + "/"
whitelistedChars := delegationRegexp.MatchString(strRole)
// Limit size of full role string to 255 chars for db column size limit
correctLength := len(role) < 256
// Removes ., .., extra slashes, and trailing slash
isClean := path.Clean(strRole) == strRole
return strings.HasPrefix(strRole, targetsBase.String()) &&
whitelistedChars &&
correctLength &&
isClean
}
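// A few illustrative inputs and the results IsDelegation gives for them
// (examples added for clarity, not in the original source):
//
//	IsDelegation("targets/releases")    == true
//	IsDelegation("targets")             == false // base role, no "targets/" prefix
//	IsDelegation("targets/../snapshot") == false // not a clean path
//	IsDelegation("targets/Foo")         == false // uppercase is not whitelisted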
// IsBaseRole checks if the role is a base role
func IsBaseRole(role RoleName) bool {
for _, baseRole := range BaseRoles {
if role == baseRole {
return true
}
}
return false
}
// IsWildDelegation determines if a role represents a valid wildcard delegation
// path, i.e. targets/*, targets/foo/*.
// The wildcard may only appear as the final part of the delegation and must
// be a whole segment, i.e. targets/foo* is not a valid wildcard delegation.
func IsWildDelegation(role RoleName) bool {
if path.Clean(role.String()) != role.String() {
return false
}
base := role.Parent()
if !(IsDelegation(base) || base == CanonicalTargetsRole) {
return false
}
return role[len(role)-2:] == "/*"
}
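// A few illustrative inputs and the results IsWildDelegation gives for them
// (examples added for clarity, not in the original source):
//
//	IsWildDelegation("targets/*")     == true
//	IsWildDelegation("targets/foo/*") == true
//	IsWildDelegation("targets/foo*")  == false // wildcard must be a whole segment
//	IsWildDelegation("targets/*/foo") == false // wildcard must be the final segment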
// BaseRole is an internal representation of a root/targets/snapshot/timestamp role, with its public keys included
type BaseRole struct {
Keys map[string]PublicKey
Name RoleName
Threshold int
}
// NewBaseRole creates a new BaseRole object with the provided parameters
func NewBaseRole(name RoleName, threshold int, keys ...PublicKey) BaseRole {
r := BaseRole{
Name: name,
Threshold: threshold,
Keys: make(map[string]PublicKey),
}
for _, k := range keys {
r.Keys[k.ID()] = k
}
return r
}
// ListKeys retrieves the public keys valid for this role
func (b BaseRole) ListKeys() KeyList {
return listKeys(b.Keys)
}
// ListKeyIDs retrieves the list of key IDs valid for this role
func (b BaseRole) ListKeyIDs() []string {
return listKeyIDs(b.Keys)
}
// Equals returns whether this BaseRole equals another BaseRole
func (b BaseRole) Equals(o BaseRole) bool {
if b.Threshold != o.Threshold || b.Name != o.Name || len(b.Keys) != len(o.Keys) {
return false
}
for keyID, key := range b.Keys {
oKey, ok := o.Keys[keyID]
if !ok || key.ID() != oKey.ID() {
return false
}
}
return true
}
// DelegationRole is an internal representation of a delegation role, with its public keys included
type DelegationRole struct {
BaseRole
Paths []string
}
func listKeys(keyMap map[string]PublicKey) KeyList {
keys := KeyList{}
for _, key := range keyMap {
keys = append(keys, key)
}
return keys
}
func listKeyIDs(keyMap map[string]PublicKey) []string {
keyIDs := []string{}
for id := range keyMap {
keyIDs = append(keyIDs, id)
}
return keyIDs
}
// Restrict restricts the paths and path hash prefixes for the passed in delegation role,
// returning a copy of the role with validated paths as if it were a direct child
func (d DelegationRole) Restrict(child DelegationRole) (DelegationRole, error) {
if !d.IsParentOf(child) {
return DelegationRole{}, fmt.Errorf("%s is not a parent of %s", d.Name, child.Name)
}
return DelegationRole{
BaseRole: BaseRole{
Keys: child.Keys,
Name: child.Name,
Threshold: child.Threshold,
},
Paths: RestrictDelegationPathPrefixes(d.Paths, child.Paths),
}, nil
}
// IsParentOf returns whether the passed in delegation role is the direct child of this role,
// determined by delegation name.
// Ex: targets/a is a direct parent of targets/a/b, but targets/a is not a direct parent of targets/a/b/c
func (d DelegationRole) IsParentOf(child DelegationRole) bool {
return path.Dir(child.Name.String()) == d.Name.String()
}
// CheckPaths checks if a given path is valid for the role
func (d DelegationRole) CheckPaths(path string) bool {
return checkPaths(path, d.Paths)
}
func checkPaths(path string, permitted []string) bool {
for _, p := range permitted {
if strings.HasPrefix(path, p) {
return true
}
}
return false
}
// RestrictDelegationPathPrefixes returns the list of valid delegationPaths that are prefixed by parentPaths
func RestrictDelegationPathPrefixes(parentPaths, delegationPaths []string) []string {
validPaths := []string{}
if len(delegationPaths) == 0 {
return validPaths
}
// Validate each individual delegation path
for _, delgPath := range delegationPaths {
isPrefixed := false
for _, parentPath := range parentPaths {
if strings.HasPrefix(delgPath, parentPath) {
isPrefixed = true
break
}
}
// Only keep the delegation path if it was prefixed by at least one parent path
if isPrefixed {
validPaths = append(validPaths, delgPath)
}
}
return validPaths
}
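// An illustrative call (not in the original source): only child paths that are
// prefixed by some parent path survive the restriction.
//
//	RestrictDelegationPathPrefixes(
//		[]string{"docs/", "bin/"},        // parent paths
//		[]string{"docs/guides/", "etc/"}, // delegation paths
//	) // == []string{"docs/guides/"}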
// RootRole is a cut-down role as it appears in the root.json.
// Eventually it should be used only immediately before and after serialization/deserialization.
type RootRole struct {
KeyIDs []string `json:"keyids"`
Threshold int `json:"threshold"`
}
// Role is a more verbose role as it appears in targets delegations.
// Eventually it should be used only immediately before and after serialization/deserialization.
type Role struct {
RootRole
Name RoleName `json:"name"`
Paths []string `json:"paths,omitempty"`
}
// NewRole creates a new Role object from the given parameters
func NewRole(name RoleName, threshold int, keyIDs, paths []string) (*Role, error) {
if IsDelegation(name) {
if len(paths) == 0 {
logrus.Debugf("role %s with no Paths will never be able to publish content until one or more are added", name)
}
}
if threshold < 1 {
return nil, ErrInvalidRole{Role: name}
}
if !ValidRole(name) {
return nil, ErrInvalidRole{Role: name}
}
return &Role{
RootRole: RootRole{
KeyIDs: keyIDs,
Threshold: threshold,
},
Name: name,
Paths: paths,
}, nil
}
// CheckPaths checks if a given path is valid for the role
func (r Role) CheckPaths(path string) bool {
return checkPaths(path, r.Paths)
}
// AddKeys merges the ids into the current list of role key ids
func (r *Role) AddKeys(ids []string) {
r.KeyIDs = mergeStrSlices(r.KeyIDs, ids)
}
// AddPaths merges the paths into the current list of role paths
func (r *Role) AddPaths(paths []string) error {
if len(paths) == 0 {
return nil
}
r.Paths = mergeStrSlices(r.Paths, paths)
return nil
}
// RemoveKeys removes the ids from the current list of key ids
func (r *Role) RemoveKeys(ids []string) {
r.KeyIDs = subtractStrSlices(r.KeyIDs, ids)
}
// RemovePaths removes the paths from the current list of role paths
func (r *Role) RemovePaths(paths []string) {
r.Paths = subtractStrSlices(r.Paths, paths)
}
func mergeStrSlices(orig, new []string) []string {
have := make(map[string]bool)
for _, e := range orig {
have[e] = true
}
merged := make([]string, len(orig), len(orig)+len(new))
copy(merged, orig)
for _, e := range new {
if !have[e] {
merged = append(merged, e)
}
}
return merged
}
func subtractStrSlices(orig, remove []string) []string {
kill := make(map[string]bool)
for _, e := range remove {
kill[e] = true
}
var keep []string
for _, e := range orig {
if !kill[e] {
keep = append(keep, e)
}
}
return keep
}
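// Illustrative behavior of the two helpers above (examples added for clarity,
// not in the original source): merging deduplicates while preserving order,
// and subtracting removes every listed element.
//
//	mergeStrSlices([]string{"a", "b"}, []string{"b", "c"})    // ["a", "b", "c"]
//	subtractStrSlices([]string{"a", "b", "c"}, []string{"b"}) // ["a", "c"]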
package data
import (
"fmt"
"github.com/docker/go/canonical/json"
)
// SignedRoot is a fully unpacked root.json
type SignedRoot struct {
Signatures []Signature
Signed Root
Dirty bool
}
// Root is the Signed component of a root.json
type Root struct {
SignedCommon
Keys Keys `json:"keys"`
Roles map[RoleName]*RootRole `json:"roles"`
ConsistentSnapshot bool `json:"consistent_snapshot"`
}
// isValidRootStructure returns an error, or nil, depending on whether the content of the struct
// is valid for root metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func isValidRootStructure(r Root) error {
expectedType := TUFTypes[CanonicalRootRole]
if r.Type != expectedType {
return ErrInvalidMetadata{
role: CanonicalRootRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, r.Type)}
}
if r.Version < 1 {
return ErrInvalidMetadata{
role: CanonicalRootRole, msg: "version cannot be less than 1"}
}
// all the base roles MUST appear in the root.json - other roles are allowed,
// but any role other than the mirror role (not currently supported) is out of spec
for _, roleName := range BaseRoles {
roleObj, ok := r.Roles[roleName]
if !ok || roleObj == nil {
return ErrInvalidMetadata{
role: CanonicalRootRole, msg: fmt.Sprintf("missing %s role specification", roleName)}
}
if err := isValidRootRoleStructure(CanonicalRootRole, roleName, *roleObj, r.Keys); err != nil {
return err
}
}
return nil
}
func isValidRootRoleStructure(metaContainingRole, rootRoleName RoleName, r RootRole, validKeys Keys) error {
if r.Threshold < 1 {
return ErrInvalidMetadata{
role: metaContainingRole,
msg: fmt.Sprintf("invalid threshold specified for %s: %v ", rootRoleName, r.Threshold),
}
}
for _, keyID := range r.KeyIDs {
if _, ok := validKeys[keyID]; !ok {
return ErrInvalidMetadata{
role: metaContainingRole,
msg: fmt.Sprintf("key ID %s specified in %s without corresponding key", keyID, rootRoleName),
}
}
}
return nil
}
// NewRoot initializes a new SignedRoot with a set of keys, roles, and the consistent flag
func NewRoot(keys map[string]PublicKey, roles map[RoleName]*RootRole, consistent bool) (*SignedRoot, error) {
signedRoot := &SignedRoot{
Signatures: make([]Signature, 0),
Signed: Root{
SignedCommon: SignedCommon{
Type: TUFTypes[CanonicalRootRole],
Version: 0,
Expires: DefaultExpires(CanonicalRootRole),
},
Keys: keys,
Roles: roles,
ConsistentSnapshot: consistent,
},
Dirty: true,
}
return signedRoot, nil
}
// BuildBaseRole returns a copy of a BaseRole using the information in this SignedRoot for the specified role name.
// Will error for invalid role name or key metadata within this SignedRoot
func (r SignedRoot) BuildBaseRole(roleName RoleName) (BaseRole, error) {
roleData, ok := r.Signed.Roles[roleName]
if !ok {
return BaseRole{}, ErrInvalidRole{Role: roleName, Reason: "role not found in root file"}
}
// Get all public keys for the base role from TUF metadata
keyIDs := roleData.KeyIDs
pubKeys := make(map[string]PublicKey)
for _, keyID := range keyIDs {
pubKey, ok := r.Signed.Keys[keyID]
if !ok {
return BaseRole{}, ErrInvalidRole{
Role: roleName,
Reason: fmt.Sprintf("key with ID %s was not found in root metadata", keyID),
}
}
pubKeys[keyID] = pubKey
}
return BaseRole{
Name: roleName,
Keys: pubKeys,
Threshold: roleData.Threshold,
}, nil
}
// ToSigned partially serializes a SignedRoot for further signing
func (r SignedRoot) ToSigned() (*Signed, error) {
s, err := defaultSerializer.MarshalCanonical(r.Signed)
if err != nil {
return nil, err
}
// cast into a json.RawMessage
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(r.Signatures))
copy(sigs, r.Signatures)
return &Signed{
Signatures: sigs,
Signed: &signed,
}, nil
}
// MarshalJSON returns the serialized form of SignedRoot as bytes
func (r SignedRoot) MarshalJSON() ([]byte, error) {
signed, err := r.ToSigned()
if err != nil {
return nil, err
}
return defaultSerializer.Marshal(signed)
}
// RootFromSigned fully unpacks a Signed object into a SignedRoot and ensures
// that it is a valid SignedRoot
func RootFromSigned(s *Signed) (*SignedRoot, error) {
r := Root{}
if s.Signed == nil {
return nil, ErrInvalidMetadata{
role: CanonicalRootRole,
msg: "root file contained an empty payload",
}
}
if err := defaultSerializer.Unmarshal(*s.Signed, &r); err != nil {
return nil, err
}
if err := isValidRootStructure(r); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedRoot{
Signatures: sigs,
Signed: r,
}, nil
}
package data
import "github.com/docker/go/canonical/json"
// serializer is an interface that can marshal and unmarshal TUF data. This
// is expected to be a canonical JSON marshaller
type serializer interface {
MarshalCanonical(from interface{}) ([]byte, error)
Marshal(from interface{}) ([]byte, error)
Unmarshal(from []byte, to interface{}) error
}
// CanonicalJSON marshals to and from canonical JSON
type canonicalJSON struct{}
// MarshalCanonical returns the canonical JSON form of a thing
func (c canonicalJSON) MarshalCanonical(from interface{}) ([]byte, error) {
return json.MarshalCanonical(from)
}
// Marshal returns the regular non-canonical JSON form of a thing
func (c canonicalJSON) Marshal(from interface{}) ([]byte, error) {
return json.Marshal(from)
}
// Unmarshal unmarshals some JSON bytes
func (c canonicalJSON) Unmarshal(from []byte, to interface{}) error {
return json.Unmarshal(from, to)
}
// defaultSerializer is a canonical JSON serializer
var defaultSerializer serializer = canonicalJSON{}
func setDefaultSerializer(s serializer) {
defaultSerializer = s
}
package data
import (
"bytes"
"fmt"
"github.com/docker/go/canonical/json"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
)
// SignedSnapshot is a fully unpacked snapshot.json
type SignedSnapshot struct {
Signatures []Signature
Signed Snapshot
Dirty bool
}
// Snapshot is the Signed component of a snapshot.json
type Snapshot struct {
SignedCommon
Meta Files `json:"meta"`
}
// IsValidSnapshotStructure returns an error, or nil, depending on whether the content of the
// struct is valid for snapshot metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func IsValidSnapshotStructure(s Snapshot) error {
expectedType := TUFTypes[CanonicalSnapshotRole]
if s.Type != expectedType {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, s.Type)}
}
if s.Version < 1 {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole, msg: "version cannot be less than one"}
}
for _, file := range []RoleName{CanonicalRootRole, CanonicalTargetsRole} {
// Meta is a map of FileMeta, so indexing a role that isn't in the map yields
// an empty FileMeta whose Hashes map is nil, and key lookups in a nil map
// are safe.
//
// For now sha256 is required and sha512 is not.
if _, ok := s.Meta[file.String()].Hashes[notary.SHA256]; !ok {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole,
msg: fmt.Sprintf("missing %s sha256 checksum information", file.String()),
}
}
if err := CheckValidHashStructures(s.Meta[file.String()].Hashes); err != nil {
return ErrInvalidMetadata{
role: CanonicalSnapshotRole,
msg: fmt.Sprintf("invalid %s checksum information, %v", file.String(), err),
}
}
}
return nil
}
// NewSnapshot initializes a SignedSnapshot with a given top level root
// and targets objects
func NewSnapshot(root *Signed, targets *Signed) (*SignedSnapshot, error) {
logrus.Debug("generating new snapshot...")
targetsJSON, err := json.Marshal(targets)
if err != nil {
logrus.Debug("Error Marshalling Targets")
return nil, err
}
rootJSON, err := json.Marshal(root)
if err != nil {
logrus.Debug("Error Marshalling Root")
return nil, err
}
rootMeta, err := NewFileMeta(bytes.NewReader(rootJSON), NotaryDefaultHashes...)
if err != nil {
return nil, err
}
targetsMeta, err := NewFileMeta(bytes.NewReader(targetsJSON), NotaryDefaultHashes...)
if err != nil {
return nil, err
}
return &SignedSnapshot{
Signatures: make([]Signature, 0),
Signed: Snapshot{
SignedCommon: SignedCommon{
Type: TUFTypes[CanonicalSnapshotRole],
Version: 0,
Expires: DefaultExpires(CanonicalSnapshotRole),
},
Meta: Files{
CanonicalRootRole.String(): rootMeta,
CanonicalTargetsRole.String(): targetsMeta,
},
},
}, nil
}
// ToSigned partially serializes a SignedSnapshot for further signing
func (sp *SignedSnapshot) ToSigned() (*Signed, error) {
s, err := defaultSerializer.MarshalCanonical(sp.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(sp.Signatures))
copy(sigs, sp.Signatures)
return &Signed{
Signatures: sigs,
Signed: &signed,
}, nil
}
// AddMeta updates a role in the snapshot with new meta
func (sp *SignedSnapshot) AddMeta(role RoleName, meta FileMeta) {
sp.Signed.Meta[role.String()] = meta
sp.Dirty = true
}
// GetMeta gets the metadata for a particular role, returning an error if it's
// not found
func (sp *SignedSnapshot) GetMeta(role RoleName) (*FileMeta, error) {
if meta, ok := sp.Signed.Meta[role.String()]; ok {
if _, ok := meta.Hashes["sha256"]; ok {
return &meta, nil
}
}
return nil, ErrMissingMeta{Role: role.String()}
}
// DeleteMeta removes a role from the snapshot. If the role doesn't
// exist in the snapshot, it's a noop.
func (sp *SignedSnapshot) DeleteMeta(role RoleName) {
if _, ok := sp.Signed.Meta[role.String()]; ok {
delete(sp.Signed.Meta, role.String())
sp.Dirty = true
}
}
// MarshalJSON returns the serialized form of SignedSnapshot as bytes
func (sp *SignedSnapshot) MarshalJSON() ([]byte, error) {
signed, err := sp.ToSigned()
if err != nil {
return nil, err
}
return defaultSerializer.Marshal(signed)
}
// SnapshotFromSigned fully unpacks a Signed object into a SignedSnapshot
func SnapshotFromSigned(s *Signed) (*SignedSnapshot, error) {
sp := Snapshot{}
if err := defaultSerializer.Unmarshal(*s.Signed, &sp); err != nil {
return nil, err
}
if err := IsValidSnapshotStructure(sp); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedSnapshot{
Signatures: sigs,
Signed: sp,
}, nil
}
package data
import (
"errors"
"fmt"
"path"
"github.com/docker/go/canonical/json"
)
// SignedTargets is a fully unpacked targets.json, or target delegation
// json file
type SignedTargets struct {
Signatures []Signature
Signed Targets
Dirty bool
}
// Targets is the Signed components of a targets.json or delegation json file
type Targets struct {
SignedCommon
Targets Files `json:"targets"`
Delegations Delegations `json:"delegations,omitempty"`
}
// isValidTargetsStructure returns an error, or nil, depending on whether the content of the struct
// is valid for targets metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func isValidTargetsStructure(t Targets, roleName RoleName) error {
if roleName != CanonicalTargetsRole && !IsDelegation(roleName) {
return ErrInvalidRole{Role: roleName}
}
// even if it's a delegated role, the metadata type is "Targets"
expectedType := TUFTypes[CanonicalTargetsRole]
if t.Type != expectedType {
return ErrInvalidMetadata{
role: roleName, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
}
if t.Version < 1 {
return ErrInvalidMetadata{role: roleName, msg: "version cannot be less than one"}
}
for _, roleObj := range t.Delegations.Roles {
if !IsDelegation(roleObj.Name) || path.Dir(roleObj.Name.String()) != roleName.String() {
return ErrInvalidMetadata{
role: roleName, msg: fmt.Sprintf("delegation role %s invalid", roleObj.Name)}
}
if err := isValidRootRoleStructure(roleName, roleObj.Name, roleObj.RootRole, t.Delegations.Keys); err != nil {
return err
}
}
return nil
}
// NewTargets initializes a new empty SignedTargets object
func NewTargets() *SignedTargets {
return &SignedTargets{
Signatures: make([]Signature, 0),
Signed: Targets{
SignedCommon: SignedCommon{
Type: TUFTypes[CanonicalTargetsRole],
Version: 0,
Expires: DefaultExpires(CanonicalTargetsRole),
},
Targets: make(Files),
Delegations: *NewDelegations(),
},
Dirty: true,
}
}
// GetMeta attempts to find the targets entry for the path. It
// returns nil if the target is not found.
func (t SignedTargets) GetMeta(path string) *FileMeta {
for p, meta := range t.Signed.Targets {
if p == path {
return &meta
}
}
return nil
}
// GetValidDelegations filters the delegation roles specified in the signed targets, and
// only returns roles that are direct children and restricts their paths
func (t SignedTargets) GetValidDelegations(parent DelegationRole) []DelegationRole {
roles := t.buildDelegationRoles()
result := []DelegationRole{}
for _, r := range roles {
validRole, err := parent.Restrict(r)
if err != nil {
continue
}
result = append(result, validRole)
}
return result
}
// BuildDelegationRole returns a copy of a DelegationRole using the information in this SignedTargets for the specified role name.
// Will error for invalid role name or key metadata within this SignedTargets. Path data is not validated.
func (t *SignedTargets) BuildDelegationRole(roleName RoleName) (DelegationRole, error) {
for _, role := range t.Signed.Delegations.Roles {
if role.Name == roleName {
pubKeys := make(map[string]PublicKey)
for _, keyID := range role.KeyIDs {
pubKey, ok := t.Signed.Delegations.Keys[keyID]
if !ok {
// Couldn't retrieve all keys, so stop walking and return invalid role
return DelegationRole{}, ErrInvalidRole{
Role: roleName,
Reason: "role lists unknown key " + keyID + " as a signing key",
}
}
pubKeys[keyID] = pubKey
}
return DelegationRole{
BaseRole: BaseRole{
Name: role.Name,
Keys: pubKeys,
Threshold: role.Threshold,
},
Paths: role.Paths,
}, nil
}
}
return DelegationRole{}, ErrNoSuchRole{Role: roleName}
}
// helper function to create DelegationRole structures from all delegations in a
// SignedTargets. These delegations are read directly from the SignedTargets and
// are not modified or validated.
func (t SignedTargets) buildDelegationRoles() []DelegationRole {
var roles []DelegationRole
for _, roleData := range t.Signed.Delegations.Roles {
delgRole, err := t.BuildDelegationRole(roleData.Name)
if err != nil {
continue
}
roles = append(roles, delgRole)
}
return roles
}
// AddTarget adds or updates the meta for the given path
func (t *SignedTargets) AddTarget(path string, meta FileMeta) {
t.Signed.Targets[path] = meta
t.Dirty = true
}
// AddDelegation is intended to add a new delegated role with the given keys,
// ensuring the keys either already exist, or are added to the map
// of delegation keys. It is not yet implemented.
func (t *SignedTargets) AddDelegation(role *Role, keys []*PublicKey) error {
return errors.New("not implemented")
}
// ToSigned partially serializes a SignedTargets for further signing
func (t *SignedTargets) ToSigned() (*Signed, error) {
s, err := defaultSerializer.MarshalCanonical(t.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(t.Signatures))
copy(sigs, t.Signatures)
return &Signed{
Signatures: sigs,
Signed: &signed,
}, nil
}
// MarshalJSON returns the serialized form of SignedTargets as bytes
func (t *SignedTargets) MarshalJSON() ([]byte, error) {
signed, err := t.ToSigned()
if err != nil {
return nil, err
}
return defaultSerializer.Marshal(signed)
}
// TargetsFromSigned fully unpacks a Signed object into a SignedTargets, given
// a role name (so it can validate the SignedTargets object)
func TargetsFromSigned(s *Signed, roleName RoleName) (*SignedTargets, error) {
t := Targets{}
if err := defaultSerializer.Unmarshal(*s.Signed, &t); err != nil {
return nil, err
}
if err := isValidTargetsStructure(t, roleName); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedTargets{
Signatures: sigs,
Signed: t,
}, nil
}
package data
import (
"bytes"
"fmt"
"github.com/docker/go/canonical/json"
"github.com/theupdateframework/notary"
)
// SignedTimestamp is a fully unpacked timestamp.json
type SignedTimestamp struct {
Signatures []Signature
Signed Timestamp
Dirty bool
}
// Timestamp is the Signed component of a timestamp.json
type Timestamp struct {
SignedCommon
Meta Files `json:"meta"`
}
// IsValidTimestampStructure returns an error, or nil, depending on whether the content of the struct
// is valid for timestamp metadata. This does not check signatures or expiry, just that
// the metadata content is valid.
func IsValidTimestampStructure(t Timestamp) error {
expectedType := TUFTypes[CanonicalTimestampRole]
if t.Type != expectedType {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: fmt.Sprintf("expected type %s, not %s", expectedType, t.Type)}
}
if t.Version < 1 {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: "version cannot be less than one"}
}
// Meta is a map of FileMeta, so indexing a role that isn't in the map yields
// an empty FileMeta whose Hashes map is nil, and key lookups in a nil map
// are safe.
//
// For now sha256 is required and sha512 is not.
if _, ok := t.Meta[CanonicalSnapshotRole.String()].Hashes[notary.SHA256]; !ok {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: "missing snapshot sha256 checksum information"}
}
if err := CheckValidHashStructures(t.Meta[CanonicalSnapshotRole.String()].Hashes); err != nil {
return ErrInvalidMetadata{
role: CanonicalTimestampRole, msg: fmt.Sprintf("invalid snapshot checksum information, %v", err)}
}
return nil
}
// NewTimestamp initializes a timestamp with an existing snapshot
func NewTimestamp(snapshot *Signed) (*SignedTimestamp, error) {
snapshotJSON, err := json.Marshal(snapshot)
if err != nil {
return nil, err
}
snapshotMeta, err := NewFileMeta(bytes.NewReader(snapshotJSON), NotaryDefaultHashes...)
if err != nil {
return nil, err
}
return &SignedTimestamp{
Signatures: make([]Signature, 0),
Signed: Timestamp{
SignedCommon: SignedCommon{
Type: TUFTypes[CanonicalTimestampRole],
Version: 0,
Expires: DefaultExpires(CanonicalTimestampRole),
},
Meta: Files{
CanonicalSnapshotRole.String(): snapshotMeta,
},
},
}, nil
}
// ToSigned partially serializes a SignedTimestamp such that it can
// be signed
func (ts *SignedTimestamp) ToSigned() (*Signed, error) {
s, err := defaultSerializer.MarshalCanonical(ts.Signed)
if err != nil {
return nil, err
}
signed := json.RawMessage{}
err = signed.UnmarshalJSON(s)
if err != nil {
return nil, err
}
sigs := make([]Signature, len(ts.Signatures))
copy(sigs, ts.Signatures)
return &Signed{
Signatures: sigs,
Signed: &signed,
}, nil
}
// GetSnapshot gets the expected snapshot metadata hashes in the timestamp metadata,
// or nil if it doesn't exist
func (ts *SignedTimestamp) GetSnapshot() (*FileMeta, error) {
snapshotExpected, ok := ts.Signed.Meta[CanonicalSnapshotRole.String()]
if !ok {
return nil, ErrMissingMeta{Role: CanonicalSnapshotRole.String()}
}
return &snapshotExpected, nil
}
// MarshalJSON returns the serialized form of SignedTimestamp as bytes
func (ts *SignedTimestamp) MarshalJSON() ([]byte, error) {
signed, err := ts.ToSigned()
if err != nil {
return nil, err
}
return defaultSerializer.Marshal(signed)
}
// TimestampFromSigned parses a Signed object into a fully unpacked
// SignedTimestamp
func TimestampFromSigned(s *Signed) (*SignedTimestamp, error) {
ts := Timestamp{}
if err := defaultSerializer.Unmarshal(*s.Signed, &ts); err != nil {
return nil, err
}
if err := IsValidTimestampStructure(ts); err != nil {
return nil, err
}
sigs := make([]Signature, len(s.Signatures))
copy(sigs, s.Signatures)
return &SignedTimestamp{
Signatures: sigs,
Signed: ts,
}, nil
}
package data
import (
"bytes"
"crypto/sha256"
"crypto/sha512"
"crypto/subtle"
"encoding/hex"
"fmt"
"hash"
"io"
"io/ioutil"
"path"
"strings"
"time"
"github.com/docker/go/canonical/json"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
)
// GUN is a Globally Unique Name. It is used to identify trust collections.
// An example usage of this is for container image repositories.
// For example: myregistry.io/myuser/myimage
type GUN string
func (g GUN) String() string {
return string(g)
}
// RoleName type for specifying role
type RoleName string
func (r RoleName) String() string {
return string(r)
}
// Parent provides the parent path role from the provided child role
func (r RoleName) Parent() RoleName {
return RoleName(path.Dir(r.String()))
}
// MetadataRoleMapToStringMap converts a map of metadata keyed by RoleName into one keyed by string
func MetadataRoleMapToStringMap(roles map[RoleName][]byte) map[string][]byte {
metadata := make(map[string][]byte)
for k, v := range roles {
metadata[k.String()] = v
}
return metadata
}
// NewRoleList generates a slice of RoleName objects from a slice of strings
func NewRoleList(roles []string) []RoleName {
var roleNames []RoleName
for _, role := range roles {
roleNames = append(roleNames, RoleName(role))
}
return roleNames
}
// RolesListToStringList generates a slice of strings from a slice of roles
func RolesListToStringList(roles []RoleName) []string {
var roleNames []string
for _, role := range roles {
roleNames = append(roleNames, role.String())
}
return roleNames
}
// SigAlgorithm for types of signatures
type SigAlgorithm string
func (k SigAlgorithm) String() string {
return string(k)
}
const defaultHashAlgorithm = "sha256"
// NotaryDefaultExpiries is the construct used to configure the default expiry times of
// the various role files.
var NotaryDefaultExpiries = map[RoleName]time.Duration{
CanonicalRootRole: notary.NotaryRootExpiry,
CanonicalTargetsRole: notary.NotaryTargetsExpiry,
CanonicalSnapshotRole: notary.NotarySnapshotExpiry,
CanonicalTimestampRole: notary.NotaryTimestampExpiry,
}
// Signature types
const (
EDDSASignature SigAlgorithm = "eddsa"
RSAPSSSignature SigAlgorithm = "rsapss"
RSAPKCS1v15Signature SigAlgorithm = "rsapkcs1v15"
ECDSASignature SigAlgorithm = "ecdsa"
PyCryptoSignature SigAlgorithm = "pycrypto-pkcs#1 pss"
)
// Key types
const (
ED25519Key = "ed25519"
RSAKey = "rsa"
RSAx509Key = "rsa-x509"
ECDSAKey = "ecdsa"
ECDSAx509Key = "ecdsa-x509"
)
// TUFTypes is the set of metadata types
var TUFTypes = map[RoleName]string{
CanonicalRootRole: "Root",
CanonicalTargetsRole: "Targets",
CanonicalSnapshotRole: "Snapshot",
CanonicalTimestampRole: "Timestamp",
}
// ValidTUFType checks if the given type is valid for the role
func ValidTUFType(typ string, role RoleName) bool {
if ValidRole(role) {
// All targets delegation roles must have
// the same type as the targets role.
if role == "" {
// role is unknown and does not map to
// a type
return false
}
if strings.HasPrefix(role.String(), CanonicalTargetsRole.String()+"/") {
role = CanonicalTargetsRole
}
}
// most people will just use the defaults, so do this map lookup
// first. Compare the type explicitly rather than only checking map
// membership, in case a key and value in the map somehow differ.
if v, ok := TUFTypes[role]; ok {
return typ == v
}
return false
}
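// A few illustrative inputs and the results ValidTUFType gives for them
// (examples added for clarity, not in the original source):
//
//	ValidTUFType("Targets", "targets")          == true
//	ValidTUFType("Targets", "targets/releases") == true  // delegations share the targets type
//	ValidTUFType("Root", "targets")             == false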
// Signed is the high level, partially deserialized metadata object
// used to verify signatures before fully unpacking, or to add signatures
// before fully packing
type Signed struct {
Signed *json.RawMessage `json:"signed"`
Signatures []Signature `json:"signatures"`
}
// SignedCommon contains the fields common to the Signed component of all
// TUF metadata files
type SignedCommon struct {
Type string `json:"_type"`
Expires time.Time `json:"expires"`
Version int `json:"version"`
}
// SignedMeta is used in server validation where we only need signatures
// and common fields
type SignedMeta struct {
Signed SignedCommon `json:"signed"`
Signatures []Signature `json:"signatures"`
}
// Signature is a signature on a piece of metadata
type Signature struct {
KeyID string `json:"keyid"`
Method SigAlgorithm `json:"method"`
Signature []byte `json:"sig"`
IsValid bool `json:"-"`
}
// Files is the map of paths to FileMeta objects in targets and delegations
// metadata files
type Files map[string]FileMeta
// Hashes is the map of hash type to digest created for each metadata
// and target file
type Hashes map[string][]byte
// NotaryDefaultHashes contains the default supported hash algorithms.
var NotaryDefaultHashes = []string{notary.SHA256, notary.SHA512}
// FileMeta contains the size and hashes for a metadata or target file. Custom
// data can be optionally added.
type FileMeta struct {
Length int64 `json:"length"`
Hashes Hashes `json:"hashes"`
Custom *json.RawMessage `json:"custom,omitempty"`
}
// Equals returns true if the other FileMeta object is equivalent to this one
func (f FileMeta) Equals(o FileMeta) bool {
if o.Length != f.Length || len(o.Hashes) != len(f.Hashes) {
return false
}
if f.Custom == nil && o.Custom != nil || f.Custom != nil && o.Custom == nil {
return false
}
// we don't care if these are valid hashes, just that they are equal
for key, val := range f.Hashes {
if !bytes.Equal(val, o.Hashes[key]) {
return false
}
}
if f.Custom == nil && o.Custom == nil {
return true
}
fBytes, err := f.Custom.MarshalJSON()
if err != nil {
return false
}
oBytes, err := o.Custom.MarshalJSON()
if err != nil {
return false
}
return bytes.Equal(fBytes, oBytes)
}
// CheckHashes verifies all the checksums specified by the "hashes" of the payload.
func CheckHashes(payload []byte, name string, hashes Hashes) error {
cnt := 0
// k, v indicate the hash algorithm and the corresponding value
for k, v := range hashes {
switch k {
case notary.SHA256:
checksum := sha256.Sum256(payload)
if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
return ErrMismatchedChecksum{alg: notary.SHA256, name: name, expected: hex.EncodeToString(v)}
}
cnt++
case notary.SHA512:
checksum := sha512.Sum512(payload)
if subtle.ConstantTimeCompare(checksum[:], v) == 0 {
return ErrMismatchedChecksum{alg: notary.SHA512, name: name, expected: hex.EncodeToString(v)}
}
cnt++
}
}
if cnt == 0 {
return ErrMissingMeta{Role: name}
}
return nil
}
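// Illustrative sketch, not part of the original API: building a Hashes map for
// a payload and verifying that it round-trips through CheckHashes. The
// function name is a hypothetical example; assumes this file's existing imports.
func exampleCheckHashes(payload []byte) error {
sum := sha256.Sum256(payload)
hashes := Hashes{notary.SHA256: sum[:]}
// A correct sha256 entry satisfies CheckHashes; a corrupted payload or
// digest would produce an ErrMismatchedChecksum instead.
return CheckHashes(payload, "example", hashes)
}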
// CompareMultiHashes verifies that the two Hashes passed in can represent the same data.
// This means the two maps must share at least one hash algorithm, and the digests for every shared algorithm must match.
// Note that we check the intersection of map keys, which adds support for non-default hash algorithms in notary
func CompareMultiHashes(hashes1, hashes2 Hashes) error {
// First check if the two hash structures are valid
if err := CheckValidHashStructures(hashes1); err != nil {
return err
}
if err := CheckValidHashStructures(hashes2); err != nil {
return err
}
// Check if they have at least one matching hash, and no conflicts
cnt := 0
for hashAlg, hash1 := range hashes1 {
hash2, ok := hashes2[hashAlg]
if !ok {
continue
}
if subtle.ConstantTimeCompare(hash1[:], hash2[:]) == 0 {
return fmt.Errorf("mismatched %s checksum", hashAlg)
}
// If we reached here, we had a match
cnt++
}
if cnt == 0 {
return fmt.Errorf("at least one matching hash needed")
}
return nil
}
// CheckValidHashStructures returns an error, or nil, depending on whether
// the content of the hashes is valid or not.
func CheckValidHashStructures(hashes Hashes) error {
cnt := 0
for k, v := range hashes {
switch k {
case notary.SHA256:
if len(v) != sha256.Size {
return ErrInvalidChecksum{alg: notary.SHA256}
}
cnt++
case notary.SHA512:
if len(v) != sha512.Size {
return ErrInvalidChecksum{alg: notary.SHA512}
}
cnt++
}
}
if cnt == 0 {
return fmt.Errorf("at least one supported hash needed")
}
return nil
}
// NewFileMeta generates a FileMeta object from the reader, using the
// hash algorithms provided
func NewFileMeta(r io.Reader, hashAlgorithms ...string) (FileMeta, error) {
if len(hashAlgorithms) == 0 {
hashAlgorithms = []string{defaultHashAlgorithm}
}
hashes := make(map[string]hash.Hash, len(hashAlgorithms))
for _, hashAlgorithm := range hashAlgorithms {
var h hash.Hash
switch hashAlgorithm {
case notary.SHA256:
h = sha256.New()
case notary.SHA512:
h = sha512.New()
default:
return FileMeta{}, fmt.Errorf("unknown hash algorithm: %s", hashAlgorithm)
}
hashes[hashAlgorithm] = h
r = io.TeeReader(r, h)
}
n, err := io.Copy(ioutil.Discard, r)
if err != nil {
return FileMeta{}, err
}
m := FileMeta{Length: n, Hashes: make(Hashes, len(hashes))}
for hashAlgorithm, h := range hashes {
m.Hashes[hashAlgorithm] = h.Sum(nil)
}
return m, nil
}
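// Illustrative sketch, not part of the original API: generating FileMeta for
// an in-memory payload with notary's default hash algorithms, then checking
// the recorded length. The function name is a hypothetical example.
func exampleNewFileMeta(payload []byte) (FileMeta, error) {
meta, err := NewFileMeta(bytes.NewReader(payload), NotaryDefaultHashes...)
if err != nil {
return FileMeta{}, err
}
// Length is the number of bytes consumed from the reader.
if meta.Length != int64(len(payload)) {
return FileMeta{}, fmt.Errorf("unexpected length %d", meta.Length)
}
return meta, nil
}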
// Delegations holds a tier of targets delegations
type Delegations struct {
Keys Keys `json:"keys"`
Roles []*Role `json:"roles"`
}
// NewDelegations initializes an empty Delegations object
func NewDelegations() *Delegations {
return &Delegations{
Keys: make(map[string]PublicKey),
Roles: make([]*Role, 0),
}
}
// These values are recommended TUF expiry times.
var defaultExpiryTimes = map[RoleName]time.Duration{
CanonicalRootRole: notary.Year,
CanonicalTargetsRole: 90 * notary.Day,
CanonicalSnapshotRole: 7 * notary.Day,
CanonicalTimestampRole: notary.Day,
}
// SetDefaultExpiryTimes allows one to change the default expiries.
func SetDefaultExpiryTimes(times map[RoleName]time.Duration) {
for key, value := range times {
if _, ok := defaultExpiryTimes[key]; !ok {
logrus.Errorf("Attempted to set default expiry for an unknown role: %s", key.String())
continue
}
defaultExpiryTimes[key] = value
}
}
// DefaultExpires gets the default expiry time for the given role
func DefaultExpires(role RoleName) time.Time {
if d, ok := defaultExpiryTimes[role]; ok {
return time.Now().Add(d)
}
var t time.Time
return t.UTC().Round(time.Second)
}
type unmarshalledSignature Signature
// UnmarshalJSON does a custom unmarshalling of the signature JSON
func (s *Signature) UnmarshalJSON(data []byte) error {
uSignature := unmarshalledSignature{}
err := json.Unmarshal(data, &uSignature)
if err != nil {
return err
}
uSignature.Method = SigAlgorithm(strings.ToLower(string(uSignature.Method)))
*s = Signature(uSignature)
return nil
}
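// Illustrative behavior (not in the original source): the method string is
// normalized to lower case during unmarshalling.
//
//	var sig Signature
//	_ = json.Unmarshal([]byte(`{"keyid":"abc","method":"ECDSA","sig":""}`), &sig)
//	// sig.Method == "ecdsa"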
// Copyright 2023 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package utils
import (
"bytes"
"crypto/rsa"
"github.com/theupdateframework/notary/tuf/data"
"testing"
)
var (
keys = map[int]string{
0: "ed25519",
1: "ECDA",
2: "rsa",
}
)
func FuzzParsePEMPrivateKey(f *testing.F) {
f.Fuzz(func(t *testing.T, keyBytes []byte, keyType int, bits int64, passphrase string, usePassPhrase bool) {
var key data.PrivateKey
var edPEM []byte
var err error
switch keys[keyType%len(keys)] {
case "ed25519":
key, err = GenerateED25519Key(bytes.NewReader(keyBytes))
if err != nil {
t.Skip()
}
case "ECDA":
key, err = GenerateECDSAKey(bytes.NewReader(keyBytes))
if err != nil {
t.Skip()
}
case "rsa":
rsaKey, err := rsa.GenerateKey(bytes.NewReader(keyBytes), int(bits))
if err != nil {
t.Skip()
}
err = rsaKey.Validate()
if err != nil {
t.Skip()
}
key, err = RSAToPrivateKey(rsaKey)
if err != nil {
t.Fatal(err)
}
}
if usePassPhrase {
edPEM, err = ConvertPrivateKeyToPKCS8(key, data.CanonicalRootRole, "", passphrase)
if err != nil {
t.Fatal(err)
}
} else {
edPEM, err = ConvertPrivateKeyToPKCS8(key, data.CanonicalRootRole, "", "")
if err != nil {
t.Fatal(err)
}
}
role, _, err := ExtractPrivateKeyAttributes(edPEM)
if err != nil {
t.Fatal(err)
}
if role != "root" {
t.Fatal("role should be root")
}
if usePassPhrase {
_, _ = ParsePEMPrivateKey(edPEM, passphrase)
} else {
_, _ = ParsePEMPrivateKey(edPEM, "")
}
})
}
// Package utils contains tuf related utility functions; however, this file is
// hard-forked from the https://github.com/youmark/pkcs8 package. It has been
// further modified based on the requirements of Notary. For converting keys
// into PKCS#8 format, the original package expected a *crypto.PrivateKey
// interface, which it then type-inferred to either *rsa.PrivateKey or
// *ecdsa.PrivateKey depending on the need, and later converted to ASN.1 DER
// encoded form. This whole process was superfluous here, as keys are already
// kept in ASN.1 DER format wrapped in the data.PrivateKey structure. With
// these changes, the package has become tightly coupled with notary, as most
// of the method signatures have been updated. Moreover, support for ED25519
// keys has been added as well. The license for the original package follows:
//
// The MIT License (MIT)
//
// Copyright (c) 2014 youmark
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in all
// copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
// SOFTWARE.
package utils
import (
"crypto/aes"
"crypto/cipher"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/sha1" // #nosec
"crypto/x509"
"crypto/x509/pkix"
"encoding/asn1"
"errors"
"fmt"
"golang.org/x/crypto/pbkdf2"
"github.com/theupdateframework/notary/tuf/data"
)
// Copy from crypto/x509
var (
oidPublicKeyRSA = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 1, 1}
oidPublicKeyECDSA = asn1.ObjectIdentifier{1, 2, 840, 10045, 2, 1}
// crypto/x509 doesn't have support for ED25519
// http://www.oid-info.com/get/1.3.6.1.4.1.11591.15.1
oidPublicKeyED25519 = asn1.ObjectIdentifier{1, 3, 6, 1, 4, 1, 11591, 15, 1}
)
// Copy from crypto/x509
var (
oidNamedCurveP224 = asn1.ObjectIdentifier{1, 3, 132, 0, 33}
oidNamedCurveP256 = asn1.ObjectIdentifier{1, 2, 840, 10045, 3, 1, 7}
oidNamedCurveP384 = asn1.ObjectIdentifier{1, 3, 132, 0, 34}
oidNamedCurveP521 = asn1.ObjectIdentifier{1, 3, 132, 0, 35}
)
// Copy from crypto/x509
func oidFromNamedCurve(curve elliptic.Curve) (asn1.ObjectIdentifier, bool) {
switch curve {
case elliptic.P224():
return oidNamedCurveP224, true
case elliptic.P256():
return oidNamedCurveP256, true
case elliptic.P384():
return oidNamedCurveP384, true
case elliptic.P521():
return oidNamedCurveP521, true
}
return nil, false
}
// Unencrypted PKCS8
var (
oidPKCS5PBKDF2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 12}
oidPBES2 = asn1.ObjectIdentifier{1, 2, 840, 113549, 1, 5, 13}
oidAES256CBC = asn1.ObjectIdentifier{2, 16, 840, 1, 101, 3, 4, 1, 42}
)
type privateKeyInfo struct {
Version int
PrivateKeyAlgorithm []asn1.ObjectIdentifier
PrivateKey []byte
}
// Encrypted PKCS8
type pbkdf2Params struct {
Salt []byte
IterationCount int
}
type pbkdf2Algorithms struct {
IDPBKDF2 asn1.ObjectIdentifier
PBKDF2Params pbkdf2Params
}
type pbkdf2Encs struct {
EncryAlgo asn1.ObjectIdentifier
IV []byte
}
type pbes2Params struct {
KeyDerivationFunc pbkdf2Algorithms
EncryptionScheme pbkdf2Encs
}
type pbes2Algorithms struct {
IDPBES2 asn1.ObjectIdentifier
PBES2Params pbes2Params
}
type encryptedPrivateKeyInfo struct {
EncryptionAlgorithm pbes2Algorithms
EncryptedData []byte
}
// pkcs8 reflects an ASN.1, PKCS#8 PrivateKey.
// copied from https://github.com/golang/go/blob/964639cc338db650ccadeafb7424bc8ebb2c0f6c/src/crypto/x509/pkcs8.go#L17
type pkcs8 struct {
Version int
Algo pkix.AlgorithmIdentifier
PrivateKey []byte
}
func parsePKCS8ToTufKey(der []byte) (data.PrivateKey, error) {
var key pkcs8
if _, err := asn1.Unmarshal(der, &key); err != nil {
if _, ok := err.(asn1.StructuralError); ok {
return nil, errors.New("could not decrypt private key")
}
return nil, err
}
if key.Algo.Algorithm.Equal(oidPublicKeyED25519) {
tufED25519PrivateKey, err := ED25519ToPrivateKey(key.PrivateKey)
if err != nil {
return nil, fmt.Errorf("could not convert ed25519.PrivateKey to data.PrivateKey: %v", err)
}
return tufED25519PrivateKey, nil
}
privKey, err := x509.ParsePKCS8PrivateKey(der)
if err != nil {
return nil, err
}
switch priv := privKey.(type) {
case *rsa.PrivateKey:
tufRSAPrivateKey, err := RSAToPrivateKey(priv)
if err != nil {
return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
}
return tufRSAPrivateKey, nil
case *ecdsa.PrivateKey:
tufECDSAPrivateKey, err := ECDSAToPrivateKey(priv)
if err != nil {
return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
}
return tufECDSAPrivateKey, nil
}
return nil, errors.New("unsupported key type")
}
// ParsePKCS8ToTufKey takes a PKCS#8 key in DER format and returns a data.PrivateKey.
// A password should be provided for an encrypted PKCS#8 key; otherwise it should be nil.
func ParsePKCS8ToTufKey(der []byte, password []byte) (data.PrivateKey, error) {
if password == nil {
return parsePKCS8ToTufKey(der)
}
var privKey encryptedPrivateKeyInfo
if _, err := asn1.Unmarshal(der, &privKey); err != nil {
return nil, errors.New("pkcs8: only PKCS #5 v2.0 supported")
}
if !privKey.EncryptionAlgorithm.IDPBES2.Equal(oidPBES2) {
return nil, errors.New("pkcs8: only PBES2 supported")
}
if !privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.IDPBKDF2.Equal(oidPKCS5PBKDF2) {
return nil, errors.New("pkcs8: only PBKDF2 supported")
}
encParam := privKey.EncryptionAlgorithm.PBES2Params.EncryptionScheme
kdfParam := privKey.EncryptionAlgorithm.PBES2Params.KeyDerivationFunc.PBKDF2Params
switch {
case encParam.EncryAlgo.Equal(oidAES256CBC):
iv := encParam.IV
salt := kdfParam.Salt
iter := kdfParam.IterationCount
encryptedKey := privKey.EncryptedData
symkey := pbkdf2.Key(password, salt, iter, 32, sha1.New)
block, err := aes.NewCipher(symkey)
if err != nil {
return nil, err
}
mode := cipher.NewCBCDecrypter(block, iv)
mode.CryptBlocks(encryptedKey, encryptedKey)
// no need to explicitly remove padding, as ASN.1 unmarshalling will automatically discard it
key, err := parsePKCS8ToTufKey(encryptedKey)
if err != nil {
return nil, errors.New("pkcs8: incorrect password")
}
return key, nil
default:
return nil, errors.New("pkcs8: only AES-256-CBC supported")
}
}
func convertTUFKeyToPKCS8(priv data.PrivateKey) ([]byte, error) {
var pkey privateKeyInfo
switch priv.Algorithm() {
case data.RSAKey, data.RSAx509Key:
// Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0).
// But OpenSSL sets it to v1 even when publicKey is present
pkey.Version = 0
pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1)
pkey.PrivateKeyAlgorithm[0] = oidPublicKeyRSA
pkey.PrivateKey = priv.Private()
case data.ECDSAKey, data.ECDSAx509Key:
// To extract Curve value, parsing ECDSA key to *ecdsa.PrivateKey
eckey, err := x509.ParseECPrivateKey(priv.Private())
if err != nil {
return nil, err
}
oidNamedCurve, ok := oidFromNamedCurve(eckey.Curve)
if !ok {
return nil, errors.New("pkcs8: unknown elliptic curve")
}
// Per RFC5958, if publicKey is present, then version is set to v2(1) else version is set to v1(0).
// But OpenSSL sets it to v1 even when publicKey is present
pkey.Version = 1
pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 2)
pkey.PrivateKeyAlgorithm[0] = oidPublicKeyECDSA
pkey.PrivateKeyAlgorithm[1] = oidNamedCurve
pkey.PrivateKey = priv.Private()
case data.ED25519Key:
pkey.Version = 0
pkey.PrivateKeyAlgorithm = make([]asn1.ObjectIdentifier, 1)
pkey.PrivateKeyAlgorithm[0] = oidPublicKeyED25519
pkey.PrivateKey = priv.Private()
default:
return nil, fmt.Errorf("algorithm %s not supported", priv.Algorithm())
}
return asn1.Marshal(pkey)
}
func convertTUFKeyToPKCS8Encrypted(priv data.PrivateKey, password []byte) ([]byte, error) {
// Convert private key into PKCS8 format
pkey, err := convertTUFKeyToPKCS8(priv)
if err != nil {
return nil, err
}
// Calculate key from password based on PKCS5 algorithm
// Use an 8 byte salt, a 16 byte IV, and 2048 iterations
iter := 2048
salt := make([]byte, 8)
iv := make([]byte, 16)
_, err = rand.Reader.Read(salt)
if err != nil {
return nil, err
}
_, err = rand.Reader.Read(iv)
if err != nil {
return nil, err
}
key := pbkdf2.Key(password, salt, iter, 32, sha1.New)
// Use AES256-CBC mode, pad plaintext with PKCS5 padding scheme
padding := aes.BlockSize - len(pkey)%aes.BlockSize
if padding > 0 {
n := len(pkey)
pkey = append(pkey, make([]byte, padding)...)
for i := 0; i < padding; i++ {
pkey[n+i] = byte(padding)
}
}
encryptedKey := make([]byte, len(pkey))
block, err := aes.NewCipher(key)
if err != nil {
return nil, err
}
mode := cipher.NewCBCEncrypter(block, iv)
mode.CryptBlocks(encryptedKey, pkey)
pbkdf2algo := pbkdf2Algorithms{oidPKCS5PBKDF2, pbkdf2Params{salt, iter}}
pbkdf2encs := pbkdf2Encs{oidAES256CBC, iv}
pbes2algo := pbes2Algorithms{oidPBES2, pbes2Params{pbkdf2algo, pbkdf2encs}}
encryptedPkey := encryptedPrivateKeyInfo{pbes2algo, encryptedKey}
return asn1.Marshal(encryptedPkey)
}
// ConvertTUFKeyToPKCS8 converts a private key (data.PrivateKey) to PKCS#8 and returns it in DER format.
// If password is not nil, the private key is converted to encrypted PKCS#8.
func ConvertTUFKeyToPKCS8(priv data.PrivateKey, password []byte) ([]byte, error) {
if password == nil {
return convertTUFKeyToPKCS8(priv)
}
return convertTUFKeyToPKCS8Encrypted(priv, password)
}
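// Example (illustrative sketch; assumes an ECDSA key from GenerateKey in
// this package): a key round-trips through encrypted PKCS#8.
//
//	privKey, _ := GenerateKey(data.ECDSAKey)
//	der, _ := ConvertTUFKeyToPKCS8(privKey, []byte("secret"))
//	parsed, _ := ParsePKCS8ToTufKey(der, []byte("secret"))
//	// parsed.ID() == privKey.ID()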
package utils
import (
"strings"
)
// RoleList is a list of roles
type RoleList []string
// Len returns the length of the list
func (r RoleList) Len() int {
return len(r)
}
// Less returns true if the item at i should be sorted
// before the item at j. It's an unstable partial ordering
// based on the number of segments, separated by "/", in
// the role name
func (r RoleList) Less(i, j int) bool {
segsI := strings.Split(r[i], "/")
segsJ := strings.Split(r[j], "/")
if len(segsI) == len(segsJ) {
return r[i] < r[j]
}
return len(segsI) < len(segsJ)
}
// Swap the items at 2 locations in the list
func (r RoleList) Swap(i, j int) {
r[i], r[j] = r[j], r[i]
}
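// Example (illustrative): RoleList satisfies sort.Interface, ordering
// shallower role names before deeper ones, and lexically within a depth:
//
//	roles := RoleList{"targets/a/b", "targets", "targets/a"}
//	sort.Sort(roles)
//	// roles: ["targets", "targets/a", "targets/a/b"]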
package utils
import (
"fmt"
"sync"
)
// ErrEmptyStack is used when an action that requires some
// content is invoked and the stack is empty
type ErrEmptyStack struct {
action string
}
func (err ErrEmptyStack) Error() string {
return fmt.Sprintf("attempted to %s with empty stack", err.action)
}
// ErrBadTypeCast is used by PopX functions when the item
// cannot be typed to X
type ErrBadTypeCast struct{}
func (err ErrBadTypeCast) Error() string {
return "attempted to do a typed pop and item was not of type"
}
// Stack is a simple type agnostic stack implementation
type Stack struct {
s []interface{}
l sync.Mutex
}
// NewStack creates a new stack
func NewStack() *Stack {
s := &Stack{
s: make([]interface{}, 0),
}
return s
}
// Push adds an item to the top of the stack.
func (s *Stack) Push(item interface{}) {
s.l.Lock()
defer s.l.Unlock()
s.s = append(s.s, item)
}
// Pop removes and returns the top item on the stack, or returns
// ErrEmptyStack if the stack has no content
func (s *Stack) Pop() (interface{}, error) {
s.l.Lock()
defer s.l.Unlock()
l := len(s.s)
if l > 0 {
item := s.s[l-1]
s.s = s.s[:l-1]
return item, nil
}
return nil, ErrEmptyStack{action: "Pop"}
}
// PopString attempts to cast the top item on the stack to the string type.
// If this succeeds, it removes and returns the top item. If the item
// is not of the string type, ErrBadTypeCast is returned. If the stack
// is empty, ErrEmptyStack is returned
func (s *Stack) PopString() (string, error) {
s.l.Lock()
defer s.l.Unlock()
l := len(s.s)
if l > 0 {
item := s.s[l-1]
if item, ok := item.(string); ok {
s.s = s.s[:l-1]
return item, nil
}
return "", ErrBadTypeCast{}
}
return "", ErrEmptyStack{action: "PopString"}
}
// Empty returns true if the stack is empty
func (s *Stack) Empty() bool {
s.l.Lock()
defer s.l.Unlock()
return len(s.s) == 0
}
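// Example (illustrative): a typed pop fails without mutating the stack,
// so the mismatched item can still be retrieved with a plain Pop:
//
//	s := NewStack()
//	s.Push("snapshot")
//	s.Push(42)
//	_, err := s.PopString() // ErrBadTypeCast; 42 remains on the stack
//	v, _ := s.Pop()         // v == 42
//	str, _ := s.PopString() // str == "snapshot"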
package utils
import (
"crypto/sha256"
"crypto/sha512"
"encoding/hex"
"fmt"
"io"
"github.com/theupdateframework/notary/tuf/data"
)
// StrSliceContains checks if the given string appears in the slice
func StrSliceContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// RoleNameSliceContains checks if the given RoleName appears in the slice
func RoleNameSliceContains(ss []data.RoleName, s data.RoleName) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// RoleNameSliceRemove removes the given RoleName from the slice, returning a new slice
func RoleNameSliceRemove(ss []data.RoleName, s data.RoleName) []data.RoleName {
res := []data.RoleName{}
for _, v := range ss {
if v != s {
res = append(res, v)
}
}
return res
}
// NoopCloser is a simple Reader wrapper that does nothing when Close is
// called
type NoopCloser struct {
io.Reader
}
// Close does nothing for a NoopCloser
func (nc *NoopCloser) Close() error {
return nil
}
// DoHash returns the digest of d using the hashing algorithm named
// in alg
func DoHash(alg string, d []byte) []byte {
switch alg {
case "sha256":
digest := sha256.Sum256(d)
return digest[:]
case "sha512":
digest := sha512.Sum512(d)
return digest[:]
}
return nil
}
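// Example (illustrative): callers must handle the nil return for
// unsupported algorithms:
//
//	digest := DoHash("sha256", []byte("hello"))
//	// hex.EncodeToString(digest) == "2cf24dba5fb0a30e26e83b2ac5b9e29e1b161e5c1fa7425e73043362938b9824"
//	if DoHash("md5", []byte("hello")) == nil {
//		// "md5" is not supported
//	}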
// UnusedDelegationKeys prunes a list of keys, returning those that are no
// longer in use for a given targets file
func UnusedDelegationKeys(t data.SignedTargets) []string {
// compare ids to all still active key ids in all active roles
// with the targets file
found := make(map[string]bool)
for _, r := range t.Signed.Delegations.Roles {
for _, id := range r.KeyIDs {
found[id] = true
}
}
var discard []string
for id := range t.Signed.Delegations.Keys {
if !found[id] {
discard = append(discard, id)
}
}
return discard
}
// RemoveUnusedKeys determines which keys in the slice of IDs are no longer
// used in the given targets file and removes them from the delegated keys
// map
func RemoveUnusedKeys(t *data.SignedTargets) {
unusedIDs := UnusedDelegationKeys(*t)
for _, id := range unusedIDs {
delete(t.Signed.Delegations.Keys, id)
}
}
// FindRoleIndex returns the index of the role named <name> or -1 if no
// matching role is found.
func FindRoleIndex(rs []*data.Role, name data.RoleName) int {
for i, r := range rs {
if r.Name == name {
return i
}
}
return -1
}
// ConsistentName generates the appropriate HTTP URL path for the role,
// based on whether the repo is marked as consistent. The RemoteStore
// is responsible for adding file extensions.
func ConsistentName(role string, hashSHA256 []byte) string {
if len(hashSHA256) > 0 {
hash := hex.EncodeToString(hashSHA256)
return fmt.Sprintf("%s.%s", role, hash)
}
return role
}
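// Example (illustrative): with a hash the role gains a hex-encoded
// suffix; without one it is returned unchanged:
//
//	ConsistentName("root", []byte{0xab, 0xcd}) // "root.abcd"
//	ConsistentName("timestamp", nil)           // "timestamp"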
package utils
import (
"bytes"
"crypto/ecdsa"
"crypto/elliptic"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"crypto/x509/pkix"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"math/big"
"time"
"github.com/sirupsen/logrus"
"github.com/theupdateframework/notary"
"github.com/theupdateframework/notary/tuf/data"
"golang.org/x/crypto/ed25519"
)
// CanonicalKeyID returns the ID of the public bytes version of a TUF key.
// On regular RSA/ECDSA TUF keys, this is just the key ID. On X509 RSA/ECDSA
// TUF keys, this is the key ID of the public key part of the key in the leaf cert
func CanonicalKeyID(k data.PublicKey) (string, error) {
if k == nil {
return "", errors.New("public key is nil")
}
switch k.Algorithm() {
case data.ECDSAx509Key, data.RSAx509Key:
return X509PublicKeyID(k)
default:
return k.ID(), nil
}
}
// LoadCertFromPEM returns the first certificate found in a bunch of bytes or error
// if nothing is found. Taken from https://golang.org/src/crypto/x509/cert_pool.go#L85.
func LoadCertFromPEM(pemBytes []byte) (*x509.Certificate, error) {
for len(pemBytes) > 0 {
var block *pem.Block
block, pemBytes = pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("no certificates found in PEM data")
}
if block.Type != "CERTIFICATE" || len(block.Headers) != 0 {
continue
}
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
continue
}
return cert, nil
}
return nil, errors.New("no certificates found in PEM data")
}
// X509PublicKeyID returns a public key ID as a string, given a
// data.PublicKey that contains an X509 Certificate
func X509PublicKeyID(certPubKey data.PublicKey) (string, error) {
// Note that this only loads the first certificate from the public key
cert, err := LoadCertFromPEM(certPubKey.Public())
if err != nil {
return "", err
}
pubKeyBytes, err := x509.MarshalPKIXPublicKey(cert.PublicKey)
if err != nil {
return "", err
}
var key data.PublicKey
switch certPubKey.Algorithm() {
case data.ECDSAx509Key:
key = data.NewECDSAPublicKey(pubKeyBytes)
case data.RSAx509Key:
key = data.NewRSAPublicKey(pubKeyBytes)
}
return key.ID(), nil
}
func parseLegacyPrivateKey(block *pem.Block, passphrase string) (data.PrivateKey, error) {
var privKeyBytes []byte
var err error
//lint:ignore SA1019 needed for legacy keys.
if x509.IsEncryptedPEMBlock(block) {
//lint:ignore SA1019 needed for legacy keys.
privKeyBytes, err = x509.DecryptPEMBlock(block, []byte(passphrase))
if err != nil {
return nil, errors.New("could not decrypt private key")
}
} else {
privKeyBytes = block.Bytes
}
switch block.Type {
case "RSA PRIVATE KEY":
rsaPrivKey, err := x509.ParsePKCS1PrivateKey(privKeyBytes)
if err != nil {
return nil, fmt.Errorf("could not parse DER encoded key: %v", err)
}
tufRSAPrivateKey, err := RSAToPrivateKey(rsaPrivKey)
if err != nil {
return nil, fmt.Errorf("could not convert rsa.PrivateKey to data.PrivateKey: %v", err)
}
return tufRSAPrivateKey, nil
case "EC PRIVATE KEY":
ecdsaPrivKey, err := x509.ParseECPrivateKey(privKeyBytes)
if err != nil {
return nil, fmt.Errorf("could not parse DER encoded private key: %v", err)
}
tufECDSAPrivateKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
if err != nil {
return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
}
return tufECDSAPrivateKey, nil
case "ED25519 PRIVATE KEY":
// We serialize ED25519 keys by concatenating the private key
// to the public key and encoding with PEM. See the
// ED25519ToPrivateKey function.
tufECDSAPrivateKey, err := ED25519ToPrivateKey(privKeyBytes)
if err != nil {
return nil, fmt.Errorf("could not convert ecdsa.PrivateKey to data.PrivateKey: %v", err)
}
return tufECDSAPrivateKey, nil
default:
return nil, fmt.Errorf("unsupported key type %q", block.Type)
}
}
// ParsePEMPrivateKey returns a data.PrivateKey from a PEM encoded private key. It
// supports PKCS#8 as well as legacy RSA/ECDSA/ED25519 blocks (non-FIPS mode only)
// and attempts to decrypt using the passphrase if the key is encrypted.
func ParsePEMPrivateKey(pemBytes []byte, passphrase string) (data.PrivateKey, error) {
return parsePEMPrivateKey(pemBytes, passphrase, notary.FIPSEnabled())
}
func parsePEMPrivateKey(pemBytes []byte, passphrase string, fips bool) (data.PrivateKey, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
return nil, errors.New("no valid private key found")
}
switch block.Type {
case "RSA PRIVATE KEY", "EC PRIVATE KEY", "ED25519 PRIVATE KEY":
if fips {
return nil, fmt.Errorf("%s not supported in FIPS mode", block.Type)
}
return parseLegacyPrivateKey(block, passphrase)
case "ENCRYPTED PRIVATE KEY", "PRIVATE KEY":
if passphrase == "" {
return ParsePKCS8ToTufKey(block.Bytes, nil)
}
return ParsePKCS8ToTufKey(block.Bytes, []byte(passphrase))
default:
return nil, fmt.Errorf("unsupported key type %q", block.Type)
}
}
// CertToPEM is a utility function that returns a PEM encoded x509 Certificate
func CertToPEM(cert *x509.Certificate) []byte {
pemCert := pem.EncodeToMemory(&pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw})
return pemCert
}
// CertChainToPEM is a utility function that returns a PEM encoded chain of x509 Certificates, in the order they are passed
func CertChainToPEM(certChain []*x509.Certificate) ([]byte, error) {
var pemBytes bytes.Buffer
for _, cert := range certChain {
if err := pem.Encode(&pemBytes, &pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}); err != nil {
return nil, err
}
}
return pemBytes.Bytes(), nil
}
// LoadCertFromFile loads the first certificate from the file provided. The
// data is expected to be PEM encoded and to contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertFromFile(filename string) (*x509.Certificate, error) {
certs, err := LoadCertBundleFromFile(filename)
if err != nil {
return nil, err
}
return certs[0], nil
}
// LoadCertBundleFromFile loads certificates from the file provided. The
// data is expected to be PEM encoded and to contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertBundleFromFile(filename string) ([]*x509.Certificate, error) {
b, err := ioutil.ReadFile(filename)
if err != nil {
return nil, err
}
return LoadCertBundleFromPEM(b)
}
// LoadCertBundleFromPEM loads certificates from the []byte provided. The
// data is expected to be PEM encoded and to contain one or more certificates
// with PEM type "CERTIFICATE"
func LoadCertBundleFromPEM(pemBytes []byte) ([]*x509.Certificate, error) {
certificates := []*x509.Certificate{}
var block *pem.Block
block, pemBytes = pem.Decode(pemBytes)
for ; block != nil; block, pemBytes = pem.Decode(pemBytes) {
if block.Type == "CERTIFICATE" {
cert, err := x509.ParseCertificate(block.Bytes)
if err != nil {
return nil, err
}
certificates = append(certificates, cert)
} else {
return nil, fmt.Errorf("invalid pem block type: %s", block.Type)
}
}
if len(certificates) == 0 {
return nil, fmt.Errorf("no valid certificates found")
}
return certificates, nil
}
// GetLeafCerts parses a list of x509 Certificates and returns all of the
// ones that aren't CAs
func GetLeafCerts(certs []*x509.Certificate) []*x509.Certificate {
var leafCerts []*x509.Certificate
for _, cert := range certs {
if cert.IsCA {
continue
}
leafCerts = append(leafCerts, cert)
}
return leafCerts
}
// GetIntermediateCerts parses a list of x509 Certificates and returns all of the
// ones marked as a CA, to be used as intermediates
func GetIntermediateCerts(certs []*x509.Certificate) []*x509.Certificate {
var intCerts []*x509.Certificate
for _, cert := range certs {
if cert.IsCA {
intCerts = append(intCerts, cert)
}
}
return intCerts
}
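// Example (minimal sketch; "bundle.pem" is a hypothetical file): split a
// PEM bundle into leaf and intermediate certificates:
//
//	certs, err := LoadCertBundleFromFile("bundle.pem")
//	if err != nil {
//		// handle error
//	}
//	leaves := GetLeafCerts(certs)         // IsCA == false
//	inters := GetIntermediateCerts(certs) // IsCA == true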
// ParsePEMPublicKey returns a data.PublicKey from a PEM encoded public key or certificate.
func ParsePEMPublicKey(pubKeyBytes []byte) (data.PublicKey, error) {
pemBlock, _ := pem.Decode(pubKeyBytes)
if pemBlock == nil {
return nil, errors.New("no valid public key found")
}
switch pemBlock.Type {
case "CERTIFICATE":
cert, err := x509.ParseCertificate(pemBlock.Bytes)
if err != nil {
return nil, fmt.Errorf("could not parse provided certificate: %v", err)
}
err = ValidateCertificate(cert, true)
if err != nil {
return nil, fmt.Errorf("invalid certificate: %v", err)
}
return CertToKey(cert), nil
case "PUBLIC KEY":
keyType, err := keyTypeForPublicKey(pemBlock.Bytes)
if err != nil {
return nil, err
}
return data.NewPublicKey(keyType, pemBlock.Bytes), nil
default:
return nil, fmt.Errorf("unsupported PEM block type %q, expected CERTIFICATE or PUBLIC KEY", pemBlock.Type)
}
}
func keyTypeForPublicKey(pubKeyBytes []byte) (string, error) {
pub, err := x509.ParsePKIXPublicKey(pubKeyBytes)
if err != nil {
return "", fmt.Errorf("unable to parse pem encoded public key: %v", err)
}
switch pub.(type) {
case *ecdsa.PublicKey:
return data.ECDSAKey, nil
case *rsa.PublicKey:
return data.RSAKey, nil
}
return "", fmt.Errorf("unknown public key format")
}
// ValidateCertificate returns an error if the certificate is not valid for notary.
// Currently this means checking that the validity window is well formed, that the
// signature algorithm is not SHA1-based, that an RSA public key has a large enough
// modulus, and, optionally, that the certificate has not expired
func ValidateCertificate(c *x509.Certificate, checkExpiry bool) error {
if (c.NotBefore).After(c.NotAfter) {
return fmt.Errorf("certificate validity window is invalid")
}
// Can't have SHA1 sig algorithm
if c.SignatureAlgorithm == x509.SHA1WithRSA || c.SignatureAlgorithm == x509.DSAWithSHA1 || c.SignatureAlgorithm == x509.ECDSAWithSHA1 {
return fmt.Errorf("certificate with CN %s uses invalid SHA1 signature algorithm", c.Subject.CommonName)
}
// If we have an RSA key, make sure it's long enough
if c.PublicKeyAlgorithm == x509.RSA {
rsaKey, ok := c.PublicKey.(*rsa.PublicKey)
if !ok {
return fmt.Errorf("unable to parse RSA public key")
}
if rsaKey.N.BitLen() < notary.MinRSABitSize {
return fmt.Errorf("RSA bit length is too short")
}
}
if checkExpiry {
now := time.Now()
tomorrow := now.AddDate(0, 0, 1)
// Give one day leeway on creation "before" time, check "after" against today
if (tomorrow).Before(c.NotBefore) || now.After(c.NotAfter) {
return data.ErrCertExpired{CN: c.Subject.CommonName}
}
// If this certificate is expiring within 6 months, put out a warning
if (c.NotAfter).Before(time.Now().AddDate(0, 6, 0)) {
logrus.Warnf("certificate with CN %s is near expiry", c.Subject.CommonName)
}
}
return nil
}
// GenerateKey returns a new private key using the provided algorithm or an
// error detailing why the key could not be generated
func GenerateKey(algorithm string) (data.PrivateKey, error) {
switch algorithm {
case data.ECDSAKey:
return GenerateECDSAKey(rand.Reader)
case data.ED25519Key:
return GenerateED25519Key(rand.Reader)
}
return nil, fmt.Errorf("private key type not supported for key generation: %s", algorithm)
}
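// Example (illustrative): only ECDSA and ED25519 keys can be generated
// locally; RSA keys are not supported for generation:
//
//	key, err := GenerateKey(data.ECDSAKey) // ok
//	_, err = GenerateKey(data.RSAKey)      // error: "private key type not supported for key generation: rsa"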
// RSAToPrivateKey converts an rsa.PrivateKey to a TUF data.PrivateKey type
func RSAToPrivateKey(rsaPrivKey *rsa.PrivateKey) (data.PrivateKey, error) {
// Get a DER-encoded representation of the PublicKey
rsaPubBytes, err := x509.MarshalPKIXPublicKey(&rsaPrivKey.PublicKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal public key: %v", err)
}
// Get a DER-encoded representation of the PrivateKey
rsaPrivBytes := x509.MarshalPKCS1PrivateKey(rsaPrivKey)
pubKey := data.NewRSAPublicKey(rsaPubBytes)
return data.NewRSAPrivateKey(pubKey, rsaPrivBytes)
}
// GenerateECDSAKey generates an ECDSA Private key and returns a TUF PrivateKey
func GenerateECDSAKey(random io.Reader) (data.PrivateKey, error) {
ecdsaPrivKey, err := ecdsa.GenerateKey(elliptic.P256(), random)
if err != nil {
return nil, err
}
tufPrivKey, err := ECDSAToPrivateKey(ecdsaPrivKey)
if err != nil {
return nil, err
}
logrus.Debugf("generated ECDSA key with keyID: %s", tufPrivKey.ID())
return tufPrivKey, nil
}
// GenerateED25519Key generates an ED25519 private key and returns a TUF
// PrivateKey. The serialization format we use is just the public key bytes
// followed by the private key bytes
func GenerateED25519Key(random io.Reader) (data.PrivateKey, error) {
pub, priv, err := ed25519.GenerateKey(random)
if err != nil {
return nil, err
}
var serialized [ed25519.PublicKeySize + ed25519.PrivateKeySize]byte
copy(serialized[:], pub[:])
copy(serialized[ed25519.PublicKeySize:], priv[:])
tufPrivKey, err := ED25519ToPrivateKey(serialized[:])
if err != nil {
return nil, err
}
logrus.Debugf("generated ED25519 key with keyID: %s", tufPrivKey.ID())
return tufPrivKey, nil
}
// ECDSAToPrivateKey converts an ecdsa.PrivateKey to a TUF data.PrivateKey type
func ECDSAToPrivateKey(ecdsaPrivKey *ecdsa.PrivateKey) (data.PrivateKey, error) {
// Get a DER-encoded representation of the PublicKey
ecdsaPubBytes, err := x509.MarshalPKIXPublicKey(&ecdsaPrivKey.PublicKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal public key: %v", err)
}
// Get a DER-encoded representation of the PrivateKey
ecdsaPrivKeyBytes, err := x509.MarshalECPrivateKey(ecdsaPrivKey)
if err != nil {
return nil, fmt.Errorf("failed to marshal private key: %v", err)
}
pubKey := data.NewECDSAPublicKey(ecdsaPubBytes)
return data.NewECDSAPrivateKey(pubKey, ecdsaPrivKeyBytes)
}
// ED25519ToPrivateKey converts a serialized ED25519 key to a TUF
// data.PrivateKey type
func ED25519ToPrivateKey(privKeyBytes []byte) (data.PrivateKey, error) {
if len(privKeyBytes) != ed25519.PublicKeySize+ed25519.PrivateKeySize {
return nil, errors.New("malformed ed25519 private key")
}
pubKey := data.NewED25519PublicKey(privKeyBytes[:ed25519.PublicKeySize])
return data.NewED25519PrivateKey(*pubKey, privKeyBytes)
}
// ExtractPrivateKeyAttributes extracts role and gun values from private key bytes
func ExtractPrivateKeyAttributes(pemBytes []byte) (data.RoleName, data.GUN, error) {
return extractPrivateKeyAttributes(pemBytes, notary.FIPSEnabled())
}
func extractPrivateKeyAttributes(pemBytes []byte, fips bool) (data.RoleName, data.GUN, error) {
block, _ := pem.Decode(pemBytes)
if block == nil {
return "", "", errors.New("PEM block is empty")
}
switch block.Type {
case "RSA PRIVATE KEY", "EC PRIVATE KEY", "ED25519 PRIVATE KEY":
if fips {
return "", "", fmt.Errorf("%s not supported in FIPS mode", block.Type)
}
case "PRIVATE KEY", "ENCRYPTED PRIVATE KEY":
// do nothing for PKCS#8 keys
default:
return "", "", errors.New("unknown key format")
}
return data.RoleName(block.Headers["role"]), data.GUN(block.Headers["gun"]), nil
}
// ConvertPrivateKeyToPKCS8 converts a data.PrivateKey to PKCS#8 Format
func ConvertPrivateKeyToPKCS8(key data.PrivateKey, role data.RoleName, gun data.GUN, passphrase string) ([]byte, error) {
var (
err error
der []byte
blockType = "PRIVATE KEY"
)
if passphrase == "" {
der, err = ConvertTUFKeyToPKCS8(key, nil)
} else {
blockType = "ENCRYPTED PRIVATE KEY"
der, err = ConvertTUFKeyToPKCS8(key, []byte(passphrase))
}
if err != nil {
return nil, fmt.Errorf("unable to convert to PKCS8 key")
}
headers := make(map[string]string)
if role != "" {
headers["role"] = role.String()
}
if gun != "" {
headers["gun"] = gun.String()
}
return pem.EncodeToMemory(&pem.Block{Bytes: der, Type: blockType, Headers: headers}), nil
}
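// Example (minimal sketch; the gun value is hypothetical): the role and
// gun travel as PEM headers, so they survive a PKCS#8 round trip:
//
//	pemBytes, _ := ConvertPrivateKeyToPKCS8(key, data.CanonicalRootRole, "example.com/app", "passphrase")
//	role, gun, _ := ExtractPrivateKeyAttributes(pemBytes)
//	// role == data.CanonicalRootRole, gun == "example.com/app"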
// CertToKey transforms a single input certificate into its corresponding
// PublicKey
func CertToKey(cert *x509.Certificate) data.PublicKey {
block := pem.Block{Type: "CERTIFICATE", Bytes: cert.Raw}
pemdata := pem.EncodeToMemory(&block)
switch cert.PublicKeyAlgorithm {
case x509.RSA:
return data.NewRSAx509PublicKey(pemdata)
case x509.ECDSA:
return data.NewECDSAx509PublicKey(pemdata)
default:
logrus.Debugf("Unknown key type parsed from certificate: %v", cert.PublicKeyAlgorithm)
return nil
}
}
// CertsToKeys transforms each of the input certificate chains into its corresponding
// PublicKey
func CertsToKeys(leafCerts map[string]*x509.Certificate, intCerts map[string][]*x509.Certificate) map[string]data.PublicKey {
keys := make(map[string]data.PublicKey)
for id, leafCert := range leafCerts {
if key, err := CertBundleToKey(leafCert, intCerts[id]); err == nil {
keys[key.ID()] = key
}
}
return keys
}
// CertBundleToKey creates a TUF key from a leaf certs and a list of
// intermediates
func CertBundleToKey(leafCert *x509.Certificate, intCerts []*x509.Certificate) (data.PublicKey, error) {
certBundle := []*x509.Certificate{leafCert}
certBundle = append(certBundle, intCerts...)
certChainPEM, err := CertChainToPEM(certBundle)
if err != nil {
return nil, err
}
var newKey data.PublicKey
// Use the leaf cert's public key algorithm for typing
switch leafCert.PublicKeyAlgorithm {
case x509.RSA:
newKey = data.NewRSAx509PublicKey(certChainPEM)
case x509.ECDSA:
newKey = data.NewECDSAx509PublicKey(certChainPEM)
default:
logrus.Debugf("Unknown key type parsed from certificate: %v", leafCert.PublicKeyAlgorithm)
return nil, x509.ErrUnsupportedAlgorithm
}
return newKey, nil
}
// NewCertificate returns an X509 Certificate following a template, given a Common Name and validity interval.
func NewCertificate(commonName string, startTime, endTime time.Time) (*x509.Certificate, error) {
serialNumberLimit := new(big.Int).Lsh(big.NewInt(1), 128)
serialNumber, err := rand.Int(rand.Reader, serialNumberLimit)
if err != nil {
return nil, fmt.Errorf("failed to generate new certificate: %v", err)
}
return &x509.Certificate{
SerialNumber: serialNumber,
Subject: pkix.Name{
CommonName: commonName,
},
NotBefore: startTime,
NotAfter: endTime,
KeyUsage: x509.KeyUsageKeyEncipherment | x509.KeyUsageDigitalSignature,
ExtKeyUsage: []x509.ExtKeyUsage{x509.ExtKeyUsageCodeSigning},
BasicConstraintsValid: true,
}, nil
}
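// Example (illustrative): the returned template carries a random 128-bit
// serial number and a code-signing extended key usage; it must still be
// signed (for instance with x509.CreateCertificate) to become a usable
// certificate:
//
//	tmpl, err := NewCertificate("example.com/app", time.Now(), time.Now().AddDate(10, 0, 0))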
package validation
import (
"encoding/json"
"fmt"
)
// VALIDATION ERRORS
// ErrValidation represents a general validation error
type ErrValidation struct {
Msg string
}
func (err ErrValidation) Error() string {
return fmt.Sprintf("An error occurred during validation: %s", err.Msg)
}
// ErrBadHierarchy represents missing metadata; currently, a missing snapshot.
// When delegations are implemented it will also represent a missing
// delegation parent
type ErrBadHierarchy struct {
Missing string
Msg string
}
func (err ErrBadHierarchy) Error() string {
return fmt.Sprintf("Metadata hierarchy is incomplete: %s", err.Msg)
}
// ErrBadRoot represents a failure validating the root
type ErrBadRoot struct {
Msg string
}
func (err ErrBadRoot) Error() string {
return fmt.Sprintf("The root metadata is invalid: %s", err.Msg)
}
// ErrBadTargets represents a failure to validate a targets (incl delegations)
type ErrBadTargets struct {
Msg string
}
func (err ErrBadTargets) Error() string {
return fmt.Sprintf("The targets metadata is invalid: %s", err.Msg)
}
// ErrBadSnapshot represents a failure to validate the snapshot
type ErrBadSnapshot struct {
Msg string
}
func (err ErrBadSnapshot) Error() string {
return fmt.Sprintf("The snapshot metadata is invalid: %s", err.Msg)
}
// END VALIDATION ERRORS
// SerializableError is a struct that can be used to serialize an error as JSON
type SerializableError struct {
Name string
Error error
}
// UnmarshalJSON attempts to unmarshal the error into the right type
func (s *SerializableError) UnmarshalJSON(text []byte) (err error) {
var x struct{ Name string }
err = json.Unmarshal(text, &x)
if err != nil {
return
}
var theError error
switch x.Name {
case "ErrValidation":
var e struct{ Error ErrValidation }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadHierarchy":
var e struct{ Error ErrBadHierarchy }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadRoot":
var e struct{ Error ErrBadRoot }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadTargets":
var e struct{ Error ErrBadTargets }
err = json.Unmarshal(text, &e)
theError = e.Error
case "ErrBadSnapshot":
var e struct{ Error ErrBadSnapshot }
err = json.Unmarshal(text, &e)
theError = e.Error
default:
err = fmt.Errorf("do not know how to unmarshal %s", x.Name)
return
}
if err != nil {
return
}
s.Name = x.Name
s.Error = theError
return nil
}
// NewSerializableError serializes one of the above errors into JSON
func NewSerializableError(err error) (*SerializableError, error) {
// make sure it's one of our errors
var name string
switch err.(type) {
case ErrValidation:
name = "ErrValidation"
case ErrBadHierarchy:
name = "ErrBadHierarchy"
case ErrBadRoot:
name = "ErrBadRoot"
case ErrBadTargets:
name = "ErrBadTargets"
case ErrBadSnapshot:
name = "ErrBadSnapshot"
default:
return nil, fmt.Errorf("does not support serializing non-validation errors")
}
return &SerializableError{Name: name, Error: err}, nil
}
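// Example (illustrative): a validation error survives a JSON round trip
// with its concrete type intact:
//
//	s, _ := NewSerializableError(ErrBadRoot{Msg: "bad signature"})
//	raw, _ := json.Marshal(s)
//	var out SerializableError
//	_ = json.Unmarshal(raw, &out)
//	// out.Error is an ErrBadRoot again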
// Copyright The Notary Project Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package cose
import (
"crypto"
"crypto/rand"
"crypto/x509"
"errors"
"fmt"
"io"
"strconv"
"time"
"github.com/fxamacker/cbor/v2"
"github.com/notaryproject/notation-core-go/signature"
"github.com/notaryproject/notation-core-go/signature/internal/base"
"github.com/veraison/go-cose"
)
// MediaTypeEnvelope is the COSE signature envelope blob mediaType.
const MediaTypeEnvelope = "application/cose"
var (
// encMode is the encoding mode used in Sign
encMode cbor.EncMode
// decMode is the decoding mode used in Content
decMode cbor.DecMode
)
func init() {
err := signature.RegisterEnvelopeType(MediaTypeEnvelope, NewEnvelope, ParseEnvelope)
if err != nil {
panic(err)
}
encOpts := cbor.EncOptions{
Time: cbor.TimeUnix,
TimeTag: cbor.EncTagRequired,
}
encMode, err = encOpts.EncMode()
if err != nil {
panic(err)
}
decOpts := cbor.DecOptions{
TimeTag: cbor.DecTagRequired,
}
decMode, err = decOpts.DecMode()
if err != nil {
panic(err)
}
}
// Protected Headers
// https://github.com/notaryproject/notaryproject/blob/cose-envelope/signature-envelope-cose.md
const (
headerLabelExpiry = "io.cncf.notary.expiry"
headerLabelSigningScheme = "io.cncf.notary.signingScheme"
headerLabelSigningTime = "io.cncf.notary.signingTime"
headerLabelAuthenticSigningTime = "io.cncf.notary.authenticSigningTime"
)
// Unprotected Headers
// https://github.com/notaryproject/notaryproject/blob/cose-envelope/signature-envelope-cose.md
const (
headerLabelTimeStampSignature = "io.cncf.notary.timestampSignature"
headerLabelSigningAgent = "io.cncf.notary.signingAgent"
)
// Map of cose.Algorithm to signature.Algorithm
var coseAlgSignatureAlgMap = map[cose.Algorithm]signature.Algorithm{
cose.AlgorithmPS256: signature.AlgorithmPS256,
cose.AlgorithmPS384: signature.AlgorithmPS384,
cose.AlgorithmPS512: signature.AlgorithmPS512,
cose.AlgorithmES256: signature.AlgorithmES256,
cose.AlgorithmES384: signature.AlgorithmES384,
cose.AlgorithmES512: signature.AlgorithmES512,
}
// Map of signingScheme to signingTime header label
var signingSchemeTimeLabelMap = map[signature.SigningScheme]string{
signature.SigningSchemeX509: headerLabelSigningTime,
signature.SigningSchemeX509SigningAuthority: headerLabelAuthenticSigningTime,
}
// signer interface is a cose.Signer with certificate chain fetcher.
type signer interface {
cose.Signer
CertificateChain() []*x509.Certificate
}
// remoteSigner implements the signer interface.
// It is used during Sign when the base signature.Signer's Sign implementation is desired.
type remoteSigner struct {
base signature.Signer
alg cose.Algorithm
certs []*x509.Certificate
}
func newRemoteSigner(base signature.Signer) (*remoteSigner, error) {
keySpec, err := base.KeySpec()
if err != nil {
return nil, err
}
alg, err := getSignatureAlgorithmFromKeySpec(keySpec)
if err != nil {
return nil, err
}
return &remoteSigner{
base: base,
alg: alg,
}, nil
}
// Algorithm implements cose.Signer interface.
func (signer *remoteSigner) Algorithm() cose.Algorithm {
return signer.alg
}
// Sign implements cose.Signer interface.
func (signer *remoteSigner) Sign(rand io.Reader, payload []byte) ([]byte, error) {
signature, certs, err := signer.base.Sign(payload)
if err != nil {
return nil, err
}
signer.certs = certs
return signature, nil
}
// CertificateChain implements signer interface.
func (signer *remoteSigner) CertificateChain() []*x509.Certificate {
return signer.certs
}
type localSigner struct {
cose.Signer
certs []*x509.Certificate
}
func newLocalSigner(base signature.LocalSigner) (*localSigner, error) {
key := base.PrivateKey()
if cryptoSigner, ok := key.(crypto.Signer); ok {
certs, err := base.CertificateChain()
if err != nil {
return nil, err
}
keySpec, err := base.KeySpec()
if err != nil {
return nil, err
}
alg, err := getSignatureAlgorithmFromKeySpec(keySpec)
if err != nil {
return nil, err
}
coseSigner, err := cose.NewSigner(alg, cryptoSigner)
if err != nil {
return nil, err
}
return &localSigner{
Signer: coseSigner,
certs: certs,
}, nil
}
return nil, &signature.UnsupportedSigningKeyError{}
}
// CertificateChain implements signer interface.
func (signer *localSigner) CertificateChain() []*x509.Certificate {
return signer.certs
}
type envelope struct {
base *cose.Sign1Message
}
// NewEnvelope initializes an empty COSE signature envelope.
func NewEnvelope() signature.Envelope {
return &base.Envelope{
Envelope: &envelope{},
}
}
// ParseEnvelope parses envelopeBytes to a COSE signature envelope.
func ParseEnvelope(envelopeBytes []byte) (signature.Envelope, error) {
var msg cose.Sign1Message
if err := msg.UnmarshalCBOR(envelopeBytes); err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
return &base.Envelope{
Envelope: &envelope{
base: &msg,
},
Raw: envelopeBytes,
}, nil
}
// Sign implements signature.Envelope interface.
// On success, this function returns the COSE signature envelope byte slice.
func (e *envelope) Sign(req *signature.SignRequest) ([]byte, error) {
// get built-in signer from go-cose or remote signer based on req.Signer
signer, err := getSigner(req.Signer)
if err != nil {
return nil, &signature.InvalidSignRequestError{Msg: err.Error()}
}
// prepare COSE_Sign1 message
msg := cose.NewSign1Message()
// generate protected headers of COSE envelope
msg.Headers.Protected.SetAlgorithm(signer.Algorithm())
if err := generateProtectedHeaders(req, msg.Headers.Protected); err != nil {
return nil, &signature.InvalidSignRequestError{Msg: err.Error()}
}
// generate payload of COSE envelope
msg.Headers.Protected[cose.HeaderLabelContentType] = req.Payload.ContentType
msg.Payload = req.Payload.Content
// core sign process, generate signature of COSE envelope
if err := msg.Sign(rand.Reader, nil, signer); err != nil {
return nil, &signature.InvalidSignRequestError{Msg: err.Error()}
}
// generate unprotected headers of COSE envelope
generateUnprotectedHeaders(req, signer, msg.Headers.Unprotected)
// TODO: needs to add headerKeyTimeStampSignature.
// encode Sign1Message into COSE_Sign1_Tagged object
encoded, err := msg.MarshalCBOR()
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
e.base = msg
return encoded, nil
}
// Verify implements signature.Envelope interface.
// Note: Verify only verifies the integrity of the given COSE envelope.
func (e *envelope) Verify() (*signature.EnvelopeContent, error) {
// sanity check
if e.base == nil {
return nil, &signature.SignatureEnvelopeNotFoundError{}
}
certs, ok := e.base.Headers.Unprotected[cose.HeaderLabelX5Chain].([]any)
if !ok || len(certs) == 0 {
return nil, &signature.InvalidSignatureError{Msg: "certificate chain is not present"}
}
certRaw, ok := certs[0].([]byte)
if !ok {
return nil, &signature.InvalidSignatureError{Msg: "COSE envelope malformed leaf certificate"}
}
cert, err := x509.ParseCertificate(certRaw)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: "malformed leaf certificate"}
}
// core verify process, verify integrity of COSE envelope
publicKeyAlg, err := getSignatureAlgorithm(cert)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
verifier, err := cose.NewVerifier(publicKeyAlg, cert.PublicKey)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
err = e.base.Verify(nil, verifier)
if err != nil {
return nil, &signature.SignatureIntegrityError{Err: err}
}
// extract content
return e.Content()
}
// Content implements signature.Envelope interface.
func (e *envelope) Content() (*signature.EnvelopeContent, error) {
// sanity check
if e.base == nil {
return nil, &signature.SignatureEnvelopeNotFoundError{}
}
payload, err := e.payload()
if err != nil {
return nil, err
}
signerInfo, err := e.signerInfo()
if err != nil {
return nil, err
}
return &signature.EnvelopeContent{
SignerInfo: *signerInfo,
Payload: *payload,
}, nil
}
// Given a COSE envelope, extracts its signature.Payload.
func (e *envelope) payload() (*signature.Payload, error) {
cty, ok := e.base.Headers.Protected[cose.HeaderLabelContentType]
if !ok {
return nil, &signature.InvalidSignatureError{Msg: "missing content type"}
}
var contentType string
if contentType, ok = cty.(string); !ok {
return nil, &signature.InvalidSignatureError{Msg: "content type should be of 'tstr' type"}
}
return &signature.Payload{
ContentType: contentType,
Content: e.base.Payload,
}, nil
}
// Given a COSE envelope, extracts its signature.SignerInfo.
func (e *envelope) signerInfo() (*signature.SignerInfo, error) {
var signerInfo signature.SignerInfo
// parse signature of COSE envelope, populate signerInfo.Signature
sig := e.base.Signature
if len(sig) == 0 {
return nil, &signature.InvalidSignatureError{Msg: "signature missing in COSE envelope"}
}
signerInfo.Signature = sig
// parse protected headers of COSE envelope and populate related
// signerInfo fields
err := parseProtectedHeaders(e.base.Headers.RawProtected, e.base.Headers.Protected, &signerInfo)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
// parse unprotected headers of COSE envelope
certs, ok := e.base.Headers.Unprotected[cose.HeaderLabelX5Chain].([]any)
if !ok || len(certs) == 0 {
return nil, &signature.InvalidSignatureError{Msg: "certificate chain is not present"}
}
var certChain []*x509.Certificate
for _, c := range certs {
certRaw, ok := c.([]byte)
if !ok {
return nil, &signature.InvalidSignatureError{Msg: "certificate chain is not present"}
}
cert, err := x509.ParseCertificate(certRaw)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
certChain = append(certChain, cert)
}
// populate signerInfo.CertificateChain
signerInfo.CertificateChain = certChain
// populate signerInfo.UnsignedAttributes.SigningAgent
if h, ok := e.base.Headers.Unprotected[headerLabelSigningAgent].(string); ok {
signerInfo.UnsignedAttributes.SigningAgent = h
}
// TODO: needs to add headerKeyTimeStampSignature.
return &signerInfo, nil
}
// getSignatureAlgorithm picks a recommended signing algorithm for the given
// certificate.
func getSignatureAlgorithm(signingCert *x509.Certificate) (cose.Algorithm, error) {
keySpec, err := signature.ExtractKeySpec(signingCert)
if err != nil {
return 0, err
}
return getSignatureAlgorithmFromKeySpec(keySpec)
}
// getSignatureAlgorithmFromKeySpec maps a key spec to a COSE signing
// algorithm that satisfies the spec's type and size requirements.
func getSignatureAlgorithmFromKeySpec(keySpec signature.KeySpec) (cose.Algorithm, error) {
switch keySpec.Type {
case signature.KeyTypeRSA:
switch keySpec.Size {
case 2048:
return cose.AlgorithmPS256, nil
case 3072:
return cose.AlgorithmPS384, nil
case 4096:
return cose.AlgorithmPS512, nil
default:
return 0, &signature.UnsupportedSigningKeyError{Msg: fmt.Sprintf("RSA: key size %d not supported", keySpec.Size)}
}
case signature.KeyTypeEC:
switch keySpec.Size {
case 256:
return cose.AlgorithmES256, nil
case 384:
return cose.AlgorithmES384, nil
case 521:
return cose.AlgorithmES512, nil
default:
return 0, &signature.UnsupportedSigningKeyError{Msg: fmt.Sprintf("EC: key size %d not supported", keySpec.Size)}
}
default:
return 0, &signature.UnsupportedSigningKeyError{Msg: "key type not supported"}
}
}
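// Example (illustrative): a P-256 key spec maps to ES256, while an
// unsupported key size is rejected:
//
//	alg, _ := getSignatureAlgorithmFromKeySpec(signature.KeySpec{Type: signature.KeyTypeEC, Size: 256})
//	// alg == cose.AlgorithmES256
//	_, err := getSignatureAlgorithmFromKeySpec(signature.KeySpec{Type: signature.KeyTypeEC, Size: 224})
//	// err is a *signature.UnsupportedSigningKeyError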
// getSigner returns the built-in implementation of cose.Signer from go-cose
// or a remote signer implementation of cose.Signer.
func getSigner(signer signature.Signer) (signer, error) {
if localSigner, ok := signer.(signature.LocalSigner); ok {
return newLocalSigner(localSigner)
}
return newRemoteSigner(signer)
}
// generateProtectedHeaders creates Protected Headers of the COSE envelope
// during Sign process.
func generateProtectedHeaders(req *signature.SignRequest, protected cose.ProtectedHeader) error {
// signingScheme
crit := []any{headerLabelSigningScheme}
protected[headerLabelSigningScheme] = string(req.SigningScheme)
// signingTime/authenticSigningTime
signingTimeLabel, ok := signingSchemeTimeLabelMap[req.SigningScheme]
if !ok {
return &signature.InvalidSignRequestError{Msg: "signing scheme: require notary.x509 or notary.x509.signingAuthority"}
}
rawTimeCBOR, err := encodeTime(req.SigningTime)
if err != nil {
return &signature.InvalidSignRequestError{Msg: fmt.Sprintf("signing time: %q", err)}
}
protected[signingTimeLabel] = rawTimeCBOR
if signingTimeLabel == headerLabelAuthenticSigningTime {
crit = append(crit, headerLabelAuthenticSigningTime)
}
// expiry
if !req.Expiry.IsZero() {
crit = append(crit, headerLabelExpiry)
rawExpiryCBOR, err := encodeTime(req.Expiry)
if err != nil {
return &signature.InvalidSignRequestError{Msg: fmt.Sprintf("expiry: %q", err)}
}
protected[headerLabelExpiry] = rawExpiryCBOR
}
// extended attributes
for _, elm := range req.ExtendedSignedAttributes {
if _, ok := protected[elm.Key]; ok {
return &signature.InvalidSignRequestError{Msg: fmt.Sprintf("%q already exists in the protected header", elm.Key)}
}
if elm.Critical {
crit = append(crit, elm.Key)
}
protected[elm.Key] = elm.Value
}
// critical headers
protected[cose.HeaderLabelCritical] = crit
return nil
}
// generateUnprotectedHeaders creates Unprotected Headers of the COSE envelope
// during Sign process.
func generateUnprotectedHeaders(req *signature.SignRequest, signer signer, unprotected cose.UnprotectedHeader) {
// signing agent
if req.SigningAgent != "" {
unprotected[headerLabelSigningAgent] = req.SigningAgent
}
// certChain
certs := signer.CertificateChain()
certChain := make([]any, len(certs))
for i, c := range certs {
certChain[i] = c.Raw
}
unprotected[cose.HeaderLabelX5Chain] = certChain
}
// parseProtectedHeaders parses COSE envelope's protected headers and
// populates signature.SignerInfo.
func parseProtectedHeaders(rawProtected cbor.RawMessage, protected cose.ProtectedHeader, signerInfo *signature.SignerInfo) error {
// validate critical headers and return extendedAttributeKeys
extendedAttributeKeys, err := validateCritHeaders(protected)
if err != nil {
return err
}
// populate signerInfo.SignatureAlgorithm
alg, err := protected.Algorithm()
if err != nil {
return err
}
sigAlg, ok := coseAlgSignatureAlgMap[alg]
if !ok {
return &signature.InvalidSignatureError{Msg: "signature algorithm not supported: " + strconv.Itoa(int(alg))}
}
signerInfo.SignatureAlgorithm = sigAlg
// populate signerInfo.SignedAttributes.SigningScheme
// headerLabelSigningScheme header has already been checked by
// validateCritHeaders() at the beginning of this function.
signingSchemeString := protected[headerLabelSigningScheme].(string)
signingScheme := signature.SigningScheme(signingSchemeString)
signerInfo.SignedAttributes.SigningScheme = signingScheme
signingTimeLabel, ok := signingSchemeTimeLabelMap[signingScheme]
if !ok {
return &signature.InvalidSignatureError{Msg: "unsupported signingScheme: " + signingSchemeString}
}
// parse CBOR map from raw protected header for tag validation
headerMap, err := generateRawProtectedCBORMap(rawProtected)
if err != nil {
return &signature.InvalidSignatureError{Msg: "generateRawProtectedCBORMap failed: " + err.Error()}
}
// populate signerInfo.SignedAttributes.SigningTime
signingTime, err := parseTime(headerMap, signingTimeLabel, protected)
if err != nil {
return &signature.InvalidSignatureError{Msg: fmt.Sprintf("invalid signingTime: %v", err)}
}
signerInfo.SignedAttributes.SigningTime = signingTime
// populate signerInfo.SignedAttributes.Expiry
if _, ok := protected[headerLabelExpiry]; ok {
expiry, err := parseTime(headerMap, headerLabelExpiry, protected)
if err != nil {
return &signature.InvalidSignatureError{Msg: fmt.Sprintf("invalid expiry: %v", err)}
}
signerInfo.SignedAttributes.Expiry = expiry
}
// populate signerInfo.SignedAttributes.ExtendedAttributes
signerInfo.SignedAttributes.ExtendedAttributes, err = generateExtendedAttributes(extendedAttributeKeys, protected)
return err
}
// validateCritHeaders does a two-way check, namely:
// 1. validate that all critical headers are present in the protected bucket
// 2. validate that all required headers (as per spec) are marked critical
// Returns the list of extended attribute keys
func validateCritHeaders(protected cose.ProtectedHeader) ([]any, error) {
// This ensures all critical headers are present in the protected bucket.
labels, err := protected.Critical()
if err != nil {
return nil, err
}
// set of headers that must be marked as crit
mustMarkedCrit := make(map[any]struct{})
mustMarkedCrit[headerLabelSigningScheme] = struct{}{}
signingScheme, ok := protected[headerLabelSigningScheme].(string)
if !ok {
return nil, &signature.InvalidSignatureError{Msg: "invalid signingScheme"}
}
if signature.SigningScheme(signingScheme) == signature.SigningSchemeX509SigningAuthority {
mustMarkedCrit[headerLabelAuthenticSigningTime] = struct{}{}
}
if _, ok := protected[headerLabelExpiry]; ok {
mustMarkedCrit[headerLabelExpiry] = struct{}{}
}
// validate that all required headers (as per spec) are marked as critical
for _, label := range labels {
delete(mustMarkedCrit, label)
}
if len(mustMarkedCrit) != 0 {
headers := make([]any, 0, len(mustMarkedCrit))
for k := range mustMarkedCrit {
headers = append(headers, k)
}
return nil, &signature.InvalidSignatureError{Msg: fmt.Sprintf("these required headers are not marked as critical: %v", headers)}
}
// fetch all the extended signed attributes
systemHeaders := []any{cose.HeaderLabelAlgorithm, cose.HeaderLabelCritical, cose.HeaderLabelContentType,
headerLabelExpiry, headerLabelSigningScheme, headerLabelSigningTime, headerLabelAuthenticSigningTime}
var extendedAttributeKeys []any
for label := range protected {
if contains(systemHeaders, label) {
continue
}
extendedAttributeKeys = append(extendedAttributeKeys, label)
}
return extendedAttributeKeys, nil
}
// generateExtendedAttributes generates []signature.Attribute during
// SignerInfo process.
func generateExtendedAttributes(extendedAttributeKeys []any, protected cose.ProtectedHeader) ([]signature.Attribute, error) {
criticalHeaders, ok := protected[cose.HeaderLabelCritical].([]any)
if !ok {
return nil, &signature.InvalidSignatureError{Msg: "invalid critical headers"}
}
var extendedAttr []signature.Attribute
for _, key := range extendedAttributeKeys {
extendedAttr = append(extendedAttr, signature.Attribute{
Key: key,
Critical: contains(criticalHeaders, key),
Value: protected[key],
})
}
return extendedAttr, nil
}
// contains checks if e is in s
func contains(s []any, e any) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
// encodeTime generates a Tag1 Datetime CBOR object and casts it to
// cbor.RawMessage
func encodeTime(t time.Time) (cbor.RawMessage, error) {
timeCBOR, err := encMode.Marshal(t)
if err != nil {
return nil, err
}
return cbor.RawMessage(timeCBOR), nil
}
// decodeTime decodes cbor.RawMessage of Tag1 Datetime CBOR object
// into time.Time
//
// For more details: https://github.com/fxamacker/cbor/blob/7704fa5efaf3ef4ac35aff38f50f6ff567793072/decode.go#L52
func decodeTime(timeRaw cbor.RawMessage) (time.Time, error) {
var t time.Time
err := decMode.Unmarshal([]byte(timeRaw), &t)
if err != nil {
return time.Time{}, err
}
return t, nil
}
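// Example (illustrative): encodeTime and decodeTime round-trip a
// time.Time through a Tag 1 epoch-seconds CBOR value:
//
//	raw, _ := encodeTime(time.Unix(1234567890, 0))
//	t, _ := decodeTime(raw)
//	// t.Unix() == 1234567890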
// parseTime validates the Tag 1 Datetime in headerMap for the given label,
// then returns the corresponding time.Time value from cose.ProtectedHeader.
func parseTime(headerMap map[any]cbor.RawMessage, label string, protected cose.ProtectedHeader) (time.Time, error) {
switch t := protected[label].(type) {
// cbor.RawMessage indicates the signing process.
case cbor.RawMessage:
return decodeTime(t)
// time.Time indicates the verification process;
// we only need to validate the Tag 1 Datetime during verification.
case time.Time:
rawMsg, ok := headerMap[label]
if !ok {
return time.Time{}, fmt.Errorf("headerMap is missing label %q", label)
}
rawTag := &cbor.RawTag{}
err := rawTag.UnmarshalCBOR([]byte(rawMsg))
if err != nil {
return time.Time{}, fmt.Errorf("header %q time value does not have a tag", label)
}
if rawTag.Number != 1 {
return time.Time{}, errors.New("only Tag `1` Datetime CBOR object is supported")
}
return t, nil
case nil:
return time.Time{}, fmt.Errorf("protected header %q is missing", label)
}
return time.Time{}, errors.New("invalid timeValue type")
}
// generateRawProtectedCBORMap unmarshals rawProtected Header of COSE
// envelope into a headerMap.
func generateRawProtectedCBORMap(rawProtected cbor.RawMessage) (map[any]cbor.RawMessage, error) {
// empty rawProtected indicates signing process
if len(rawProtected) == 0 {
return nil, nil
}
var decoded []byte
err := decMode.Unmarshal(rawProtected, &decoded)
if err != nil {
return nil, err
}
var headerMap map[any]cbor.RawMessage
err = cbor.Unmarshal(decoded, &headerMap)
if err != nil {
return nil, err
}
return headerMap, nil
}
// Copyright 2023 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package cose
import (
"testing"
)
func FuzzSignatureCose(f *testing.F) {
f.Fuzz(func(t *testing.T, envelopeBytes []byte, shouldVerify bool) {
e, err := ParseEnvelope(envelopeBytes)
if err != nil {
t.Skip()
}
if shouldVerify {
_, _ = e.Verify()
} else {
_, _ = e.Content()
}
})
}
// Copyright The Notary Project Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jws
import (
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"github.com/golang-jwt/jwt/v4"
"github.com/notaryproject/notation-core-go/signature"
"github.com/notaryproject/notation-core-go/signature/internal/base"
)
// MediaTypeEnvelope defines the media type name of JWS envelope.
const MediaTypeEnvelope = "application/jose+json"
func init() {
if err := signature.RegisterEnvelopeType(MediaTypeEnvelope, NewEnvelope, ParseEnvelope); err != nil {
panic(err)
}
}
type envelope struct {
base *jwsEnvelope
}
// NewEnvelope generates a JWS envelope.
func NewEnvelope() signature.Envelope {
return &base.Envelope{
Envelope: &envelope{},
}
}
// ParseEnvelope parses the envelope bytes and returns a JWS envelope.
func ParseEnvelope(envelopeBytes []byte) (signature.Envelope, error) {
var e jwsEnvelope
err := json.Unmarshal(envelopeBytes, &e)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
return &base.Envelope{
Envelope: &envelope{base: &e},
Raw: envelopeBytes,
}, nil
}
// Sign generates and signs the envelope according to the sign request.
func (e *envelope) Sign(req *signature.SignRequest) ([]byte, error) {
// get signingMethod for JWT package
method, err := getSigningMethod(req.Signer)
if err != nil {
return nil, &signature.InvalidSignRequestError{Msg: err.Error()}
}
// get all attributes ready to be signed
signedAttrs, err := getSignedAttributes(req, method.Alg())
if err != nil {
return nil, &signature.InvalidSignRequestError{Msg: err.Error()}
}
// parse payload as jwt.MapClaims
// [jwt-go]: https://pkg.go.dev/github.com/dgrijalva/jwt-go#MapClaims
var payload jwt.MapClaims
if err = json.Unmarshal(req.Payload.Content, &payload); err != nil {
return nil, &signature.InvalidSignRequestError{
Msg: fmt.Sprintf("payload format error: %v", err.Error())}
}
// JWT sign and get certificate chain
compact, certs, err := sign(payload, signedAttrs, method)
if err != nil {
return nil, &signature.InvalidSignRequestError{Msg: err.Error()}
}
// generate envelope
env, err := generateJWS(compact, req, certs)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
encoded, err := json.Marshal(env)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
e.base = env
return encoded, nil
}
// Verify verifies the envelope and returns its enclosed payload and signer info.
func (e *envelope) Verify() (*signature.EnvelopeContent, error) {
if e.base == nil {
return nil, &signature.SignatureEnvelopeNotFoundError{}
}
if len(e.base.Header.CertChain) == 0 {
return nil, &signature.InvalidSignatureError{Msg: "certificate chain is not present"}
}
cert, err := x509.ParseCertificate(e.base.Header.CertChain[0])
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: "malformed leaf certificate"}
}
// verify JWT
compact := compactJWS(e.base)
if err = verifyJWT(compact, cert.PublicKey); err != nil {
return nil, err
}
return e.Content()
}
// Content returns the payload and signer information of the envelope.
// Content is trusted only after the successful call to `Verify()`.
func (e *envelope) Content() (*signature.EnvelopeContent, error) {
if e.base == nil {
return nil, &signature.SignatureEnvelopeNotFoundError{}
}
// parse protected headers
protected, err := parseProtectedHeaders(e.base.Protected)
if err != nil {
return nil, err
}
// extract payload
payload, err := e.payload(protected)
if err != nil {
return nil, err
}
// extract signer info
signerInfo, err := e.signerInfo(protected)
if err != nil {
return nil, err
}
return &signature.EnvelopeContent{
SignerInfo: *signerInfo,
Payload: *payload,
}, nil
}
// payload returns the payload of JWS envelope.
func (e *envelope) payload(protected *jwsProtectedHeader) (*signature.Payload, error) {
payload, err := base64.RawURLEncoding.DecodeString(e.base.Payload)
if err != nil {
return nil, &signature.InvalidSignatureError{
Msg: fmt.Sprintf("payload error: %v", err)}
}
return &signature.Payload{
Content: payload,
ContentType: protected.ContentType,
}, nil
}
// signerInfo returns the SignerInfo of JWS envelope.
func (e *envelope) signerInfo(protected *jwsProtectedHeader) (*signature.SignerInfo, error) {
var signerInfo signature.SignerInfo
// populate protected header to signerInfo
if err := populateProtectedHeaders(protected, &signerInfo); err != nil {
return nil, err
}
// parse signature
sig, err := base64.RawURLEncoding.DecodeString(e.base.Signature)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
if len(sig) == 0 {
return nil, &signature.InvalidSignatureError{Msg: "signature missing in jws-json envelope"}
}
signerInfo.Signature = sig
// parse headers
var certs []*x509.Certificate
for _, certBytes := range e.base.Header.CertChain {
cert, err := x509.ParseCertificate(certBytes)
if err != nil {
return nil, &signature.InvalidSignatureError{Msg: err.Error()}
}
certs = append(certs, cert)
}
signerInfo.CertificateChain = certs
signerInfo.UnsignedAttributes.SigningAgent = e.base.Header.SigningAgent
signerInfo.UnsignedAttributes.TimestampSignature = e.base.Header.TimestampSignature
return &signerInfo, nil
}
// sign the given payload and headers using the given signature provider.
func sign(payload jwt.MapClaims, headers map[string]interface{}, method signingMethod) (string, []*x509.Certificate, error) {
// generate token
token := jwt.NewWithClaims(method, payload)
token.Header = headers
// sign and return compact JWS
compact, err := token.SignedString(method.PrivateKey())
if err != nil {
return "", nil, err
}
// access certificate chain after sign
certs, err := method.CertificateChain()
if err != nil {
return "", nil, err
}
return compact, certs, nil
}
// Copyright 2023 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package jws
import (
"testing"
)
func FuzzSignatureJws(f *testing.F) {
f.Fuzz(func(t *testing.T, envelopeBytes []byte, shouldVerify bool) {
e, err := ParseEnvelope(envelopeBytes)
if err != nil {
t.Skip()
}
if shouldVerify {
_, _ = e.Verify()
} else {
_, _ = e.Content()
}
})
}
// Copyright The Notary Project Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jws
import (
"crypto/x509"
"encoding/base64"
"encoding/json"
"fmt"
"strings"
"github.com/notaryproject/notation-core-go/signature"
)
func parseProtectedHeaders(encoded string) (*jwsProtectedHeader, error) {
rawProtected, err := base64.RawURLEncoding.DecodeString(encoded)
if err != nil {
return nil, &signature.InvalidSignatureError{
Msg: fmt.Sprintf("jws envelope protected header can't be decoded: %s", err.Error())}
}
// To unmarshal JSON with some known (jwsProtectedHeader) and some unknown
// (jwsProtectedHeader.ExtendedAttributes) field names, we unmarshal twice:
// once into a value of type jwsProtectedHeader and once into a value of type
// jwsProtectedHeader.ExtendedAttributes (map[string]interface{}), then remove
// the keys that are already defined in jwsProtectedHeader.
var protected jwsProtectedHeader
if err = json.Unmarshal(rawProtected, &protected); err != nil {
return nil, &signature.InvalidSignatureError{
Msg: fmt.Sprintf("jws envelope protected header can't be decoded: %s", err.Error())}
}
if err = json.Unmarshal(rawProtected, &protected.ExtendedAttributes); err != nil {
return nil, &signature.InvalidSignatureError{
Msg: fmt.Sprintf("jws envelope protected header can't be decoded: %s", err.Error())}
}
// delete attributes that are already defined in jwsProtectedHeader.
for _, headerKey := range headerKeys {
delete(protected.ExtendedAttributes, headerKey)
}
return &protected, nil
}
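// The double-unmarshal pattern used above, in miniature (an illustrative
// sketch with hypothetical field names, not part of the library):
//
//	type hdr struct {
//		Alg   string                 `json:"alg"`
//		Extra map[string]interface{} `json:"-"`
//	}
//	raw := []byte(`{"alg":"PS256","custom":"x"}`)
//	var h hdr
//	_ = json.Unmarshal(raw, &h)       // fills the known field Alg
//	_ = json.Unmarshal(raw, &h.Extra) // fills every field into the map
//	delete(h.Extra, "alg")            // drop keys already captured above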
func populateProtectedHeaders(protectedHeader *jwsProtectedHeader, signerInfo *signature.SignerInfo) error {
err := validateProtectedHeaders(protectedHeader)
if err != nil {
return err
}
if signerInfo.SignatureAlgorithm, err = getSignatureAlgorithm(protectedHeader.Algorithm); err != nil {
return err
}
signerInfo.SignedAttributes.ExtendedAttributes = getExtendedAttributes(protectedHeader.ExtendedAttributes, protectedHeader.Critical)
signerInfo.SignedAttributes.SigningScheme = protectedHeader.SigningScheme
if protectedHeader.Expiry != nil {
signerInfo.SignedAttributes.Expiry = *protectedHeader.Expiry
}
switch protectedHeader.SigningScheme {
case signature.SigningSchemeX509:
if protectedHeader.SigningTime != nil {
signerInfo.SignedAttributes.SigningTime = *protectedHeader.SigningTime
}
case signature.SigningSchemeX509SigningAuthority:
if protectedHeader.AuthenticSigningTime != nil {
signerInfo.SignedAttributes.SigningTime = *protectedHeader.AuthenticSigningTime
}
default:
return &signature.InvalidSignatureError{
Msg: fmt.Sprintf("unsupported SigningScheme: `%v`", protectedHeader.SigningScheme),
}
}
return nil
}
func validateProtectedHeaders(protectedHeader *jwsProtectedHeader) error {
// validate headers that should not be present as per signing schemes
switch protectedHeader.SigningScheme {
case signature.SigningSchemeX509:
if protectedHeader.AuthenticSigningTime != nil {
return &signature.InvalidSignatureError{Msg: fmt.Sprintf("%q header must not be present for %s signing scheme", headerKeyAuthenticSigningTime, signature.SigningSchemeX509)}
}
case signature.SigningSchemeX509SigningAuthority:
if protectedHeader.SigningTime != nil {
return &signature.InvalidSignatureError{Msg: fmt.Sprintf("%q header must not be present for %s signing scheme", headerKeySigningTime, signature.SigningSchemeX509SigningAuthority)}
}
if protectedHeader.AuthenticSigningTime == nil {
return &signature.InvalidSignatureError{Msg: fmt.Sprintf("%q header must be present for %s signing scheme", headerKeyAuthenticSigningTime, signature.SigningSchemeX509)}
}
default:
return &signature.InvalidSignatureError{Msg: fmt.Sprintf("unsupported SigningScheme: `%v`", protectedHeader.SigningScheme)}
}
return validateCriticalHeaders(protectedHeader)
}
func validateCriticalHeaders(protectedHeader *jwsProtectedHeader) error {
if len(protectedHeader.Critical) == 0 {
return &signature.InvalidSignatureError{Msg: `missing "crit" header`}
}
mustMarkedCrit := map[string]bool{headerKeySigningScheme: true}
if protectedHeader.Expiry != nil && !protectedHeader.Expiry.IsZero() {
mustMarkedCrit[headerKeyExpiry] = true
}
if protectedHeader.SigningScheme == signature.SigningSchemeX509SigningAuthority {
mustMarkedCrit[headerKeyAuthenticSigningTime] = true
}
for _, val := range protectedHeader.Critical {
if _, ok := mustMarkedCrit[val]; ok {
delete(mustMarkedCrit, val)
} else {
if _, ok := protectedHeader.ExtendedAttributes[val]; !ok {
return &signature.InvalidSignatureError{
Msg: fmt.Sprintf("%q header is marked critical but not present", val)}
}
}
}
// validate that all required critical headers (as per spec) are marked as critical.
if len(mustMarkedCrit) != 0 {
// This is not taken care of by the VerifySignerInfo method.
keys := make([]string, 0, len(mustMarkedCrit))
for k := range mustMarkedCrit {
keys = append(keys, k)
}
return &signature.InvalidSignatureError{Msg: fmt.Sprintf("these required headers are not marked as critical: %v", keys)}
}
return nil
}
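// For reference, a minimal protected header that passes the validation
// above for the notary.x509 signing scheme (illustrative values only):
//
//	{
//	  "alg": "PS256",
//	  "cty": "application/vnd.cncf.notary.payload.v1+json",
//	  "crit": ["io.cncf.notary.signingScheme"],
//	  "io.cncf.notary.signingScheme": "notary.x509",
//	  "io.cncf.notary.signingTime": "2022-01-01T00:00:00Z"
//	}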
func getSignatureAlgorithm(alg string) (signature.Algorithm, error) {
signatureAlg, ok := jwsAlgSignatureAlgMap[alg]
if !ok {
return 0, &signature.UnsupportedSignatureAlgoError{Alg: alg}
}
return signatureAlg, nil
}
func getExtendedAttributes(attrs map[string]interface{}, critical []string) []signature.Attribute {
extendedAttr := make([]signature.Attribute, 0, len(attrs))
for key, value := range attrs {
extendedAttr = append(extendedAttr, signature.Attribute{
Key: key,
Critical: contains(critical, key),
Value: value,
})
}
return extendedAttr
}
func contains(s []string, e string) bool {
for _, a := range s {
if a == e {
return true
}
}
return false
}
func generateJWS(compact string, req *signature.SignRequest, certs []*x509.Certificate) (*jwsEnvelope, error) {
parts := strings.Split(compact, ".")
if len(parts) != 3 {
// this should never happen
return nil, fmt.Errorf(
"unexpected error occurred while generating a JWS-JSON serialization from compact serialization. want: len(parts) == 3, got: len(parts) == %d", len(parts))
}
rawCerts := make([][]byte, len(certs))
for i, cert := range certs {
rawCerts[i] = cert.Raw
}
return &jwsEnvelope{
Protected: parts[0],
Payload: parts[1],
Signature: parts[2],
Header: jwsUnprotectedHeader{
CertChain: rawCerts,
SigningAgent: req.SigningAgent,
},
}, nil
}
// getSignedAttributes merges the extended signed attributes and the
// protected headers into the complete set of signed attributes.
func getSignedAttributes(req *signature.SignRequest, algorithm string) (map[string]interface{}, error) {
extAttrs := make(map[string]interface{})
crit := []string{headerKeySigningScheme}
// write extended signed attributes to the extAttrs map
for _, elm := range req.ExtendedSignedAttributes {
key, ok := elm.Key.(string)
if !ok {
return nil, &signature.InvalidSignRequestError{Msg: "JWS envelope format only supports key of type string"}
}
if _, ok := extAttrs[key]; ok {
return nil, &signature.InvalidSignRequestError{Msg: fmt.Sprintf("%q already exists in the extAttrs", key)}
}
extAttrs[key] = elm.Value
if elm.Critical {
crit = append(crit, key)
}
}
jwsProtectedHeader := jwsProtectedHeader{
Algorithm: algorithm,
ContentType: req.Payload.ContentType,
SigningScheme: req.SigningScheme,
}
switch req.SigningScheme {
case signature.SigningSchemeX509:
jwsProtectedHeader.SigningTime = &req.SigningTime
case signature.SigningSchemeX509SigningAuthority:
crit = append(crit, headerKeyAuthenticSigningTime)
jwsProtectedHeader.AuthenticSigningTime = &req.SigningTime
default:
return nil, fmt.Errorf("unsupported SigningScheme: `%v`", req.SigningScheme)
}
if !req.Expiry.IsZero() {
crit = append(crit, headerKeyExpiry)
jwsProtectedHeader.Expiry = &req.Expiry
}
jwsProtectedHeader.Critical = crit
m, err := convertToMap(jwsProtectedHeader)
if err != nil {
return nil, fmt.Errorf("unexpected error occurred while creating protected headers, Error: %s", err.Error())
}
return mergeMaps(m, extAttrs)
}
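// For the notary.x509 scheme with no expiry, the map produced above holds
// "alg", "cty", "crit" (["io.cncf.notary.signingScheme"]),
// "io.cncf.notary.signingScheme" and "io.cncf.notary.signingTime", plus any
// extended signed attributes supplied in the request.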
func convertToMap(i interface{}) (map[string]interface{}, error) {
s, err := json.Marshal(i)
if err != nil {
return nil, err
}
var m map[string]interface{}
err = json.Unmarshal(s, &m)
return m, err
}
func mergeMaps(maps ...map[string]interface{}) (map[string]interface{}, error) {
result := make(map[string]interface{})
for _, m := range maps {
for k, v := range m {
if _, ok := result[k]; ok {
return nil, fmt.Errorf("attribute key:%s repeated", k)
}
result[k] = v
}
}
return result, nil
}
// compactJWS converts Flattened JWS JSON Serialization Syntax (section-7.2.2) to
// JWS Compact Serialization (section-7.1)
//
// [RFC 7515]: https://www.rfc-editor.org/rfc/rfc7515.html
func compactJWS(envelope *jwsEnvelope) string {
return strings.Join([]string{
envelope.Protected,
envelope.Payload,
envelope.Signature}, ".")
}
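// exampleCompactJWS is an illustrative sketch, not part of the library: it
// shows how the three envelope fields join into the
// "<protected>.<payload>.<signature>" compact form consumed by verifyJWT.
// The values below are placeholders, not a real signature.
func exampleCompactJWS() string {
	env := &jwsEnvelope{
		Protected: "eyJhbGciOiJQUzI1NiJ9", // base64url-encoded protected header
		Payload:   "eyJmb28iOiJiYXIifQ",   // base64url-encoded payload
		Signature: "c2lnbmF0dXJl",         // base64url-encoded signature
	}
	return compactJWS(env) // "eyJhbGciOiJQUzI1NiJ9.eyJmb28iOiJiYXIifQ.c2lnbmF0dXJl"
}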
// Copyright The Notary Project Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jws
import (
"crypto"
"crypto/x509"
"encoding/base64"
"errors"
"fmt"
"github.com/golang-jwt/jwt/v4"
"github.com/notaryproject/notation-core-go/signature"
)
// signingMethod is the interface for jwt.SigningMethod with an additional
// method to access the certificate chain after calling Sign().
type signingMethod interface {
jwt.SigningMethod
// CertificateChain returns the certificate chain.
//
// It should be called after calling Sign().
CertificateChain() ([]*x509.Certificate, error)
// PrivateKey returns the private key.
PrivateKey() crypto.PrivateKey
}
// remoteSigningMethod wraps the remote signer to be a SigningMethod.
type remoteSigningMethod struct {
signer signature.Signer
certs []*x509.Certificate
algorithm string
}
func newRemoteSigningMethod(signer signature.Signer) (signingMethod, error) {
algorithm, err := extractJwtAlgorithm(signer)
if err != nil {
return nil, err
}
return &remoteSigningMethod{
signer: signer,
algorithm: algorithm,
}, nil
}
// Verify is not implemented; remoteSigningMethod is used for signing only.
func (s *remoteSigningMethod) Verify(signingString, signature string, key interface{}) error {
return errors.New("not implemented")
}
// Sign delegates to the remote signer, which hashes and signs the signingString.
func (s *remoteSigningMethod) Sign(signingString string, key interface{}) (string, error) {
// sign by external signer
sig, certs, err := s.signer.Sign([]byte(signingString))
if err != nil {
return "", err
}
s.certs = certs
return base64.RawURLEncoding.EncodeToString(sig), nil
}
// Alg implements jwt.SigningMethod interface.
func (s *remoteSigningMethod) Alg() string {
return s.algorithm
}
// CertificateChain returns the certificate chain.
//
// It should be called after Sign().
func (s *remoteSigningMethod) CertificateChain() ([]*x509.Certificate, error) {
if s.certs == nil {
return nil, &signature.InvalidSignRequestError{Msg: "certificate chain is not set"}
}
return s.certs, nil
}
// PrivateKey returns nil for remote signer.
func (s *remoteSigningMethod) PrivateKey() crypto.PrivateKey {
return nil
}
// localSigningMethod wraps the local signer to be a SigningMethod.
type localSigningMethod struct {
jwt.SigningMethod
signer signature.LocalSigner
}
func newLocalSigningMethod(signer signature.LocalSigner) (signingMethod, error) {
alg, err := extractJwtAlgorithm(signer)
if err != nil {
return nil, err
}
return &localSigningMethod{
SigningMethod: jwt.GetSigningMethod(alg),
signer: signer,
}, nil
}
// CertificateChain returns the certificate chain.
func (s *localSigningMethod) CertificateChain() ([]*x509.Certificate, error) {
return s.signer.CertificateChain()
}
// PrivateKey returns the private key.
func (s *localSigningMethod) PrivateKey() crypto.PrivateKey {
return s.signer.PrivateKey()
}
// getSigningMethod returns the signingMethod for the given signer.
func getSigningMethod(signer signature.Signer) (signingMethod, error) {
if localSigner, ok := signer.(signature.LocalSigner); ok {
// for local signer
return newLocalSigningMethod(localSigner)
}
// for remote signer
return newRemoteSigningMethod(signer)
}
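// Illustrative dispatch (a sketch; mySigner is hypothetical): a
// signature.LocalSigner yields a localSigningMethod, which signs in-process
// with the signer's private key, while any other signature.Signer yields a
// remoteSigningMethod, which delegates the signing operation to the signer:
//
//	method, err := getSigningMethod(mySigner)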
// verifyJWT verifies the JWT token against the specified verification key.
func verifyJWT(tokenString string, publicKey interface{}) error {
parser := jwt.NewParser(
jwt.WithValidMethods(validMethods),
jwt.WithJSONNumber(),
jwt.WithoutClaimsValidation(),
)
if _, err := parser.Parse(tokenString, func(t *jwt.Token) (interface{}, error) {
return publicKey, nil
}); err != nil {
return &signature.SignatureIntegrityError{Err: err}
}
return nil
}
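// Illustrative usage (a sketch; envelope and leafCert are hypothetical): to
// verify a parsed envelope, compact it and pass the public key of the
// signing certificate:
//
//	err := verifyJWT(compactJWS(envelope), leafCert.PublicKey)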
func extractJwtAlgorithm(signer signature.Signer) (string, error) {
// extract algorithm from signer
keySpec, err := signer.KeySpec()
if err != nil {
return "", err
}
alg := keySpec.SignatureAlgorithm()
// converts the signature.Algorithm to be jwt package defined
// algorithm name.
jwsAlg, ok := signatureAlgJWSAlgMap[alg]
if !ok {
return "", &signature.UnsupportedSignatureAlgoError{
Alg: fmt.Sprintf("#%d", alg)}
}
return jwsAlg, nil
}
// Copyright The Notary Project Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package jws
import (
"time"
"github.com/golang-jwt/jwt/v4"
"github.com/notaryproject/notation-core-go/signature"
)
const (
headerKeyAlg = "alg"
headerKeyCty = "cty"
headerKeyCrit = "crit"
headerKeyExpiry = "io.cncf.notary.expiry"
headerKeySigningTime = "io.cncf.notary.signingTime"
headerKeySigningScheme = "io.cncf.notary.signingScheme"
headerKeyAuthenticSigningTime = "io.cncf.notary.authenticSigningTime"
)
// headerKeys includes all system-aware keys for the JWS protected header.
// [JWS envelope]: https://github.com/notaryproject/notaryproject/blob/main/specs/signature-envelope-jws.md#protected-headers
var headerKeys = []string{
headerKeyAlg,
headerKeyCty,
headerKeyCrit,
headerKeyExpiry,
headerKeySigningTime,
headerKeySigningScheme,
headerKeyAuthenticSigningTime,
}
// jwsProtectedHeader contains the set of protected headers.
type jwsProtectedHeader struct {
// Defines which algorithm was used to generate the signature.
Algorithm string `json:"alg"`
// Media type of the secured content (the payload).
ContentType string `json:"cty"`
// Lists the headers that implementation MUST understand and process.
Critical []string `json:"crit,omitempty"`
// The "best by use" time for the artifact, as defined by the signer.
Expiry *time.Time `json:"io.cncf.notary.expiry,omitempty"`
// Specifies the Notary Project Signing Scheme used by the signature.
SigningScheme signature.SigningScheme `json:"io.cncf.notary.signingScheme"`
// The time at which the signature was generated. Only valid when the
// signing scheme is `notary.x509`.
SigningTime *time.Time `json:"io.cncf.notary.signingTime,omitempty"`
// The time at which the signature was generated. Only valid when the
// signing scheme is `notary.x509.signingAuthority`.
AuthenticSigningTime *time.Time `json:"io.cncf.notary.authenticSigningTime,omitempty"`
// The user defined attributes.
ExtendedAttributes map[string]interface{} `json:"-"`
}
// jwsUnprotectedHeader contains the set of unprotected headers.
type jwsUnprotectedHeader struct {
// Base64-encoded RFC 3161 timestamp token.
TimestampSignature []byte `json:"io.cncf.notary.timestampSignature,omitempty"`
// List of X.509 Base64-DER-encoded certificates
// as defined at https://datatracker.ietf.org/doc/html/rfc7515#section-4.1.6.
CertChain [][]byte `json:"x5c"`
// SigningAgent used for signing.
SigningAgent string `json:"io.cncf.notary.signingAgent,omitempty"`
}
// jwsEnvelope is the final Signature envelope.
type jwsEnvelope struct {
// JWS payload, Base64URL-encoded. The raw data should be in JSON format.
Payload string `json:"payload"`
// jwsProtectedHeader Base64URL-encoded.
Protected string `json:"protected"`
// Signature metadata that is not integrity protected.
Header jwsUnprotectedHeader `json:"header"`
// Base64URL-encoded Signature.
Signature string `json:"signature"`
}
var (
ps256 = jwt.SigningMethodPS256.Name
ps384 = jwt.SigningMethodPS384.Name
ps512 = jwt.SigningMethodPS512.Name
es256 = jwt.SigningMethodES256.Name
es384 = jwt.SigningMethodES384.Name
es512 = jwt.SigningMethodES512.Name
)
var validMethods = []string{ps256, ps384, ps512, es256, es384, es512}
var signatureAlgJWSAlgMap = map[signature.Algorithm]string{
signature.AlgorithmPS256: ps256,
signature.AlgorithmPS384: ps384,
signature.AlgorithmPS512: ps512,
signature.AlgorithmES256: es256,
signature.AlgorithmES384: es384,
signature.AlgorithmES512: es512,
}
var jwsAlgSignatureAlgMap = reverseMap(signatureAlgJWSAlgMap)
func reverseMap(m map[signature.Algorithm]string) map[string]signature.Algorithm {
n := make(map[string]signature.Algorithm, len(m))
for k, v := range m {
n[v] = k
}
return n
}
// Copyright The Notary Project Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package notation
// ErrorPushSignatureFailed is used when pushing the signature to the
// target registry fails.
type ErrorPushSignatureFailed struct {
Msg string
}
func (e ErrorPushSignatureFailed) Error() string {
if e.Msg != "" {
return "failed to push signature to registry with error: " + e.Msg
}
return "failed to push signature to registry"
}
// ErrorVerificationInconclusive is used when signature verification fails due
// to a runtime error (e.g. a network error)
type ErrorVerificationInconclusive struct {
Msg string
}
func (e ErrorVerificationInconclusive) Error() string {
if e.Msg != "" {
return e.Msg
}
return "signature verification was inclusive due to an unexpected error"
}
// ErrorNoApplicableTrustPolicy is used when there is no trust policy that
// applies to the given artifact
type ErrorNoApplicableTrustPolicy struct {
Msg string
}
func (e ErrorNoApplicableTrustPolicy) Error() string {
if e.Msg != "" {
return e.Msg
}
return "there is no applicable trust policy for the given artifact"
}
// ErrorSignatureRetrievalFailed is used when notation is unable to retrieve the
// digital signature/s for the given artifact
type ErrorSignatureRetrievalFailed struct {
Msg string
}
func (e ErrorSignatureRetrievalFailed) Error() string {
if e.Msg != "" {
return e.Msg
}
return "unable to retrieve the digital signature from the registry"
}
// ErrorVerificationFailed is used when it is determined that the digital
// signature/s is not valid for the given artifact
type ErrorVerificationFailed struct {
Msg string
}
func (e ErrorVerificationFailed) Error() string {
if e.Msg != "" {
return e.Msg
}
return "signature verification failed"
}
// ErrorUserMetadataVerificationFailed is used when the signature does not
// contain the user specified metadata
type ErrorUserMetadataVerificationFailed struct {
Msg string
}
func (e ErrorUserMetadataVerificationFailed) Error() string {
if e.Msg != "" {
return e.Msg
}
return "unable to find specified metadata in the signature"
}
// Copyright 2023 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package notation
import (
"testing"
orasRegistry "oras.land/oras-go/v2/registry"
)
func FuzzArtifactReferenceParsing(f *testing.F) {
f.Fuzz(func(t *testing.T, artifactRef string) {
ref, err := orasRegistry.ParseReference(artifactRef)
if err != nil {
t.Skip()
}
if ref.Reference == "" {
t.Skip()
}
ref.ValidateReferenceAsDigest()
})
}
// Copyright The Notary Project Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package pkix
import (
"fmt"
"strings"
ldapv3 "github.com/go-ldap/ldap/v3"
)
// ParseDistinguishedName parses a distinguished name (DN) and validates
// it against Notary Project rules.
func ParseDistinguishedName(name string) (map[string]string, error) {
if strings.Contains(name, "=#") {
return nil, fmt.Errorf("unsupported distinguished name (DN) %q: notation does not support x509.subject identities containing \"=#\"", name)
}
mandatoryFields := []string{"C", "ST", "O"}
attrKeyValue := make(map[string]string)
dn, err := ldapv3.ParseDN(name)
if err != nil {
return nil, fmt.Errorf("parsing distinguished name (DN) %q failed with err: %v. A valid DN must contain 'C', 'ST', and 'O' RDN attributes at a minimum, and follow RFC 4514 standard", name, err)
}
for _, rdn := range dn.RDNs {
// multi-valued RDNs are not supported (TODO: add spec reference here)
if len(rdn.Attributes) > 1 {
return nil, fmt.Errorf("distinguished name (DN) %q has multi-valued RDN attributes, remove multi-valued RDN attributes as they are not supported", name)
}
for _, attribute := range rdn.Attributes {
if attrKeyValue[attribute.Type] == "" {
attrKeyValue[attribute.Type] = attribute.Value
} else {
return nil, fmt.Errorf("distinguished name (DN) %q has duplicate RDN attribute for %q, DN can only have unique RDN attributes", name, attribute.Type)
}
}
}
// Verify mandatory fields are present
for _, field := range mandatoryFields {
if attrKeyValue[field] == "" {
return nil, fmt.Errorf("distinguished name (DN) %q has no mandatory RDN attribute for %q, it must contain 'C', 'ST', and 'O' RDN attributes at a minimum", name, field)
}
}
// No errors
return attrKeyValue, nil
}
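// For example, the DN "C=US, ST=WA, O=acme" parses to
// map[string]string{"C": "US", "ST": "WA", "O": "acme"}, while a DN missing
// any of the mandatory C, ST or O attributes is rejected.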
// IsSubsetDN returns true if dn1 is a subset of dn2, i.e. every key/value
// pair of dn1 has a matching key/value pair in dn2; otherwise it returns
// false.
func IsSubsetDN(dn1 map[string]string, dn2 map[string]string) bool {
for key := range dn1 {
if dn1[key] != dn2[key] {
return false
}
}
return true
}
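// For example, {"O": "acme"} is a subset of {"C": "US", "O": "acme"}, but
// {"C": "US", "O": "acme"} is not a subset of {"O": "acme"}.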
// Copyright The Notary Project Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package notation provides the signer and verifier for notation signing
// and verification.
package notation
import (
"context"
"crypto/sha256"
"encoding/hex"
"encoding/json"
"errors"
"fmt"
"io"
"mime"
"strings"
"time"
orasRegistry "oras.land/oras-go/v2/registry"
"oras.land/oras-go/v2/registry/remote"
"github.com/notaryproject/notation-core-go/signature"
"github.com/notaryproject/notation-core-go/signature/cose"
"github.com/notaryproject/notation-core-go/signature/jws"
"github.com/notaryproject/notation-go/internal/envelope"
"github.com/notaryproject/notation-go/log"
"github.com/notaryproject/notation-go/registry"
"github.com/notaryproject/notation-go/verifier/trustpolicy"
"github.com/opencontainers/go-digest"
ocispec "github.com/opencontainers/image-spec/specs-go/v1"
)
var errDoneVerification = errors.New("done verification")
var reservedAnnotationPrefixes = [...]string{"io.cncf.notary"}
// SignerSignOptions contains parameters for Signer.Sign.
type SignerSignOptions struct {
// SignatureMediaType is the envelope type of the signature.
// Currently, both `application/jose+json` and `application/cose` are
// supported.
SignatureMediaType string
// ExpiryDuration identifies the expiry duration of the resulting signature.
// Zero value represents no expiry duration.
ExpiryDuration time.Duration
// PluginConfig sets or overrides the plugin configuration.
PluginConfig map[string]string
// SigningAgent sets the signing agent name
SigningAgent string
}
// Signer is a generic interface for signing an OCI artifact.
// The interface allows signing with local or remote keys,
// and packing in various signature formats.
type Signer interface {
// Sign signs the OCI artifact described by its descriptor,
// and returns the signature and SignerInfo.
Sign(ctx context.Context, desc ocispec.Descriptor, opts SignerSignOptions) ([]byte, *signature.SignerInfo, error)
}
// SignBlobOptions contains parameters for notation.SignBlob.
type SignBlobOptions struct {
SignerSignOptions
// ContentMediaType is the media-type of the blob being signed.
ContentMediaType string
// UserMetadata contains key-value pairs that are added to the signature
// payload
UserMetadata map[string]string
}
// BlobDescriptorGenerator creates descriptor using the digest Algorithm.
type BlobDescriptorGenerator func(digest.Algorithm) (ocispec.Descriptor, error)
// BlobSigner is a generic interface for signing arbitrary data.
// The interface allows signing with local or remote keys,
// and packing in various signature formats.
type BlobSigner interface {
// SignBlob signs the descriptor returned by genDesc, and returns the
// signature and SignerInfo.
SignBlob(ctx context.Context, genDesc BlobDescriptorGenerator, opts SignerSignOptions) ([]byte, *signature.SignerInfo, error)
}
// signerAnnotation facilitates return of manifest annotations by signers
type signerAnnotation interface {
// PluginAnnotations returns signature manifest annotations returned from
// plugin
PluginAnnotations() map[string]string
}
// SignOptions contains parameters for notation.Sign.
type SignOptions struct {
SignerSignOptions
// ArtifactReference sets the reference of the artifact that needs to be
// signed. It can be a tag, a digest or a full reference.
ArtifactReference string
// UserMetadata contains key-value pairs that are added to the signature
// payload
UserMetadata map[string]string
}
// Sign signs the OCI artifact and pushes the signature to the Repository.
// The descriptor of the signed content is returned upon successful signing.
func Sign(ctx context.Context, signer Signer, repo registry.Repository, signOpts SignOptions) (ocispec.Descriptor, error) {
// sanity check
if err := validateSignArguments(signer, signOpts.SignerSignOptions); err != nil {
return ocispec.Descriptor{}, err
}
if repo == nil {
return ocispec.Descriptor{}, errors.New("repo cannot be nil")
}
logger := log.GetLogger(ctx)
artifactRef := signOpts.ArtifactReference
if ref, err := orasRegistry.ParseReference(artifactRef); err == nil {
// artifactRef is a valid full reference
artifactRef = ref.Reference
}
targetDesc, err := repo.Resolve(ctx, artifactRef)
if err != nil {
return ocispec.Descriptor{}, fmt.Errorf("failed to resolve reference: %w", err)
}
// artifactRef is a tag or a digest; if it is a digest, it must match
// the resolved digest
if artifactRef != targetDesc.Digest.String() {
if _, err := digest.Parse(artifactRef); err == nil {
// artifactRef is a digest, but does not match the resolved digest
return ocispec.Descriptor{}, fmt.Errorf("user input digest %s does not match the resolved digest %s", artifactRef, targetDesc.Digest.String())
}
// artifactRef is a tag
logger.Warnf("Always sign the artifact using digest(`@sha256:...`) rather than a tag(`:%s`) because tags are mutable and a tag reference can point to a different artifact than the one signed", artifactRef)
logger.Infof("Resolved artifact tag `%s` to digest `%s` before signing", artifactRef, targetDesc.Digest.String())
}
descToSign, err := addUserMetadataToDescriptor(ctx, targetDesc, signOpts.UserMetadata)
if err != nil {
return ocispec.Descriptor{}, err
}
sig, signerInfo, err := signer.Sign(ctx, descToSign, signOpts.SignerSignOptions)
if err != nil {
return ocispec.Descriptor{}, err
}
var pluginAnnotations map[string]string
if signerAnts, ok := signer.(signerAnnotation); ok {
pluginAnnotations = signerAnts.PluginAnnotations()
}
logger.Debug("Generating annotation")
annotations, err := generateAnnotations(signerInfo, pluginAnnotations)
if err != nil {
return ocispec.Descriptor{}, err
}
logger.Debugf("Generated annotations: %+v", annotations)
logger.Debugf("Pushing signature of artifact descriptor: %+v, signature media type: %v", targetDesc, signOpts.SignatureMediaType)
_, _, err = repo.PushSignature(ctx, signOpts.SignatureMediaType, sig, targetDesc, annotations)
if err != nil {
var referrerError *remote.ReferrersError
// do not log an error for failing to delete referral index
if !errors.As(err, &referrerError) || !referrerError.IsReferrersIndexDelete() {
logger.Error("Failed to push the signature")
}
return ocispec.Descriptor{}, ErrorPushSignatureFailed{Msg: err.Error()}
}
return targetDesc, nil
}
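// An illustrative invocation (a sketch; ctx, signer, repo and the artifact
// reference are placeholders, and the digest is elided):
//
//	opts := SignOptions{
//		SignerSignOptions: SignerSignOptions{
//			SignatureMediaType: jws.MediaTypeEnvelope,
//		},
//		ArtifactReference: "registry.example.com/app@sha256:...",
//	}
//	desc, err := Sign(ctx, signer, repo, opts)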
// SignBlob signs the arbitrary data and returns the signature
func SignBlob(ctx context.Context, signer BlobSigner, blobReader io.Reader, signBlobOpts SignBlobOptions) ([]byte, *signature.SignerInfo, error) {
// sanity checks
if err := validateSignArguments(signer, signBlobOpts.SignerSignOptions); err != nil {
return nil, nil, err
}
if blobReader == nil {
return nil, nil, errors.New("blobReader cannot be nil")
}
if signBlobOpts.ContentMediaType == "" {
return nil, nil, errors.New("content media-type cannot be empty")
}
if _, _, err := mime.ParseMediaType(signBlobOpts.ContentMediaType); err != nil {
return nil, nil, fmt.Errorf("invalid content media-type '%s': %v", signBlobOpts.ContentMediaType, err)
}
getDescFunc := getDescriptorFunc(ctx, blobReader, signBlobOpts.ContentMediaType, signBlobOpts.UserMetadata)
return signer.SignBlob(ctx, getDescFunc, signBlobOpts.SignerSignOptions)
}
func validateSignArguments(signer any, signOpts SignerSignOptions) error {
if signer == nil {
return errors.New("signer cannot be nil")
}
if signOpts.ExpiryDuration < 0 {
return errors.New("expiry duration cannot be a negative value")
}
if signOpts.ExpiryDuration%time.Second != 0 {
return errors.New("expiry duration supports minimum granularity of seconds")
}
if signOpts.SignatureMediaType == "" {
return errors.New("signature media-type cannot be empty")
}
if !(signOpts.SignatureMediaType == jws.MediaTypeEnvelope || signOpts.SignatureMediaType == cose.MediaTypeEnvelope) {
return fmt.Errorf("invalid signature media-type '%s'", signOpts.SignatureMediaType)
}
return nil
}
func addUserMetadataToDescriptor(ctx context.Context, desc ocispec.Descriptor, userMetadata map[string]string) (ocispec.Descriptor, error) {
logger := log.GetLogger(ctx)
if desc.Annotations == nil && len(userMetadata) > 0 {
desc.Annotations = map[string]string{}
}
for k, v := range userMetadata {
logger.Debugf("Adding metadata %v=%v to annotations", k, v)
for _, reservedPrefix := range reservedAnnotationPrefixes {
if strings.HasPrefix(k, reservedPrefix) {
return desc, fmt.Errorf("error adding user metadata: metadata key %v has reserved prefix %v", k, reservedPrefix)
}
}
if _, ok := desc.Annotations[k]; ok {
return desc, fmt.Errorf("error adding user metadata: metadata key %v is already present in the target artifact", k)
}
desc.Annotations[k] = v
}
return desc, nil
}
// ValidationResult encapsulates the verification result (passed or failed)
// for a verification type, including the desired verification action as
// specified in the trust policy
type ValidationResult struct {
// Type of verification that is performed
Type trustpolicy.ValidationType
// Action is the intended action for the given verification type as defined
// in the trust policy
Action trustpolicy.ValidationAction
// Error is set if there are any errors during the verification process
Error error
}
// VerificationOutcome encapsulates a signature envelope blob, its content,
// the verification level and results for each verification type that was
// performed.
type VerificationOutcome struct {
// RawSignature is the signature envelope blob
RawSignature []byte
// EnvelopeContent contains the details of the digital signature and
// associated metadata
EnvelopeContent *signature.EnvelopeContent
// VerificationLevel describes what verification level was used for
// performing signature verification
VerificationLevel *trustpolicy.VerificationLevel
// VerificationResults contains the verifications performed on the signature
// and their results
VerificationResults []*ValidationResult
// Error that caused the verification to fail (if it fails)
Error error
}
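// UserMetadata returns the user-defined metadata annotations carried in the
// payload of the verified signature envelope.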
func (outcome *VerificationOutcome) UserMetadata() (map[string]string, error) {
if outcome.EnvelopeContent == nil {
return nil, errors.New("unable to find envelope content for verification outcome")
}
var payload envelope.Payload
err := json.Unmarshal(outcome.EnvelopeContent.Payload.Content, &payload)
if err != nil {
return nil, errors.New("failed to unmarshal the payload content in the signature blob to envelope.Payload")
}
if payload.TargetArtifact.Annotations == nil {
return map[string]string{}, nil
}
return payload.TargetArtifact.Annotations, nil
}
// VerifierVerifyOptions contains parameters for Verifier.Verify.
type VerifierVerifyOptions struct {
// ArtifactReference is the reference of the artifact that is being
// verified. It must be a full reference.
ArtifactReference string
// SignatureMediaType is the envelope type of the signature.
// Currently both `application/jose+json` and `application/cose` are
// supported.
SignatureMediaType string
// PluginConfig is a map of plugin configs.
PluginConfig map[string]string
// UserMetadata contains key-value pairs that must be present in the
// signature.
UserMetadata map[string]string
}
// Verifier is a generic interface for verifying an artifact.
type Verifier interface {
// Verify verifies the signature blob `signature` against the target OCI
// artifact with manifest descriptor `desc`, and returns the outcome upon
// successful verification.
// If a nil signature is provided and the verification level is not 'skip',
// an error will be returned.
Verify(ctx context.Context, desc ocispec.Descriptor, signature []byte, opts VerifierVerifyOptions) (*VerificationOutcome, error)
}
type verifySkipper interface {
// SkipVerify validates whether the verification level is skip.
SkipVerify(ctx context.Context, opts VerifierVerifyOptions) (bool, *trustpolicy.VerificationLevel, error)
}
// VerifyOptions contains parameters for notation.Verify.
type VerifyOptions struct {
// ArtifactReference is the reference of the artifact that is being
// verified.
ArtifactReference string
// PluginConfig is a map of plugin configs.
PluginConfig map[string]string
// MaxSignatureAttempts is the maximum number of signature envelopes that
// will be processed for verification. If set to a value less than or
// equal to zero, an error will be returned.
MaxSignatureAttempts int
// UserMetadata contains key-value pairs that must be present in the
// signature
UserMetadata map[string]string
}
// Verify performs signature verification on each of the notation supported
// verification types (like integrity, authenticity, etc.) and returns the
// successful signature verification outcome.
// For more details on signature verification, see
// https://github.com/notaryproject/notaryproject/blob/main/specs/trust-store-trust-policy.md#signature-verification
func Verify(ctx context.Context, verifier Verifier, repo registry.Repository, verifyOpts VerifyOptions) (ocispec.Descriptor, []*VerificationOutcome, error) {
logger := log.GetLogger(ctx)
// sanity check
if verifier == nil {
return ocispec.Descriptor{}, nil, errors.New("verifier cannot be nil")
}
if repo == nil {
return ocispec.Descriptor{}, nil, errors.New("repo cannot be nil")
}
if verifyOpts.MaxSignatureAttempts <= 0 {
return ocispec.Descriptor{}, nil, ErrorSignatureRetrievalFailed{Msg: fmt.Sprintf("verifyOptions.MaxSignatureAttempts expects a positive number, got %d", verifyOpts.MaxSignatureAttempts)}
}
// opts to be passed in verifier.Verify()
opts := VerifierVerifyOptions{
ArtifactReference: verifyOpts.ArtifactReference,
PluginConfig: verifyOpts.PluginConfig,
UserMetadata: verifyOpts.UserMetadata,
}
if skipChecker, ok := verifier.(verifySkipper); ok {
logger.Info("Checking whether signature verification should be skipped or not")
skip, verificationLevel, err := skipChecker.SkipVerify(ctx, opts)
if err != nil {
return ocispec.Descriptor{}, nil, err
}
if skip {
logger.Infoln("Verification skipped for", verifyOpts.ArtifactReference)
return ocispec.Descriptor{}, []*VerificationOutcome{{VerificationLevel: verificationLevel}}, nil
}
logger.Info("Check over. Trust policy is not configured to skip signature verification")
}
// get artifact descriptor
artifactRef := verifyOpts.ArtifactReference
ref, err := orasRegistry.ParseReference(artifactRef)
if err != nil {
return ocispec.Descriptor{}, nil, ErrorSignatureRetrievalFailed{Msg: err.Error()}
}
if ref.Reference == "" {
return ocispec.Descriptor{}, nil, ErrorSignatureRetrievalFailed{Msg: "reference is missing digest or tag"}
}
artifactDescriptor, err := repo.Resolve(ctx, ref.Reference)
if err != nil {
return ocispec.Descriptor{}, nil, ErrorSignatureRetrievalFailed{Msg: err.Error()}
}
if ref.ValidateReferenceAsDigest() != nil {
// artifactRef is not a digest reference
logger.Infof("Resolved artifact tag `%s` to digest `%s` before verification", ref.Reference, artifactDescriptor.Digest.String())
logger.Warn("The resolved digest may not point to the same signed artifact, since tags are mutable")
} else if ref.Reference != artifactDescriptor.Digest.String() {
return ocispec.Descriptor{}, nil, ErrorSignatureRetrievalFailed{Msg: fmt.Sprintf("user input digest %s does not match the resolved digest %s", ref.Reference, artifactDescriptor.Digest.String())}
}
var verificationSucceeded bool
var verificationOutcomes []*VerificationOutcome
var verificationFailedErrorArray = []error{ErrorVerificationFailed{}}
errExceededMaxVerificationLimit := ErrorVerificationFailed{Msg: fmt.Sprintf("signature evaluation stopped. The configured limit of %d signatures to verify per artifact exceeded", verifyOpts.MaxSignatureAttempts)}
numOfSignatureProcessed := 0
// get signature manifests
logger.Debug("Fetching signature manifests")
err = repo.ListSignatures(ctx, artifactDescriptor, func(signatureManifests []ocispec.Descriptor) error {
// process signatures
for _, sigManifestDesc := range signatureManifests {
if numOfSignatureProcessed >= verifyOpts.MaxSignatureAttempts {
break
}
numOfSignatureProcessed++
logger.Infof("Processing signature with manifest mediaType: %v and digest: %v", sigManifestDesc.MediaType, sigManifestDesc.Digest)
// get signature envelope
sigBlob, sigDesc, err := repo.FetchSignatureBlob(ctx, sigManifestDesc)
if err != nil {
return ErrorSignatureRetrievalFailed{Msg: fmt.Sprintf("unable to retrieve digital signature with digest %q associated with %q from the Repository, error : %v", sigManifestDesc.Digest, artifactRef, err.Error())}
}
// using signature media type fetched from registry
opts.SignatureMediaType = sigDesc.MediaType
// verify each signature
outcome, err := verifier.Verify(ctx, artifactDescriptor, sigBlob, opts)
if err != nil {
logger.Warnf("Signature %v failed verification with error: %v", sigManifestDesc.Digest, err)
if outcome == nil {
logger.Error("Got nil outcome. Expecting non-nil outcome on verification failure")
return err
}
outcome.Error = fmt.Errorf("failed to verify signature with digest %v, %w", sigManifestDesc.Digest, outcome.Error)
verificationFailedErrorArray = append(verificationFailedErrorArray, outcome.Error)
continue
}
// at this point, the signature is verified successfully
verificationSucceeded = true
// on success, verificationOutcomes only contains the
// succeeded outcome
verificationOutcomes = []*VerificationOutcome{outcome}
logger.Debugf("Signature verification succeeded for artifact %v with signature digest %v", artifactDescriptor.Digest, sigManifestDesc.Digest)
// early break on success
return errDoneVerification
}
if numOfSignatureProcessed >= verifyOpts.MaxSignatureAttempts {
return errExceededMaxVerificationLimit
}
return nil
})
if err != nil && !errors.Is(err, errDoneVerification) {
if errors.Is(err, errExceededMaxVerificationLimit) {
return ocispec.Descriptor{}, verificationOutcomes, err
}
return ocispec.Descriptor{}, nil, err
}
// If there's no signature associated with the reference
if numOfSignatureProcessed == 0 {
return ocispec.Descriptor{}, nil, ErrorSignatureRetrievalFailed{Msg: fmt.Sprintf("no signature is associated with %q, make sure the artifact was signed successfully", artifactRef)}
}
// Verification Failed
if !verificationSucceeded {
logger.Debugf("Signature verification failed for all the signatures associated with artifact %v", artifactDescriptor.Digest)
return ocispec.Descriptor{}, verificationOutcomes, errors.Join(verificationFailedErrorArray...)
}
// Verification Succeeded
return artifactDescriptor, verificationOutcomes, nil
}
func generateAnnotations(signerInfo *signature.SignerInfo, annotations map[string]string) (map[string]string, error) {
// sanity check
if signerInfo == nil {
return nil, errors.New("failed to generate annotations: signerInfo cannot be nil")
}
var thumbprints []string
for _, cert := range signerInfo.CertificateChain {
checkSum := sha256.Sum256(cert.Raw)
thumbprints = append(thumbprints, hex.EncodeToString(checkSum[:]))
}
val, err := json.Marshal(thumbprints)
if err != nil {
return nil, err
}
if annotations == nil {
annotations = make(map[string]string)
}
annotations[envelope.AnnotationX509ChainThumbprint] = string(val)
signingTime, err := envelope.SigningTime(signerInfo)
if err != nil {
return nil, err
}
annotations[ocispec.AnnotationCreated] = signingTime.Format(time.RFC3339)
return annotations, nil
}
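// For a chain with a single certificate, the generated annotations take the
// following shape (illustrative values; the thumbprint hex is elided):
//
//	{
//	  envelope.AnnotationX509ChainThumbprint: `["b7c5..."]`,
//	  ocispec.AnnotationCreated:              "2022-01-01T00:00:00Z",
//	}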
func getDescriptorFunc(ctx context.Context, reader io.Reader, contentMediaType string, userMetadata map[string]string) BlobDescriptorGenerator {
return func(hashAlgo digest.Algorithm) (ocispec.Descriptor, error) {
digester := hashAlgo.Digester()
bytes, err := io.Copy(digester.Hash(), reader)
if err != nil {
return ocispec.Descriptor{}, err
}
targetDesc := ocispec.Descriptor{
MediaType: contentMediaType,
Digest: digester.Digest(),
Size: bytes,
}
return addUserMetadataToDescriptor(ctx, targetDesc, userMetadata)
}
}
// Copyright 2023 the cncf-fuzzing authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//
package trustpolicy
import (
"testing"
fuzz "github.com/AdaLogics/go-fuzz-headers"
)
func FuzzDocumentValidate(f *testing.F) {
f.Fuzz(func(t *testing.T, documentData []byte) {
ff := fuzz.NewConsumer(documentData)
policyDoc := &Document{}
ff.GenerateStruct(policyDoc)
policyDoc.Validate()
})
}
// Copyright The Notary Project Authors.
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package trustpolicy provides functionalities for trust policy document
// and trust policy statements.
package trustpolicy
import (
"encoding/json"
"errors"
"fmt"
"io/fs"
"os"
"path/filepath"
"regexp"
"strings"
"github.com/notaryproject/notation-go/dir"
"github.com/notaryproject/notation-go/internal/file"
"github.com/notaryproject/notation-go/internal/pkix"
"github.com/notaryproject/notation-go/internal/slices"
"github.com/notaryproject/notation-go/internal/trustpolicy"
"github.com/notaryproject/notation-go/verifier/truststore"
)
// trustPolicyLink is a tutorial link for creating Notation's trust policy.
const trustPolicyLink = "https://notaryproject.dev/docs/quickstart/#create-a-trust-policy"
// ValidationType is an enum for signature verification types such as Integrity,
// Authenticity, etc.
type ValidationType string
// ValidationAction is an enum for signature verification actions such as
// Enforced, Logged, Skipped.
type ValidationAction string
// VerificationLevel encapsulates the signature verification preset and its
// actions for each verification type
type VerificationLevel struct {
Name string
Enforcement map[ValidationType]ValidationAction
}
const (
TypeIntegrity ValidationType = "integrity"
TypeAuthenticity ValidationType = "authenticity"
TypeAuthenticTimestamp ValidationType = "authenticTimestamp"
TypeExpiry ValidationType = "expiry"
TypeRevocation ValidationType = "revocation"
)
const (
ActionEnforce ValidationAction = "enforce"
ActionLog ValidationAction = "log"
ActionSkip ValidationAction = "skip"
)
var (
LevelStrict = &VerificationLevel{
Name: "strict",
Enforcement: map[ValidationType]ValidationAction{
TypeIntegrity: ActionEnforce,
TypeAuthenticity: ActionEnforce,
TypeAuthenticTimestamp: ActionEnforce,
TypeExpiry: ActionEnforce,
TypeRevocation: ActionEnforce,
},
}
LevelPermissive = &VerificationLevel{
Name: "permissive",
Enforcement: map[ValidationType]ValidationAction{
TypeIntegrity: ActionEnforce,
TypeAuthenticity: ActionEnforce,
TypeAuthenticTimestamp: ActionLog,
TypeExpiry: ActionLog,
TypeRevocation: ActionLog,
},
}
LevelAudit = &VerificationLevel{
Name: "audit",
Enforcement: map[ValidationType]ValidationAction{
TypeIntegrity: ActionEnforce,
TypeAuthenticity: ActionLog,
TypeAuthenticTimestamp: ActionLog,
TypeExpiry: ActionLog,
TypeRevocation: ActionLog,
},
}
LevelSkip = &VerificationLevel{
Name: "skip",
Enforcement: map[ValidationType]ValidationAction{
TypeIntegrity: ActionSkip,
TypeAuthenticity: ActionSkip,
TypeAuthenticTimestamp: ActionSkip,
TypeExpiry: ActionSkip,
TypeRevocation: ActionSkip,
},
}
)
var (
ValidationTypes = []ValidationType{
TypeIntegrity,
TypeAuthenticity,
TypeAuthenticTimestamp,
TypeExpiry,
TypeRevocation,
}
ValidationActions = []ValidationAction{
ActionEnforce,
ActionLog,
ActionSkip,
}
VerificationLevels = []*VerificationLevel{
LevelStrict,
LevelPermissive,
LevelAudit,
LevelSkip,
}
)
var supportedPolicyVersions = []string{"1.0"}
// Document represents a trustPolicy.json document
type Document struct {
// Version of the policy document
Version string `json:"version"`
// TrustPolicies include each policy statement
TrustPolicies []TrustPolicy `json:"trustPolicies"`
}
// TrustPolicy represents a policy statement in the policy document
type TrustPolicy struct {
// Name of the policy statement
Name string `json:"name"`
// RegistryScopes that this policy statement affects
RegistryScopes []string `json:"registryScopes"`
// SignatureVerification setting for this policy statement
SignatureVerification SignatureVerification `json:"signatureVerification"`
// TrustStores this policy statement uses
TrustStores []string `json:"trustStores,omitempty"`
// TrustedIdentities this policy statement pins
TrustedIdentities []string `json:"trustedIdentities,omitempty"`
}
// SignatureVerification represents verification configuration in a trust policy
type SignatureVerification struct {
VerificationLevel string `json:"level"`
Override map[ValidationType]ValidationAction `json:"override,omitempty"`
}
// Validate validates a policy document according to its version's rule set.
// If any rule is violated, it returns an error.
func (policyDoc *Document) Validate() error {
// sanity check
if policyDoc == nil {
return errors.New("trust policy document cannot be nil")
}
// Validate Version
if policyDoc.Version == "" {
return errors.New("trust policy document is missing or has empty version, it must be specified")
}
if !slices.Contains(supportedPolicyVersions, policyDoc.Version) {
return fmt.Errorf("trust policy document uses unsupported version %q", policyDoc.Version)
}
// Validate the policy according to 1.0 rules
if len(policyDoc.TrustPolicies) == 0 {
return errors.New("trust policy document can not have zero trust policy statements")
}
policyStatementNameCount := make(map[string]int)
for _, statement := range policyDoc.TrustPolicies {
// Verify statement name is valid
if statement.Name == "" {
return errors.New("a trust policy statement is missing a name, every statement requires a name")
}
policyStatementNameCount[statement.Name]++
// Verify signature verification is valid
verificationLevel, err := statement.SignatureVerification.GetVerificationLevel()
if err != nil {
return fmt.Errorf("trust policy statement %q has invalid signatureVerification: %w", statement.Name, err)
}
// Any signature verification other than "skip" needs a trust store and
// trusted identities
if verificationLevel.Name == "skip" {
if len(statement.TrustStores) > 0 || len(statement.TrustedIdentities) > 0 {
return fmt.Errorf("trust policy statement %q is set to skip signature verification but configured with trust stores and/or trusted identities, remove them if signature verification needs to be skipped", statement.Name)
}
} else {
if len(statement.TrustStores) == 0 || len(statement.TrustedIdentities) == 0 {
return fmt.Errorf("trust policy statement %q is either missing trust stores or trusted identities, both must be specified", statement.Name)
}
// Verify Trust Store is valid
if err := validateTrustStore(statement); err != nil {
return err
}
// Verify Trusted Identities are valid
if err := validateTrustedIdentities(statement); err != nil {
return err
}
}
}
// Verify registry scopes are valid
if err := validateRegistryScopes(policyDoc); err != nil {
return err
}
// Verify unique policy statement names across the policy document
for key := range policyStatementNameCount {
if policyStatementNameCount[key] > 1 {
return fmt.Errorf("multiple trust policy statements use the same name %q, statement names must be unique", key)
}
}
// No errors
return nil
}
// GetApplicableTrustPolicy returns a pointer to a deep copy of the
// TrustPolicy statement that applies to the given registry scope. If no
// applicable trust policy is found, it returns an error.
// see https://github.com/notaryproject/notaryproject/blob/v1.0.0-rc.2/specs/trust-store-trust-policy.md#selecting-a-trust-policy-based-on-artifact-uri
func (trustPolicyDoc *Document) GetApplicableTrustPolicy(artifactReference string) (*TrustPolicy, error) {
artifactPath, err := getArtifactPathFromReference(artifactReference)
if err != nil {
return nil, err
}
var wildcardPolicy *TrustPolicy
var applicablePolicy *TrustPolicy
for _, policyStatement := range trustPolicyDoc.TrustPolicies {
if slices.Contains(policyStatement.RegistryScopes, trustpolicy.Wildcard) {
// we need to deep copy because we can't use the loop variable
// address. see https://stackoverflow.com/a/45967429
wildcardPolicy = (&policyStatement).clone()
} else if slices.Contains(policyStatement.RegistryScopes, artifactPath) {
applicablePolicy = (&policyStatement).clone()
}
}
if applicablePolicy != nil {
// a policy with exact match for registry scope takes precedence over
// a wildcard (*) policy.
return applicablePolicy, nil
} else if wildcardPolicy != nil {
return wildcardPolicy, nil
} else {
return nil, fmt.Errorf("artifact %q has no applicable trust policy. Trust policy applicability for a given artifact is determined by registryScopes. To create a trust policy, see: %s", artifactReference, trustPolicyLink)
}
}
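// Illustrative selection (placeholder values): given one statement scoped to
// "registry.example.com/app" and another scoped to "*", an artifact under
// "registry.example.com/app" matches the first statement, while any other
// artifact falls back to the wildcard statement.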
// LoadDocument loads a trust policy document from a local file system
func LoadDocument() (*Document, error) {
path, err := dir.ConfigFS().SysPath(dir.PathTrustPolicy)
if err != nil {
return nil, err
}
// return an error if the path is a directory, a symlink, or does not exist.
fileInfo, err := os.Lstat(path)
if err != nil {
if errors.Is(err, os.ErrNotExist) {
return nil, fmt.Errorf("trust policy is not present. To create a trust policy, see: %s", trustPolicyLink)
}
return nil, err
}
mode := fileInfo.Mode()
if mode.IsDir() || mode&fs.ModeSymlink != 0 {
return nil, fmt.Errorf("trust policy is not a regular file (symlinks are not supported). To create a trust policy, see: %s", trustPolicyLink)
}
jsonFile, err := os.Open(path)
if err != nil {
if errors.Is(err, os.ErrPermission) {
return nil, fmt.Errorf("unable to read trust policy due to file permissions, please verify the permissions of %s", filepath.Join(dir.UserConfigDir, dir.PathTrustPolicy))
}
return nil, err
}
defer jsonFile.Close()
policyDocument := &Document{}
err = json.NewDecoder(jsonFile).Decode(policyDocument)
if err != nil {
return nil, fmt.Errorf("malformed trust policy. To create a trust policy, see: %s", trustPolicyLink)
}
return policyDocument, nil
}
// GetVerificationLevel returns the VerificationLevel struct for the given
// SignatureVerification struct; it returns an error if the
// SignatureVerification is invalid.
func (signatureVerification *SignatureVerification) GetVerificationLevel() (*VerificationLevel, error) {
if signatureVerification.VerificationLevel == "" {
return nil, errors.New("signature verification level is empty or missing in the trust policy statement")
}
var baseLevel *VerificationLevel
for _, l := range VerificationLevels {
if l.Name == signatureVerification.VerificationLevel {
baseLevel = l
}
}
if baseLevel == nil {
return nil, fmt.Errorf("invalid signature verification level %q", signatureVerification.VerificationLevel)
}
if len(signatureVerification.Override) == 0 {
// nothing to override, return the base verification level
return baseLevel, nil
}
if baseLevel == LevelSkip {
return nil, fmt.Errorf("signature verification level %q can't be used to customize signature verification", baseLevel.Name)
}
customVerificationLevel := &VerificationLevel{
Name: "custom",
Enforcement: make(map[ValidationType]ValidationAction),
}
// populate the custom verification level with the base verification
// settings
for k, v := range baseLevel.Enforcement {
customVerificationLevel.Enforcement[k] = v
}
// override the verification actions with the user configured settings
for key, value := range signatureVerification.Override {
var validationType ValidationType
for _, t := range ValidationTypes {
if t == key {
validationType = t
break
}
}
if validationType == "" {
return nil, fmt.Errorf("verification type %q in custom signature verification is not supported, supported values are %q", key, ValidationTypes)
}
var validationAction ValidationAction
for _, action := range ValidationActions {
if action == value {
validationAction = action
break
}
}
if validationAction == "" {
return nil, fmt.Errorf("verification action %q in custom signature verification is not supported, supported values are %q", value, ValidationActions)
}
if validationType == TypeIntegrity {
return nil, fmt.Errorf("%q verification can not be overridden in custom signature verification", key)
} else if validationType != TypeRevocation && validationAction == ActionSkip {
return nil, fmt.Errorf("%q verification can not be skipped in custom signature verification", key)
}
customVerificationLevel.Enforcement[validationType] = validationAction
}
return customVerificationLevel, nil
}
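// For example, a SignatureVerification with level "strict" and override
// {"revocation": "log"} yields a custom level that enforces integrity,
// authenticity, authenticTimestamp and expiry, and only logs revocation
// failures.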
// clone returns a pointer to the deeply copied TrustPolicy
func (t *TrustPolicy) clone() *TrustPolicy {
return &TrustPolicy{
Name: t.Name,
SignatureVerification: t.SignatureVerification,
RegistryScopes: append([]string(nil), t.RegistryScopes...),
TrustedIdentities: append([]string(nil), t.TrustedIdentities...),
TrustStores: append([]string(nil), t.TrustStores...),
}
}
// validateTrustStore validates if the policy statement is following the
// Notary Project spec rules for truststores
func validateTrustStore(statement TrustPolicy) error {
for _, trustStore := range statement.TrustStores {
storeType, namedStore, found := strings.Cut(trustStore, ":")
if !found {
return fmt.Errorf("trust policy statement %q has malformed trust store value %q. The required format is <TrustStoreType>:<TrustStoreName>", statement.Name, trustStore)
}
if !isValidTrustStoreType(storeType) {
return fmt.Errorf("trust policy statement %q uses an unsupported trust store type %q in trust store value %q", statement.Name, storeType, trustStore)
}
if !file.IsValidFileName(namedStore) {
return fmt.Errorf("trust policy statement %q uses an unsupported trust store name %q in trust store value %q. Named store name needs to follow [a-zA-Z0-9_.-]+ format", statement.Name, namedStore, trustStore)
}
}
return nil
}
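// Illustrative sketch (not upstream code): a trust store value must have the
// form <TrustStoreType>:<TrustStoreName>. "ca" is assumed to be one of
// truststore.Types, and the store name must match [a-zA-Z0-9_.-]+. The
// statement values are hypothetical.
func exampleValidateTrustStore() {
	stmt := TrustPolicy{
		Name:        "example-statement",
		TrustStores: []string{"ca:acme-rockets"},
	}
	if err := validateTrustStore(stmt); err != nil {
		fmt.Println("invalid trust store:", err)
		return
	}
	fmt.Println("trust stores are valid")
}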
// validateTrustedIdentities validates that the policy statement follows the
// Notary Project spec rules for trusted identities
func validateTrustedIdentities(statement TrustPolicy) error {
// If there is a wildcard in trusted identities, there shouldn't be any
// other identities
if len(statement.TrustedIdentities) > 1 && slices.Contains(statement.TrustedIdentities, trustpolicy.Wildcard) {
return fmt.Errorf("trust policy statement %q uses a wildcard trusted identity '*', a wildcard identity cannot be used in conjunction with other values", statement.Name)
}
var parsedDNs []parsedDN
// If there are trusted identities, verify they are valid
for _, identity := range statement.TrustedIdentities {
if identity == "" {
return fmt.Errorf("trust policy statement %q has an empty trusted identity", statement.Name)
}
if identity != trustpolicy.Wildcard {
identityPrefix, identityValue, found := strings.Cut(identity, ":")
if !found {
return fmt.Errorf("trust policy statement %q has trusted identity %q missing separator", statement.Name, identity)
}
// notation natively supports x509.subject identities only
if identityPrefix == trustpolicy.X509Subject {
// identityValue cannot be empty
if identityValue == "" {
return fmt.Errorf("trust policy statement %q has trusted identity %q without an identity value", statement.Name, identity)
}
dn, err := pkix.ParseDistinguishedName(identityValue)
if err != nil {
return fmt.Errorf("trust policy statement %q has trusted identity %q with invalid identity value: %w", statement.Name, identity, err)
}
parsedDNs = append(parsedDNs, parsedDN{RawString: identity, ParsedMap: dn})
}
}
}
// Verify there are no overlapping DNs
if err := validateOverlappingDNs(statement.Name, parsedDNs); err != nil {
return err
}
// No error
return nil
}
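// Illustrative sketch (not upstream code): trusted identities are either the
// wildcard "*" or <prefix>:<value> pairs; only x509.subject values are parsed
// and checked as Distinguished Names. The DN below is a hypothetical example.
func exampleValidateTrustedIdentities() {
	stmt := TrustPolicy{
		Name:              "example-statement",
		TrustedIdentities: []string{"x509.subject: C=US, O=acme-rockets.io, CN=SecureBuilder"},
	}
	if err := validateTrustedIdentities(stmt); err != nil {
		fmt.Println("invalid trusted identities:", err)
		return
	}
	fmt.Println("trusted identities are valid")
}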
// validateRegistryScopes validates that the policy document follows the
// Notary Project spec rules for registry scopes
func validateRegistryScopes(policyDoc *Document) error {
registryScopeCount := make(map[string]int)
for _, statement := range policyDoc.TrustPolicies {
// Verify registry scopes are valid
if len(statement.RegistryScopes) == 0 {
return fmt.Errorf("trust policy statement %q has zero registry scopes, it must specify registry scopes with at least one value", statement.Name)
}
if len(statement.RegistryScopes) > 1 && slices.Contains(statement.RegistryScopes, trustpolicy.Wildcard) {
return fmt.Errorf("trust policy statement %q uses wildcard registry scope '*', a wildcard scope cannot be used in conjunction with other scope values", statement.Name)
}
for _, scope := range statement.RegistryScopes {
if scope != trustpolicy.Wildcard {
if err := validateRegistryScopeFormat(scope); err != nil {
return err
}
}
registryScopeCount[scope]++
}
}
// Verify one policy statement per registry scope
for key := range registryScopeCount {
if registryScopeCount[key] > 1 {
return fmt.Errorf("registry scope %q is present in multiple trust policy statements, one registry scope value can only be associated with one statement", key)
}
}
// No error
return nil
}
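// Illustrative sketch (not upstream code): every statement needs at least one
// registry scope, "*" cannot be combined with concrete scopes, and a scope may
// appear in only one statement across the document. The document below is
// hypothetical.
func exampleValidateRegistryScopes() {
	doc := &Document{
		TrustPolicies: []TrustPolicy{
			{Name: "apps", RegistryScopes: []string{"registry.example.com/apps"}},
			{Name: "everything-else", RegistryScopes: []string{"*"}},
		},
	}
	if err := validateRegistryScopes(doc); err != nil {
		fmt.Println("invalid registry scopes:", err)
		return
	}
	fmt.Println("registry scopes are valid")
}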
func validateOverlappingDNs(policyName string, parsedDNs []parsedDN) error {
for i, dn1 := range parsedDNs {
for j, dn2 := range parsedDNs {
if i != j && pkix.IsSubsetDN(dn1.ParsedMap, dn2.ParsedMap) {
return fmt.Errorf("trust policy statement %q has overlapping x509 trustedIdentities, %q overlaps with %q", policyName, dn1.RawString, dn2.RawString)
}
}
}
return nil
}
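// Illustrative sketch (not upstream code): "C=US, O=acme-rockets.io" is a
// subset of "C=US, O=acme-rockets.io, CN=SecureBuilder", so the two identities
// overlap and validateOverlappingDNs reports an error. The DNs are
// hypothetical.
func exampleValidateOverlappingDNs() {
	var parsed []parsedDN
	for _, id := range []string{
		"x509.subject: C=US, O=acme-rockets.io",
		"x509.subject: C=US, O=acme-rockets.io, CN=SecureBuilder",
	} {
		_, value, _ := strings.Cut(id, ":")
		dn, err := pkix.ParseDistinguishedName(value)
		if err != nil {
			fmt.Println("invalid DN:", err)
			return
		}
		parsed = append(parsed, parsedDN{RawString: id, ParsedMap: dn})
	}
	fmt.Println(validateOverlappingDNs("example-statement", parsed))
}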
// isValidTrustStoreType returns true if the given string is a valid
// truststore.Type, otherwise false.
func isValidTrustStoreType(s string) bool {
for _, p := range truststore.Types {
if s == string(p) {
return true
}
}
return false
}
func getArtifactPathFromReference(artifactReference string) (string, error) {
// TODO support more types of URI like "domain.com/repository",
// "domain.com/repository:tag"
i := strings.LastIndex(artifactReference, "@")
if i < 0 {
return "", fmt.Errorf("artifact URI %q could not be parsed, make sure it is the fully qualified OCI artifact URI without the scheme/protocol. e.g domain.com:80/my/repository@sha256:digest", artifactReference)
}
artifactPath := artifactReference[:i]
if err := validateRegistryScopeFormat(artifactPath); err != nil {
return "", err
}
return artifactPath, nil
}
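// Illustrative sketch (not upstream code): the artifact path is the
// repository part before the "@<digest>" suffix; the reference and digest
// below are placeholders.
func exampleGetArtifactPath() {
	ref := "registry.example.com:5000/net-monitor@sha256:73c803930ea3ba1e54bc25c2bdc53edd0284c62ed651fe7b00369da519a3c333"
	path, err := getArtifactPathFromReference(ref)
	if err != nil {
		fmt.Println("invalid reference:", err)
		return
	}
	fmt.Println(path) // registry.example.com:5000/net-monitor
}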
// parsedDN holds a raw Distinguished Name string and its parsed attribute map
type parsedDN struct {
RawString string
ParsedMap map[string]string
}
// validateRegistryScopeFormat validates that a scope follows the format
// defined in the distribution spec
func validateRegistryScopeFormat(scope string) error {
// Domain and Repository regexes are adapted from distribution
// implementation
// https://github.com/distribution/distribution/blob/main/reference/regexp.go#L31
domainRegexp := regexp.MustCompile(`^(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])(?:(?:\.(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9]))+)?(?::[0-9]+)?$`)
repositoryRegexp := regexp.MustCompile(`^[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?(?:(?:/[a-z0-9]+(?:(?:(?:[._]|__|[-]*)[a-z0-9]+)+)?)+)?$`)
ensureMessage := "make sure it is a fully qualified repository without the scheme, protocol or tag. For example domain.com/my/repository or a local scope like local/myOCILayout"
errorMessage := "registry scope %q is not valid, " + ensureMessage
errorWildCardMessage := "registry scope %q with wildcard(s) is not valid, " + ensureMessage
// Check for presence of * in scope
if len(scope) > 1 && strings.Contains(scope, "*") {
return fmt.Errorf(errorWildCardMessage, scope)
}
domain, repository, found := strings.Cut(scope, "/")
if !found {
return fmt.Errorf(errorMessage, scope)
}
if domain == "" || repository == "" || !domainRegexp.MatchString(domain) || !repositoryRegexp.MatchString(repository) {
return fmt.Errorf(errorMessage, scope)
}
// No errors
return nil
}
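// Illustrative sketch (not upstream code): how the format check treats a few
// hypothetical scopes. Note the repository regexp accepts lowercase only, and
// "*" is valid solely as a standalone scope.
func exampleValidateRegistryScopeFormat() {
	for _, scope := range []string{
		"registry.example.com/my/repository", // valid
		"registry.example.com/My/Repo",       // rejected: uppercase repository
		"registry.example.com/*",             // rejected: embedded wildcard
	} {
		fmt.Println(scope, "->", validateRegistryScopeFormat(scope))
	}
}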